diff --git a/spaces/0xHacked/zkProver/Dockerfile b/spaces/0xHacked/zkProver/Dockerfile deleted file mode 100644 index 48b42c021f80740492facb573bdfffea5696cf78..0000000000000000000000000000000000000000 --- a/spaces/0xHacked/zkProver/Dockerfile +++ /dev/null @@ -1,21 +0,0 @@ -FROM nvidia/cuda:12.1.1-devel-ubuntu20.04 -ARG DEBIAN_FRONTEND=noninteractive -ENV TZ=Asia/Hong_Kong -RUN apt-get update && apt-get install --no-install-recommends -y tzdata python3.9 python3.9-dev python3.9-venv build-essential && \ - apt-get clean && rm -rf /var/lib/apt/lists/* - -RUN useradd -m -u 1000 user -USER user - -ENV HOME=/home/user \ - PATH=/home/user/.local/bin:$PATH - -WORKDIR $HOME/app -COPY --chown=user . $HOME/app - -RUN python3.9 -m venv $HOME/app/venv && $HOME/app/venv/bin/pip install --no-cache-dir --upgrade pip -RUN $HOME/app/venv/bin/pip install --no-cache-dir --upgrade -r requirements.txt - -RUN cd $HOME/app && chmod +x $HOME/app/bin/* - -CMD ["/home/user/app/venv/bin/python", "app.py"] \ No newline at end of file diff --git a/spaces/1368565466ki/ZSTRD/attentions.py b/spaces/1368565466ki/ZSTRD/attentions.py deleted file mode 100644 index 86bc73b5fe98cc7b443e9078553920346c996707..0000000000000000000000000000000000000000 --- a/spaces/1368565466ki/ZSTRD/attentions.py +++ /dev/null @@ -1,300 +0,0 @@ -import math -import torch -from torch import nn -from torch.nn import functional as F - -import commons -from modules import LayerNorm - - -class Encoder(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.window_size = window_size - - self.drop = nn.Dropout(p_dropout) - self.attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout)) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask): - attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.attn_layers[i](x, x, attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class Decoder(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - - self.drop = nn.Dropout(p_dropout) - self.self_attn_layers = nn.ModuleList() - self.norm_layers_0 = nn.ModuleList() - self.encdec_attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - 
for i in range(self.n_layers): - self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init)) - self.norm_layers_0.append(LayerNorm(hidden_channels)) - self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True)) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask, h, h_mask): - """ - x: decoder input - h: encoder output - """ - self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype) - encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.self_attn_layers[i](x, x, self_attn_mask) - y = self.drop(y) - x = self.norm_layers_0[i](x + y) - - y = self.encdec_attn_layers[i](x, h, encdec_attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class MultiHeadAttention(nn.Module): - def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False): - super().__init__() - assert channels % n_heads == 0 - - self.channels = channels - self.out_channels = out_channels - self.n_heads = n_heads - self.p_dropout = p_dropout - self.window_size = window_size - self.heads_share = heads_share - self.block_length = block_length - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - self.attn = None - - self.k_channels = channels // n_heads - self.conv_q = nn.Conv1d(channels, channels, 1) - self.conv_k = nn.Conv1d(channels, channels, 1) - self.conv_v = nn.Conv1d(channels, channels, 1) - self.conv_o = nn.Conv1d(channels, out_channels, 1) - self.drop = nn.Dropout(p_dropout) - - if window_size is not None: - n_heads_rel = 1 if heads_share else n_heads - rel_stddev = self.k_channels**-0.5 - self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - - nn.init.xavier_uniform_(self.conv_q.weight) - nn.init.xavier_uniform_(self.conv_k.weight) - nn.init.xavier_uniform_(self.conv_v.weight) - if proximal_init: - with torch.no_grad(): - self.conv_k.weight.copy_(self.conv_q.weight) - self.conv_k.bias.copy_(self.conv_q.bias) - - def forward(self, x, c, attn_mask=None): - q = self.conv_q(x) - k = self.conv_k(c) - v = self.conv_v(c) - - x, self.attn = self.attention(q, k, v, mask=attn_mask) - - x = self.conv_o(x) - return x - - def attention(self, query, key, value, mask=None): - # reshape [b, d, t] -> [b, n_h, t, d_k] - b, d, t_s, t_t = (*key.size(), query.size(2)) - query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3) - key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - - scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1)) - if self.window_size is not None: - assert t_s == t_t, "Relative attention is only available for self-attention." 
- key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) - rel_logits = self._matmul_with_relative_keys(query /math.sqrt(self.k_channels), key_relative_embeddings) - scores_local = self._relative_position_to_absolute_position(rel_logits) - scores = scores + scores_local - if self.proximal_bias: - assert t_s == t_t, "Proximal bias is only available for self-attention." - scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype) - if mask is not None: - scores = scores.masked_fill(mask == 0, -1e4) - if self.block_length is not None: - assert t_s == t_t, "Local attention is only available for self-attention." - block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length) - scores = scores.masked_fill(block_mask == 0, -1e4) - p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] - p_attn = self.drop(p_attn) - output = torch.matmul(p_attn, value) - if self.window_size is not None: - relative_weights = self._absolute_position_to_relative_position(p_attn) - value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s) - output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings) - output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t] - return output, p_attn - - def _matmul_with_relative_values(self, x, y): - """ - x: [b, h, l, m] - y: [h or 1, m, d] - ret: [b, h, l, d] - """ - ret = torch.matmul(x, y.unsqueeze(0)) - return ret - - def _matmul_with_relative_keys(self, x, y): - """ - x: [b, h, l, d] - y: [h or 1, m, d] - ret: [b, h, l, m] - """ - ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) - return ret - - def _get_relative_embeddings(self, relative_embeddings, length): - max_relative_position = 2 * self.window_size + 1 - # Pad first before slice to avoid using cond ops. - pad_length = max(length - (self.window_size + 1), 0) - slice_start_position = max((self.window_size + 1) - length, 0) - slice_end_position = slice_start_position + 2 * length - 1 - if pad_length > 0: - padded_relative_embeddings = F.pad( - relative_embeddings, - commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]])) - else: - padded_relative_embeddings = relative_embeddings - used_relative_embeddings = padded_relative_embeddings[:,slice_start_position:slice_end_position] - return used_relative_embeddings - - def _relative_position_to_absolute_position(self, x): - """ - x: [b, h, l, 2*l-1] - ret: [b, h, l, l] - """ - batch, heads, length, _ = x.size() - # Concat columns of pad to shift from relative to absolute indexing. - x = F.pad(x, commons.convert_pad_shape([[0,0],[0,0],[0,0],[0,1]])) - - # Concat extra elements so to add up to shape (len+1, 2*len-1). - x_flat = x.view([batch, heads, length * 2 * length]) - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0,0],[0,0],[0,length-1]])) - - # Reshape and slice out the padded elements. 
- x_final = x_flat.view([batch, heads, length+1, 2*length-1])[:, :, :length, length-1:] - return x_final - - def _absolute_position_to_relative_position(self, x): - """ - x: [b, h, l, l] - ret: [b, h, l, 2*l-1] - """ - batch, heads, length, _ = x.size() - # padd along column - x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length-1]])) - x_flat = x.view([batch, heads, length**2 + length*(length -1)]) - # add 0's in the beginning that will skew the elements after reshape - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]])) - x_final = x_flat.view([batch, heads, length, 2*length])[:,:,:,1:] - return x_final - - def _attention_bias_proximal(self, length): - """Bias for self-attention to encourage attention to close positions. - Args: - length: an integer scalar. - Returns: - a Tensor with shape [1, 1, length, length] - """ - r = torch.arange(length, dtype=torch.float32) - diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) - return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) - - -class FFN(nn.Module): - def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.activation = activation - self.causal = causal - - if causal: - self.padding = self._causal_padding - else: - self.padding = self._same_padding - - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size) - self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size) - self.drop = nn.Dropout(p_dropout) - - def forward(self, x, x_mask): - x = self.conv_1(self.padding(x * x_mask)) - if self.activation == "gelu": - x = x * torch.sigmoid(1.702 * x) - else: - x = torch.relu(x) - x = self.drop(x) - x = self.conv_2(self.padding(x * x_mask)) - return x * x_mask - - def _causal_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = self.kernel_size - 1 - pad_r = 0 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x - - def _same_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = (self.kernel_size - 1) // 2 - pad_r = self.kernel_size // 2 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x diff --git a/spaces/17TheWord/RealESRGAN/tests/test_discriminator_arch.py b/spaces/17TheWord/RealESRGAN/tests/test_discriminator_arch.py deleted file mode 100644 index c56a40c7743630aa63b3e99bca8dc1a85949c4c5..0000000000000000000000000000000000000000 --- a/spaces/17TheWord/RealESRGAN/tests/test_discriminator_arch.py +++ /dev/null @@ -1,19 +0,0 @@ -import torch - -from realesrgan.archs.discriminator_arch import UNetDiscriminatorSN - - -def test_unetdiscriminatorsn(): - """Test arch: UNetDiscriminatorSN.""" - - # model init and forward (cpu) - net = UNetDiscriminatorSN(num_in_ch=3, num_feat=4, skip_connection=True) - img = torch.rand((1, 3, 32, 32), dtype=torch.float32) - output = net(img) - assert output.shape == (1, 1, 32, 32) - - # model init and forward (gpu) - if torch.cuda.is_available(): - net.cuda() - output = net(img.cuda()) - assert output.shape == (1, 1, 32, 32) diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Contoh Surat Rasmi Permohonan Tapak Jualan.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Contoh Surat Rasmi Permohonan Tapak Jualan.md 
deleted file mode 100644
index 836d5a0981804081cd50300fd8bab00030733638..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Contoh Surat Rasmi Permohonan Tapak Jualan.md
+++ /dev/null
@@ -1,66 +0,0 @@
-
Apakah anda ingin memohon tapak jualan untuk menjalankan perniagaan anda? Jika ya, anda perlu menulis surat rasmi permohonan tapak jualan yang betul dan lengkap. Surat rasmi permohonan tapak jualan adalah surat yang ditulis oleh pemohon kepada pihak berkuasa yang menguruskan tapak jualan, seperti majlis perbandaran, pejabat tanah, atau pihak swasta. Surat ini bertujuan untuk meminta kebenaran dan persetujuan untuk menyewa atau menggunakan tapak jualan yang diingini.
-DOWNLOAD 🔗 https://byltly.com/2uKx6I
-Surat rasmi permohonan tapak jualan harus mengandungi beberapa maklumat penting, seperti:
-Berikut adalah contoh surat rasmi permohonan tapak jualan yang boleh dijadikan rujukan:
- -SI FULAN BIN SI FULAN
-No. 100, Kampung Tiada Nama
-58900 Kuala Tiada
-Negeri Darul Ikhlas
-
-Kepada,
-Pihak Pengurusan Tapak Jualan
-Majlis Perbandaran Kuala Tiada
-58900 Kuala Tiada
-Negeri Darul Ikhlas
-
-12 April 2023
-
-Tuan/Puan,
-
-PERMOHONAN SEWA TAPAK JUALAN DI TAMAN REKREASI KUALA TIADA
-
-Merujuk perkara di atas, saya Si Fulan Bin Si Fulan ingin memohon untuk menyewa satu tapak jualan di Taman Rekreasi Kuala Tiada. Tujuan saya memohon sewa tapak jualan ini adalah untuk menjalankan perniagaan saya iaitu menjual makanan ringan dan minuman sejuk.
-
-Dibawah ini disertakan butir-butir perniagaan saya untuk rujukan pihak tuan/puan:
-
-Nama: Si Fulan Bin Si Fulan
-No. Kad Pengenalan: 830101-01-1234
-No. Telefon: 012-3456789
-Alamat Tetap: No. 100, Kampung Tiada Nama, 58900 Kuala Tiada, Negeri Darul Ikhlas
-Pekerjaan Tetap: Guru Sekolah Menengah Kebangsaan Kuala Tiada
-Jenis Perniagaan: Menjual makanan ringan dan minuman sejuk
-Masa Perniagaan: Setiap hujung minggu dari jam 10 pagi hingga 6 petang
-
-Disini saya sertakan sekali dokumen-dokumen sokongan saya, iaitu salinan kad pengenalan, sijil pendaftaran perniagaan (SSM), lesen perniagaan (MPK), pelan lokasi tapak jualan yang dikehendaki, dan gambar tapak jualan tersebut di bahagian lampiran.
-
-Semoga permohonan saya ini dapat dipertimbangkan dengan sebaiknya oleh pihak tuan/puan. Saya amat berharap dapat
-
-menyewa tapak jualan di Taman Rekreasi Kuala Tiada untuk menambah pendapatan saya dan memberi perkhidmatan yang baik kepada pengunjung taman.
-
-Segala kerjasama dan bantuan dari pihak tuan/puan saya dahulukan dengan ribuan terima kasih. Sekiranya ada sebarang pertanyaan atau maklum balas, sila hubungi saya di nombor telefon yang diberikan.
-
-Sekian, terima kasih.
-
-Yang benar,
-
-..................................
-(SI FULAN BIN SI FULAN)
-No. Telefon: 012-3456789
-
- cec2833e83
-
-
\ No newline at end of file
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Cours archicad 15 gratuit Matrisez le logiciel de modlisation BIM.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Cours archicad 15 gratuit Matrisez le logiciel de modlisation BIM.md
deleted file mode 100644
index b9e39100b580cb7786dabb54d4232da08273bfba..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Cours archicad 15 gratuit Matrisez le logiciel de modlisation BIM.md
+++ /dev/null
@@ -1,75 +0,0 @@
-
-
- What are the benefits of learning Archicad 15
- How to access free courses and tutorials on Archicad 15 | | H2: Archicad basics | - How to install and set up Archicad 15
- How to use the interface and tools of Archicad 15
- How to create and edit 2D and 3D objects in Archicad 15 | | H2: Archicad advanced | - How to use graphic overrides and substitutions in Archicad 15
- How to create custom stairs, railings, roofs, and slabs in Archicad 15
- How to use libraries, attributes, and layers in Archicad 15 | | H2: Archicad projects | - How to create a floor plan, a section, a elevation, and a detail in Archicad 15
- How to generate photorealistic renderings and animations in Archicad 15
- How to print and export drawings and models in Archicad 15 | | H2: Archicad resources | - How to find and download free Archicad templates, objects, and materials
- How to access online courses, tutorials, and forums on Archicad 15
- How to get certified and improve your skills on Archicad 15 | | H1: Conclusion | - A summary of the main points of the article
- A call to action for the readers to start learning Archicad 15 | # Article with HTML formatting Introduction
-Are you an architect, a designer, or a student who wants to create stunning architectural projects with ease and efficiency? If so, you might want to learn how to use Archicad, one of the most popular and powerful software packages for building information modeling (BIM).
-Cours archicad 15 gratuit
-Download 🆗 https://byltly.com/2uKwi0
-Archicad is a software that allows you to design, model, document, and visualize your projects in 2D and 3D. You can create realistic models of buildings, structures, interiors, landscapes, and more. You can also produce high-quality drawings, renderings, animations, and reports with Archicad.
-But how can you learn how to use Archicad without spending a fortune on courses or books? The answer is simple: you can access free courses and tutorials on Archicad 15 online. In this article, we will show you how you can learn everything you need to know about Archicad 15 for free. We will cover the basics, the advanced features, the projects, and the resources that you can use to master Archicad 15.
- Archicad basics
-Before you start working on your projects with Archicad 15, you need to learn some basic concepts and skills. In this section, we will show you how to install and set up Archicad 15, how to use the interface and tools of Archicad 15, and how to create and edit 2D and 3D objects in Archicad 15.
-How to install and set up Archicad 15
-To install Archicad 15 on your computer, you need to download the installer from the official website of Graphisoft, the developer of Archicad. You can choose between Windows or Mac versions depending on your operating system. You can also select your preferred language from a list of options.
-Formation archicad 15 en ligne gratuite
-Tutoriel archicad 15 pour débutants gratuit
-Apprendre archicad 15 pas à pas gratuitement
-Cours archicad 15 pdf télécharger gratuitement
-Vidéo cours archicad 15 complet gratuit
-Cours archicad 15 niveau avancé gratuit
-Cours archicad 15 en français gratuit
-Cours archicad 15 avec certificat gratuit
-Cours archicad 15 sur udemy gratuit
-Cours archicad 15 sur youtube gratuit
-Cours archicad 15 avec exercices pratiques gratuit
-Cours archicad 15 pour architectes gratuit
-Cours archicad 15 pour étudiants gratuit
-Cours archicad 15 pour professionnels gratuit
-Cours archicad 15 pour débutants gratuit
-Cours archicad 15 pour maîtriser le logiciel gratuit
-Cours archicad 15 pour apprendre les bases gratuit
-Cours archicad 15 pour créer des plans gratuits
-Cours archicad 15 pour réaliser des projets gratuits
-Cours archicad 15 pour dessiner en 3D gratuit
-Cours archicad 15 pour modéliser des bâtiments gratuits
-Cours archicad 15 pour concevoir des structures gratuites
-Cours archicad 15 pour optimiser des espaces gratuits
-Cours archicad 15 pour gérer des documents gratuits
-Cours archicad 15 pour collaborer avec d'autres utilisateurs gratuits
-Cours archicad 15 pour exporter des fichiers gratuits
-Cours archicad 15 pour importer des données gratuites
-Cours archicad 15 pour personnaliser des paramètres gratuits
-Cours archicad 15 pour utiliser des outils gratuits
-Cours archicad 15 pour appliquer des effets gratuits
-Cours archicad 15 pour animer des scènes gratuites
-Cours archicad 15 pour simuler des éclairages gratuits
-Cours archicad 15 pour calculer des coûts gratuits
-Cours archicad 15 pour respecter des normes gratuites
-Cours archicad 15 pour intégrer des éléments gratuits
-Cours archicad 15 pour ajouter des textures gratuites
-Cours archicad 15 pour modifier des couleurs gratuites
-Cours archicad 15 pour insérer des objets gratuits
-Cours archicad 15 pour composer des vues gratuites
-Cours archicad 15 pour générer des rendus gratuits
-Cours archicad 15 pour imprimer des plans gratuits
-Cours archicad 15 pour publier des rapports gratuits
-Cours archicad 15 pour partager des résultats gratuits
-Cours archicad 15 pour sauvegarder des travaux gratuits
-Cours archicad 15 pour restaurer des versions gratuites
-Cours archicad 15 pour corriger des erreurs gratuites
-Cours archicad 15 pour améliorer la qualité gratuite
-Cours archicad 15 pour augmenter la productivité gratuite
-Once you have downloaded the installer, you need to run it and follow the instructions on the screen. You will need to accept the license agreement, choose a destination folder, and enter your serial number if you have one. If you don't have a serial number, you can use the trial version of Archicad 15 for 30 days.
-After the installation is complete, you can launch Archicad 15 from your desktop or start menu. You will see a welcome screen that will guide you through some initial settings. You can choose your project type (residential or commercial), your measurement system (metric or imperial), your working environment (standard or customized), and your template file (default or user-defined).
-How to use the interface and tools of Archicad 15
-The interface of Archicad 15 consists of several elements that help you navigate and work on your projects. The main elements are:
-
-- The menu bar: it contains various menus that give you access to different commands and options.
-- The toolbar: it contains icons that represent different tools that you can use to create and modify objects.
-- The toolbox: it contains icons that represent different object types that you can create with the tools.
-- The info box: it displays information about the selected tool or object such as parameters, settings, properties, etc.
-- The navigator: it shows the structure of your project in terms of stories, sections, elevations, layouts, etc.
-- The project map: it shows a graphical representation of your project in terms of views such as floor plans, perspectives, axonometries, etc.
-- The view map: it shows a list of saved views that you can recall at any time.
-- The layout book: it shows a list of layouts that contain drawings or models that you can print or export.
-- The publisher: it allows you to publish your project as PDF files, DWG files, images files, etc.
-- The organizer: it allows you to organize your project data in terms of attributes such as layers, pensets 0a6ba089eb
-
-
\ No newline at end of file
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/((FULL)) Xforce Keygen 64-bit Alias AutoStudio 2019 Activation.md b/spaces/1gistliPinn/ChatGPT4/Examples/((FULL)) Xforce Keygen 64-bit Alias AutoStudio 2019 Activation.md
deleted file mode 100644
index 017aef0d874029addff0739d8de306850da989f0..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/((FULL)) Xforce Keygen 64-bit Alias AutoStudio 2019 Activation.md
+++ /dev/null
@@ -1,7 +0,0 @@
-
- Free Porn Movie Dirty Latino Trini Porni
Xforce Keygen HD2k3 Full Version Keys Free Download
bromance
One Piece Xforce Keygen Sony Playstation
Private Postings on WhiskeyX® 4 - 1.0.4.00.0 Offline Crack
Free Mobile Apps Search and Browser
Linux News, Recipes, And Tutorials
Mouse Mute - DEVBuild 2007
XXX Movies HD Netlix
download film vk
-xforce keygen 64-bit Alias AutoStudio 2019 activation
DOWNLOAD ✵ https://imgfil.com/2uxXfh
- DXF Import Tutorial is a
Noob Friendly Xferxramen unetbootin lite download iran
The Orchard-cms.de lite - author Klaus Dehne
Earnest Journey 2011 English SDH ISO
Dance Central 3 Hack apk
AIDA32 128bit Download
Wifi Router: http://www.webfixer.eu/download.php?id=5e9e45ea4d94a2259e3a70a7ede14e91.pdf.l
Move Support for Exchange, Address Book and more..pptx
India+2 hack patch/crack
Just a bunch of kink.mobile.10.55.0_CRL
Cloud Septar - Apk.torrent
xforce keygen 64-bit AutoCAD LT 2014 Crack
Disney's Live-action Aladdin movie to hit UK cinemas on Wednesday!
Freeware file converter and zip file creator!.exe
Everything - Free Download for Mac
Hitman 2 free download with trials keygen crack for play
Anime Episode Fxu Cheat For Each Episode Download
URL Hack No Survey Unlimited Money Best Site Newtonsoft.JSON
bhengasi Pokkiri Raja - Full Hindi Movie HD.mp4
11169442343798870042ULBG6HAL
- Illustrator Tutorials and Tips Free Tutorial and Tips Videos
Free NTI Audio Editor 3.0.0 Crack with Keygen
Sony IS300 V2 For Free Download
Algorithm Design Manual PDF
Toughest archer ppd statistics for 1.5
FARM ITALIA - LA SCUOLA DEI MIGLIORI AMATI ( www.teammiglioriamati.com )
PEUGEOT 400 2009 MANUAL FREE DOWNLOAD
Exclusively for: Xfire
Xforce Keygen Activation WINDOWS-7
Manually define the number of tile groups, and the appropriate WCF settings for them
899543212b
-
-
\ No newline at end of file
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Backstreet Boys Millennium Full Album Zip.md b/spaces/1gistliPinn/ChatGPT4/Examples/Backstreet Boys Millennium Full Album Zip.md
deleted file mode 100644
index 00d0a5489805005b382baa3ad618a10754a79335..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Backstreet Boys Millennium Full Album Zip.md
+++ /dev/null
@@ -1,6 +0,0 @@
-Backstreet Boys, Millennium Full Album Zip
-Download File ✯✯✯ https://imgfil.com/2uxXly
-
-Backstreet Boys, Millennium Full Album Zip tinyurl.com/lttzqfo. 4d29de3e1b
-
-
-
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Cyberlink Powerdvd 14 Crack Serial Key.md b/spaces/1gistliPinn/ChatGPT4/Examples/Cyberlink Powerdvd 14 Crack Serial Key.md
deleted file mode 100644
index 23086ef2bf904465a4a74bf539ce1ec8797901cc..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Cyberlink Powerdvd 14 Crack Serial Key.md
+++ /dev/null
@@ -1,64 +0,0 @@
-Cyberlink powerdvd 14 crack serial key
-Download ⇔ https://imgfil.com/2uy0mX
-
-1. Close this window and restart the software.
-
-2. Launch the software and register.
-
-3. Please select the license key and click on "Update".
-
-4. Your new activation key will be displayed.
-
-What If I Dislike the Product?
-
-1. Use the product for 30 days (For Limited Period)
-
-For users who have a good experience with the software, please give us a good review. In order to enhance the compatibility and performance of the product, we will take no charge for technical support for 90 days after your purchase. If you have any issue after the 90 days, please use the "Customer Service" page of the product page to ask for assistance.
-
-1. Log into the product and open the activation tab.
-
-2. Click "Customer Service" and then click "Activation Request".
-
-3. Please indicate your purchase date and product name in the next blank spaces.
-
-4. Please indicate your version, operating system and the reason for your request.
-
-5. Your Customer Service will be received within 24 hours and your request will be handled within 3 days.
-
-1. Go to
-
-2. Please register the product and log in.
-
-3. Click the "Customer Service" link and follow the instructions.
-
-What If the Software I have Already Paid is Suboptimal?
-
-1. Within 3 days from your purchase date, if you are dissatisfied with the software you have paid for, please contact Customer Support.
-
-2. Please indicate the purchase date, product name, operating system and the reason for your request.
-
-3. Please submit the "Requisition for Customer Service" form and include with your message your purchase date and product name.
-
-What If the License Key I Have Entered Is Incorrect?
-
-1. Please re-register.
-
-2. Please follow the steps on the activation page.
-
-What If I Have Forgotten the License Key?
-
-1. Please follow the steps on the activation page.
-
-3. Go to the "Settings" page.
-
-4. Select the "License Keys" tab.
-
-5. Please enter the activation code and save.
-
-6. Your activation code will be displayed.
-
-Is My License Key Valid?
-
-1. Please visit 4fefd39f24
-
-
-
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/DWG TrueView 2012 (64bit) (Product Key And Xforce Keygen) .rar UPDATED.md b/spaces/1gistliPinn/ChatGPT4/Examples/DWG TrueView 2012 (64bit) (Product Key And Xforce Keygen) .rar UPDATED.md
deleted file mode 100644
index 747dd850de609540ca9bdd4f5e3d01c5a518290e..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/DWG TrueView 2012 (64bit) (Product Key And Xforce Keygen) .rar UPDATED.md
+++ /dev/null
@@ -1,6 +0,0 @@
-DWG TrueView 2012 (64bit) (Product Key And Xforce Keygen) .rar
-DOWNLOAD ⭐ https://imgfil.com/2uy1f8
-
-Autodesk DWG TrueView. 2012 64-bit - free AutoCAD DWG file .... DWG TrueView 2008 (64bit) (Product Key And Xforce Keygen) .rar ... 4d29de3e1b
-
-
-
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Digifish Aqua Real 2 Version 1.04 Full With Serial.md b/spaces/1gistliPinn/ChatGPT4/Examples/Digifish Aqua Real 2 Version 1.04 Full With Serial.md
deleted file mode 100644
index 5165a37e66fd7dcaf53921c863ffe4985d0f7a91..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Digifish Aqua Real 2 Version 1.04 Full With Serial.md
+++ /dev/null
@@ -1,7 +0,0 @@
-
-this new version includes two improvements over the previous version 1.03: more species, updated sounds, stable and bug-free operation. added in this release: 1.04: new lighting effects, new background music engine, new fish. aquarium hd for xbox 360 is a screensaver that features a unique aquarium simulation as well as music playback! experience an exotic tropical aquarium in real time as thousands of colorful fish swim in a beautiful underwater world. the aquarium simulation includes light, sound, water and even real fish! play background music from your hard drive or from a networked pc! the music is not only for background, you can even add your own music to play while the aquarium is running. the music can be played from your hard drive or from a networked pc. choose between a quiet and a slightly more active light and sound settings.
-digifish aqua real 2 version 1.04 full with serial
-Download File »»» https://imgfil.com/2uxXpp
-this release was created for you, eager to use aqua real 2 full version v1.04 full and without any limitations. our intentions are not to harm aqua software company but to give the possibility to those who can not pay for any piece of software out there. this should be your intention too, as a user, to fully evaluate aqua real 2 full version v1.04 without restrictions and then decide.
-digifish aqua real 2 for pc is a really nice fish-watching program. it eschews the traditional aquarium, instead placing your fish in the open ocean, complete with sharks. the free trial is pretty limited, but has nice animations, backgrounds, and fish. the interface is really clean and easy-to-use, as well. probably the top-of-the-line for fish screensavers. full version costs $20.
899543212b
-
-
\ No newline at end of file
diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Bus Simulator Indonesia on PC How to Download and Play with LDPlayer Emulator.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Bus Simulator Indonesia on PC How to Download and Play with LDPlayer Emulator.md
deleted file mode 100644
index 90062530aed579de8e89a22f7e00e012f8b596f8..0000000000000000000000000000000000000000
--- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Bus Simulator Indonesia on PC How to Download and Play with LDPlayer Emulator.md
+++ /dev/null
@@ -1,130 +0,0 @@
-
-How to Download and Play Bus Simulator Indonesia on PC with LDPlayer
-Bus Simulator Indonesia is a popular and realistic game that lets you experience what it's like to be a bus driver in Indonesia. You can design your own livery, drive in authentic Indonesian cities and places, honk your horn with the iconic "Om Telolet Om" sound, and enjoy high-quality graphics and gameplay. But what if you want to play Bus Simulator Indonesia on a bigger screen, with better performance, and more control options? That's where LDPlayer comes in. LDPlayer is a free and powerful Android emulator that allows you to play Android games on your PC. In this article, we will show you how to download and play Bus Simulator Indonesia on PC with LDPlayer.
-ldplayer download bus simulator indonesia
-Download Zip ✵ https://urlin.us/2uSVbG
- What is Bus Simulator Indonesia?
-Bus Simulator Indonesia (aka BUSSID) is a simulation game developed by Maleo, an Indonesian game studio. It was released in 2017 and has been updated regularly with new features and improvements. According to the Google Play Store, it has over 100 million downloads and 4.2 stars rating.
-Features of Bus Simulator Indonesia
-Below are some of the top features of Bus Simulator Indonesia:
-
-- Design your own livery
-- Very easy and intuitive control
-- Authentic Indonesian cities and places
-- Indonesian Buses
-- Cool and fun honks
-- "Om Telolet Om!" (Uncle, honk your horn, uncle! )
-- High quality and detailed 3D graphics
-- No obstructive ads while driving
-- Leaderboard
-- Data saved online
-- Use your own 3D model using vehicle mod system
-- Online multiplayer convoy
-
- Why play Bus Simulator Indonesia on PC?
-Playing Bus Simulator Indonesia on PC has many advantages over playing it on mobile devices. Here are some of them:
-
-- You can enjoy a larger screen and better graphics quality.
-- You can use your keyboard and mouse for more precise and comfortable control.
-- You can customize your keymapping according to your preference.
-- You can run multiple instances of the game at the same time using LDPlayer's multi-instance feature.
-- You can avoid battery drain, overheating, and phone calls that interrupt your gameplay.
-
- What is LDPlayer?
-LDPlayer is a free Android emulator for Windows PC that allows you to play Android games and apps on your computer. It is based on Android 9 kernel and supports both 64-bit and 32-bit apps. It has many features that make it one of the best emulators for gaming.
-How to play Bus Simulator Indonesia on PC with LDPlayer
-Bus Simulator Indonesia PC version free download
-LDPlayer emulator for Bus Simulator Indonesia
-Bus Simulator Indonesia online multiplayer with LDPlayer
-Best settings for Bus Simulator Indonesia on LDPlayer
-Bus Simulator Indonesia mod apk download on LDPlayer
-LDPlayer Android emulator for Bus Simulator Indonesia Maleo
-Bus Simulator Indonesia graphics and FPS on LDPlayer
-How to install Bus Simulator Indonesia on PC using LDPlayer
-Bus Simulator Indonesia custom controls with LDPlayer
-Bus Simulator Indonesia livery design on PC with LDPlayer
-Bus Simulator Indonesia klakson om telolet om on LDPlayer
-Bus Simulator Indonesia leaderboards and data on LDPlayer
-How to update Bus Simulator Indonesia on PC with LDPlayer
-Bus Simulator Indonesia bug fixes and improvements on LDPlayer
-Bus Simulator Indonesia authentic Indonesian environment on PC with LDPlayer
-Bus Simulator Indonesia easy and intuitive control on LDPlayer
-Bus Simulator Indonesia high quality and detailed 3D graphics on PC with LDPlayer
-How to add music to Bus Simulator Indonesia on PC with LDPlayer
-Bus Simulator Indonesia vehicle mod system on PC with LDPlayer
-How to play Bus Simulator Indonesia offline on PC with LDPlayer
-Bus Simulator Indonesia tips and tricks on PC with LDPlayer
-How to use macros and scripts for Bus Simulator Indonesia on LDPlayer
-Bus Simulator Indonesia review and rating on PC with LDPlayer
-How to sync data between mobile and PC for Bus Simulator Indonesia with LDPlayer
-How to play Bus Simulator Indonesia in full screen mode on PC with LDPlayer
-How to record and share gameplay of Bus Simulator Indonesia on PC with LDPlayer
-How to use keyboard and mouse for Bus Simulator Indonesia on PC with LDPlayer
-How to customize interface and layout for Bus Simulator Indonesia on PC with LDPlayer
-How to run multiple instances of Bus Simulator Indonesia on PC with LDPlayer
-How to play Bus Simulator Indonesia in different languages on PC with LDPlayer
-How to change resolution and orientation for Bus Simulator Indonesia on PC with LDPlayer
-How to enable virtualization for better performance of Bus Simulator Indonesia on PC with LDPlayer
-How to fix lag and crash issues of Bus Simulator Indonesia on PC with LDPlayer
-How to access Google Play Store and Google Play Games for Bus Simulator Indonesia on PC with LDPlayer
-How to use gamepad or controller for Bus Simulator Indonesia on PC with LDPlayer
-How to enable smart keymapping for Bus Simulator Indonesia on PC with LDPlayer
-How to enable turbo mode for faster loading of Bus Simulator Indonesia on PC with LDPlayer
-How to enable root permission for advanced features of Bus Simulator Indonesia on PC with LDPlayer
-How to enable network bridge for better connectivity of Bus Simulator Indonesia on PC with LDPlayer
-How to enable eco mode for lower CPU usage of Bus Simulator Indonesia on PC with LDPlayer
-How to enable screenshot and screen recorder for capturing moments of Bus Simulator Indonesia on PC with LDPlayer
-How to enable operation recorder for automating tasks of Bus Simulator Indonesia on PC with LDPlayer
-How to enable sync settings for synchronizing preferences of Bus Simulator Indonesia across devices with LDPlayer
-How to enable game booster for optimizing performance of Bus Simulator Indonesia on PC with LDPlayer
-How to enable disk cleanup for freeing up space of Bus Simulator Indonesia on PC with LDPlayer
-How to enable app cloner for creating copies of Bus Simulator Indonesia on PC with LDPlayer
-How to enable app market for discovering more games like Bus Simulator Indonesia on PC with LDPlayer
- Features of LDPlayer
-Below are some of the top features of LDPlayer:
-
-- High performance and stability
-- Low CPU and GPU consumption
-- Graphic quality optimization
-- Custom controls and keymapping tool
-- Multi-instance and multi-instance sync
-- Macros and scripts
-- Data encryption in transit
-- Data deletion request
-- No data shared with third parties
-- Compatible with Hyper-V
- Why use LDPlayer to play Bus Simulator Indonesia on PC?
- Using LDPlayer to play Bus Simulator Indonesia on PC has many benefits, such as:
-
-- You can play the game smoothly and without lag, even on low-end PCs.
-- You can enjoy the game with high-resolution graphics and realistic sound effects.
-- You can customize your controls and keymapping to suit your play style and preferences.
-- You can use LDPlayer's features to enhance your gameplay, such as macros, scripts, multi-instance, and multi-instance sync.
-- You can play the game safely and securely, without worrying about data leakage or malware.
-
- How to download and install LDPlayer and Bus Simulator Indonesia on PC?
-Downloading and installing LDPlayer and Bus Simulator Indonesia on PC is very easy and simple. Just follow these steps:
- Step 1: Download LDPlayer from the official website
-Go to the official website of LDPlayer () and click on the "Download" button. You will see a pop-up window asking you to save the LDPlayer installer file. Choose a location where you want to save the file and click "Save". The file size is about 500 MB, so it may take some time depending on your internet speed.
- Step 2: Install LDPlayer on your PC
-Once the download is complete, locate the LDPlayer installer file and double-click on it. You will see a window asking you to choose the installation language. Select your preferred language and click "OK". Then, follow the instructions on the screen to complete the installation process. It may take a few minutes depending on your PC specifications.
- Step 3: Launch LDPlayer and search for Bus Simulator Indonesia on the Play Store
-After the installation is done, launch LDPlayer from your desktop or start menu. You will see the LDPlayer home screen with various icons and options. Click on the "Play Store" icon to open the Google Play Store app. You will need to sign in with your Google account or create a new one if you don't have one. Then, type "Bus Simulator Indonesia" in the search bar and hit enter. You will see a list of results related to your search query.
- Step 4: Install Bus Simulator Indonesia and enjoy the game
-Find the Bus Simulator Indonesia app from the list of results and click on it. You will see a page with more information about the app, such as screenshots, ratings, reviews, and description. Click on the "Install" button to start downloading and installing the app on your PC. The app size is about 300 MB, so it may take some time depending on your internet speed. Once the installation is complete, you can click on the "Open" button to launch the game. Alternatively, you can also find the game icon on your LDPlayer home screen or app drawer and click on it to start playing.
- Conclusion
-Bus Simulator Indonesia is a fun and realistic game that lets you experience what it's like to be a bus driver in Indonesia. You can design your own livery, drive in authentic Indonesian cities and places, honk your horn with the iconic "Om Telolet Om" sound, and enjoy high-quality graphics and gameplay. However, playing Bus Simulator Indonesia on mobile devices may not give you the best gaming experience due to small screen size, limited control options, low performance, battery drain, overheating, phone calls, etc. That's why we recommend you to play Bus Simulator Indonesia on PC with LDPlayer, a free and powerful Android emulator that allows you to play Android games on your computer with larger screen size, better graphics quality, more control options, higher performance, and more features. In this article, we have shown you how to download and play Bus Simulator Indonesia on PC with LDPlayer in four easy steps. We hope you find this article helpful and enjoy playing Bus Simulator Indonesia on PC with LDPlayer.
- FAQs
-Here are some frequently asked questions about playing Bus Simulator Indonesia on PC with LDPlayer:
- Q: Is LDPlayer safe to use?
-A: Yes, LDPlayer is safe to use. It does not contain any malware or virus that can harm your PC or data. It also does not share your data with any third parties without your consent . You can use LDPlayer with confidence and peace of mind.
- Q: Is LDPlayer free to use?
-A: Yes, LDPlayer is free to use. You don't have to pay anything to download or use LDPlayer. However, some optional features may require payment or subscription, such as removing ads or unlocking premium features . You can choose whether to use these features or not according to your needs.
- Q: How can I update LDPlayer and Bus Simulator Indonesia on PC?
-A: To update LDPlayer, you can go to the LDPlayer settings and click on the "Check for updates" button. You will see a pop-up window telling you whether there is a new version available or not. If there is, you can click on the "Update" button to download and install the latest version of LDPlayer. To update Bus Simulator Indonesia, you can go to the Play Store app and search for Bus Simulator Indonesia. You will see a page with more information about the app, such as screenshots, ratings, reviews, and description. If there is an update available, you will see an "Update" button next to the "Open" button. You can click on the "Update" button to download and install the latest version of Bus Simulator Indonesia.
- Q: How can I use LDPlayer's features to enhance my gameplay of Bus Simulator Indonesia?
-A: LDPlayer has many features that can enhance your gameplay of Bus Simulator Indonesia, such as macros, scripts, multi-instance, and multi-instance sync. Macros and scripts allow you to automate certain actions or commands in the game, such as honking, braking, accelerating, etc. You can record your own macros or scripts using LDPlayer's built-in tool, or import them from other sources. Multi-instance and multi-instance sync allow you to run multiple instances of the game at the same time on your PC, and synchronize your actions across all instances. This way, you can play with multiple accounts or characters, or join online multiplayer convoys with yourself.
- Q: How can I contact LDPlayer's customer service if I have any questions or issues?
-A: If you have any questions or issues regarding LDPlayer or Bus Simulator Indonesia, you can contact LDPlayer's customer service through various channels, such as email, Facebook, Twitter, Discord, Reddit, YouTube, etc. You can find the contact information on the official website of LDPlayer () or on the LDPlayer app itself. You can also check out the FAQ section or the blog section on the website for more information and tips.
- Q: How can I share my feedback or suggestions about LDPlayer or Bus Simulator Indonesia?
-A: We appreciate your feedback and suggestions about LDPlayer or Bus Simulator Indonesia. You can share your thoughts with us through various channels, such as email, Facebook, Twitter, Discord, Reddit, YouTube, etc. You can also leave a comment or a review on the Play Store app or on the official website of LDPlayer (). Your feedback and suggestions will help us improve our products and services and provide you with a better gaming experience.
197e85843d
-
-
\ No newline at end of file
diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Descubre el secreto de Clash Royale Todo Infinito APK fcil y rpido.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Descubre el secreto de Clash Royale Todo Infinito APK fcil y rpido.md
deleted file mode 100644
index ca63ded1bb90a8466e066d510605b8aadccc6182..0000000000000000000000000000000000000000
--- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Descubre el secreto de Clash Royale Todo Infinito APK fcil y rpido.md
+++ /dev/null
@@ -1,106 +0,0 @@
-
-Clash Royale Todo Infinito Apk: What Is It and How to Get It?
-If you are a fan of Clash Royale, you might have heard of Clash Royale Todo Infinito Apk. This is a modified version of the game that gives you unlimited resources, such as gems, gold, and cards. But what exactly is Clash Royale, and how can you get this apk? In this article, we will answer these questions and give you some tips and tricks for playing the game.
-What is Clash Royale?
-Clash Royale is a popular mobile game developed and published by Supercell, the same company behind Clash of Clans. It is a real-time multiplayer battle game that combines elements of collectible card games and strategy games. Here are some of the main features of the game:
-clash royale todo infinito apk
-Download ✔✔✔ https://urlin.us/2uT29L
-A real-time multiplayer battle game
-In Clash Royale, you can challenge players from around the world in fast-paced duels. The goal is to destroy your opponent's three towers, or at least more towers than they do, before the time runs out. You can use a variety of units, spells, and buildings to attack and defend. Each match lasts for three minutes, or longer if there is a tie.
-A collectible card game
-Clash Royale features over 100 cards that represent different troops, spells, and buildings from the Clash universe. You can collect and upgrade these cards by winning battles, opening chests, or buying them with gems or gold. You can also create your own battle deck with eight cards that suit your play style and strategy.
-A strategic game
-Clash Royale is not just about spamming cards on the battlefield. You need to think carefully about when and where to place your cards, how to counter your opponent's moves, and how to manage your elixir. Elixir is the resource that you use to play cards, and it regenerates over time. You also need to consider the strengths and weaknesses of each card, as well as their synergies and interactions.
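-To make that elixir bookkeeping concrete, here is a minimal Python sketch of the idea: elixir regenerates toward a cap over time and is spent only when you can afford a card. The cap of 10, the regeneration rate, and the example card cost are assumptions for illustration, not values taken from the game.
-# Minimal sketch of elixir management: regenerate over time, spend to play cards.
-# Assumed values (cap, rate, costs) are illustrative only.
-MAX_ELIXIR = 10.0
-ELIXIR_PER_SECOND = 1 / 2.8  # assumed regeneration rate
-
-def regenerate(elixir, seconds):
-    """Elixir available after `seconds` of regeneration, capped at MAX_ELIXIR."""
-    return min(MAX_ELIXIR, elixir + seconds * ELIXIR_PER_SECOND)
-
-def try_play(elixir, card_cost):
-    """Return (played, remaining_elixir); the card is played only if it is affordable."""
-    if elixir >= card_cost:
-        return True, elixir - card_cost
-    return False, elixir
-
-elixir = 5.0                          # hypothetical starting elixir
-played, elixir = try_play(elixir, 4)  # e.g. a 4-cost card
-elixir = regenerate(elixir, 5.6)      # wait a few seconds before the next move
-print(played, round(elixir, 1))
-Keeping a running count like this is what the tips further down rely on when they talk about positive elixir trades.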
-What is Clash Royale Todo Infinito Apk?
-Clash Royale Todo Infinito Apk is a modified version of the game that gives you access to unlimited resources. This means that you can get as many gems, gold, and cards as you want without spending any money or time. You can also unlock all the arenas, chests, and features that are normally restricted by your level or progress. Here are some of the benefits and risks of using this apk:
-The benefits of using it
-
-- You can enjoy the game without any limitations or frustrations.
-- You can experiment with different cards and decks without worrying about wasting resources.
-- You can dominate your opponents with powerful cards and strategies.
-- You can have more fun and excitement in the game.
-
-The risks of using it
-
-- You might lose the challenge and thrill of the game.
-- You might get bored or lose interest in the game.
-- You might face technical issues or errors in the game.
-- You might get banned or suspended by Supercell for violating their terms of service.
-
-How to get Clash Royale Todo Infinito Apk?
-If you want to try Clash Royale Todo Infinito Apk, you need to follow these steps:
-Download from a reliable source
-There are many websites that claim to offer Clash Royale Todo Infinito Apk, but not all of them are safe or trustworthy. Some of them might contain viruses, malware, or spyware that can harm your device or steal your personal information. Therefore, you need to be careful and do some research before downloading anything. You can check the reviews, ratings, and comments of other users to see if the apk is reliable and working. You can also use antivirus software or scan the apk file before installing it.
-Install on your device
-Once you have downloaded the apk file, you need to install it on your device. However, you might encounter some issues or errors during the installation process. This is because Clash Royale Todo Infinito Apk is not an official version of the game, and it might not be compatible with your device or operating system. To fix this, you need to enable the unknown sources option on your device settings. This will allow you to install apps from sources other than the Google Play Store. You can also uninstall the original version of Clash Royale if you have it on your device, as it might cause conflicts or crashes with the apk.
-clash royale mod apk unlimited gems and coins
-clash royale hack apk download for android
-clash royale private server apk with all cards unlocked
-clash royale apk mod menu with god mode
-clash royale latest version apk free download
-clash royale cheat apk no root no survey
-clash royale online generator apk without human verification
-clash royale modded apk by master royale infinity
-clash royale apk atualizado 2023 com tudo infinito
-clash royale apk mediafire link download 2023
-clash royale apk modificado com gemas e ouro infinito
-clash royale hackeado apk descargar gratis para android
-clash royale servidor privado apk con todos los personajes desbloqueados
-clash royale apk mod menu con modo dios
-clash royale ultima version apk descargar gratis
-clash royale truco apk sin root sin encuesta
-clash royale generador online apk sin verificacion humana
-clash royale modificado apk por master royale infinity
-clash royale descargar master royale infinity 3.2729 apk gratis para android
-clash royale novo apk mod do clash royale todo infinito youtube
-clash royale download the apk from uptodown for android
-clash royale unlimited resources apk for android and ios
-clash royale best private server apk with custom cards and skins
-clash royale hack tool apk no password no jailbreak
-clash royale working cheats apk 2023 updated daily
-clash royale free gems and gold apk without offers or surveys
-clash royale cracked apk by nulls royal latest version
-clash royale baixar master royale infinity 3.2729 apk gratis para android
-clash royale novo apk mod do clash royale tudo infinito mediafire
-clash royale baixar o apk do uptodown para android
-clash royale recursos ilimitados apk para android e ios
-clash royale melhor servidor privado apk com cartas e skins personalizados
-clash royale ferramenta de hackear apk sem senha sem jailbreak
-clash royale truques funcionando apk 2023 atualizado diariamente
-clash royale gemas e ouro gratis apk sem ofertas ou pesquisas
-clash royale apk rachado por nulls royal ultima versao
-descargar master royal infinity 3.2729 APK gratis para Android - Malavida.com/clashroyal/
-nuevo APK mod de Clash Royale todo infinito YouTube - YouTube.com/watch?v=EOCZMUdAql4
-descargar el APK de Uptodown para Android - Clash-Royal.en.uptodown.com/android
-recursos ilimitados APK para Android y iOS - Clash-Royals.com/unlimited-resources-apk/
-mejor servidor privado APK con cartas y skins personalizados - Clash-Royals.net/best-private-server-apk/
-herramienta de hackear APK sin contraseña sin jailbreak - Clash-Royals.org/hack-tool-apk/
-trucos funcionando APK 2023 actualizado diariamente - Clash-Royals.info/working-cheats-apk/
-gemas y oro gratis APK sin ofertas o encuestas - Clash-Royals.co/free-gems-and-gold-apk/
-APK agrietado por nulls royal última versión - Clash-Royals.io/cracked-apk-by-nulls/
-Enjoy the game
-After installing the apk, you can launch the game and enjoy the unlimited resources and features. You can create your own custom deck with any cards you want, unlock all the chests and arenas, and challenge anyone in the game. You can also join a clan and share cards with other players who use the same apk. However, you should be aware that using Clash Royale Todo Infinito Apk might affect your game experience and performance. You might face lag, glitches, or bugs in the game. You might also lose your progress or account if Supercell detects that you are using a modified version of the game.
-Tips and tricks for playing Clash Royale
-Whether you use Clash Royale Todo Infinito Apk or not, there are some tips and tricks that can help you improve your skills and win more battles in Clash Royale. Here are some of them:
-Join a clan and share cards
-One of the best ways to progress faster and get more cards in Clash Royale is to join a clan and share cards with other members. You can request and donate cards every day, which will give you gold and experience points. You can also chat with your clanmates, ask for advice, and practice with friendly battles. You can also participate in clan wars and clan chest events, which will give you more rewards and fun.
-Attack in pairs and use combos
-Another important tip for playing Clash Royale is to attack in pairs and use combos. This means that you should not play your cards one by one, but rather combine them to create powerful attacks and defenses. For example, you can pair a tank unit like a giant or a golem with a support unit like a wizard or a musketeer behind it. This will create a strong push that can deal a lot of damage to your opponent's towers. You can also use spells like fireball or zap to support your units or counter your opponent's units.
-Be patient and count elixir
-The last tip for playing Clash Royale is to be patient and count elixir. This means that you should not rush into attacking or defending without thinking first. You should wait for the right moment to play your cards, depending on the situation and your elixir advantage. Elixir advantage is the difference between your elixir and your opponent's elixir at any given time. You can gain elixir advantage by playing cheaper cards than your opponent, by making positive elixir trades (using less elixir to counter more elixir), or by letting your opponent waste their elixir on unnecessary moves.
-Conclusion
-Clash Royale is a fun and addictive game that combines real-time multiplayer battles, collectible card games, and strategy games. Clash Royale Todo Infinito Apk is a modified version of the game that gives you unlimited resources and features. However, it also comes with some risks and drawbacks that might affect your game experience and performance. If you want to try it, you need to download it from a reliable source, install it on your device, and enjoy the game. You can also follow some tips and tricks to improve your skills and win more battles in Clash Royale.
-FAQs
-
-- What is the difference between Clash Royale Todo Infinito Apk and Clash Royale Mod Apk?
-- A: Clash Royale Todo Infinito Apk and Clash Royale Mod Apk are both modified versions of Clash Royale that give you unlimited resources and features. However, they might have different sources, versions, or features. For example, some Clash Royale Mod Apks might have custom servers, private servers, or unlimited chests, while others might not. You should always check the details and specifications of the apk before downloading it.
-- Is Clash Royale Todo Infinito Apk safe to use?
-- A: Clash Royale Todo Infinito Apk is not an official version of the game, and it might not be safe to use. It might contain viruses, malware, or spyware that can harm your device or steal your personal information. It might also cause technical issues or errors in the game. It might also get you banned or suspended by Supercell for violating their terms of service. Therefore, you should use it at your own risk and discretion.
-- Can I play Clash Royale Todo Infinito Apk with my friends who use the original version of the game?
-- A: No, you cannot play Clash Royale Todo Infinito Apk with your friends who use the original version of the game. This is because Clash Royale Todo Infinito Apk uses a different server and database than the original version of the game. Therefore, you can only play with other players who use the same apk as you.
-- How can I update Clash Royale Todo Infinito Apk?
-- A: Clash Royale Todo Infinito Apk does not update automatically like the original version of the game. You need to manually download and install the latest version of the apk from a reliable source whenever there is a new update. However, you might lose your progress or account if you update the apk, as it might not be compatible with the previous version.
-- Where can I find more information about Clash Royale Todo Infinito Apk?
-- A: You can find more information about Clash Royale Todo Infinito Apk on various websites, blogs, forums, or social media platforms that are dedicated to Clash Royale or mobile gaming. You can also watch videos, tutorials, or reviews of the apk on YouTube or other streaming platforms. However, you should always verify the credibility and accuracy of the information before trusting it.
-
197e85843d
-
-
\ No newline at end of file
diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download CarX Drift Racing Lite Mod APK with Unlimited Money and Unlocked Features.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download CarX Drift Racing Lite Mod APK with Unlimited Money and Unlocked Features.md
deleted file mode 100644
index 4a88d1b67cca889db8aff65831b58e6ada650e8c..0000000000000000000000000000000000000000
--- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download CarX Drift Racing Lite Mod APK with Unlimited Money and Unlocked Features.md
+++ /dev/null
@@ -1,91 +0,0 @@
-
-CarX Drift Racing Lite Mod APK: A Fun and Free Racing Game
-If you are a fan of car racing games, you might have heard of CarX Drift Racing Lite. It is a popular game that lets you experience the thrill of drifting on different tracks with realistic physics and graphics. But what if you want to enjoy the game without any limitations or interruptions? That's where CarX Drift Racing Lite Mod APK comes in handy. In this article, we will tell you what CarX Drift Racing Lite is, how to download and install the mod apk version, and why you should use it.
-carx drift racing lite mod apk happymod
Download Zip »»» https://urlin.us/2uSV3T
- What is CarX Drift Racing Lite?
-CarX Drift Racing Lite is a racing game developed by CarX Technologies. It is a lite version of the original CarX Drift Racing game, which means it has fewer features and less content, but it is also more suitable for low-end devices. The game allows you to choose from different cars and tracks, customize your vehicle, and compete with other players online or offline. You can also earn coins by drifting and use them to upgrade your car or unlock new scenes.
- Features of CarX Drift Racing Lite
-CarX Drift Racing Lite has many features that make it an enjoyable and addictive game for racing enthusiasts. Here are some of them:
- Realistic physics and graphics
-The game uses a sophisticated car physics engine that simulates the behavior of real cars on different surfaces and conditions. You can feel the difference between asphalt, grass, sand, or snow, as well as the impact of speed, weight, and inertia on your car's performance. The game also has stunning graphics that create a realistic atmosphere for your racing experience. You can see the smoke, dust, sparks, and skid marks as you drift on the tracks.
- Customizable cars and tracks
-The game offers you a variety of cars to choose from, each with its own characteristics and specifications. You can also customize your car's appearance, color, wheels, engine, suspension, and more. You can also choose from different tracks that have different layouts, obstacles, and weather effects. You can even create your own tracks using the track editor feature.
- Leaderboards and achievements
-The game has a competitive mode that lets you challenge other players online or offline. You can see your ranking on the global or local leaderboards, as well as your personal statistics and records. You can also earn achievements by completing various tasks and challenges in the game.
-carx drift racing lite unlimited money mod apk
-carx drift racing lite hack apk download
-carx drift racing lite mod apk latest version
-carx drift racing lite unlocked cars mod apk
-carx drift racing lite mod apk android 1
-carx drift racing lite mod apk revdl
-carx drift racing lite mod apk free download
-carx drift racing lite mod apk offline
-carx drift racing lite mod apk no root
-carx drift racing lite mod apk obb
-carx drift racing lite mod apk unlimited coins
-carx drift racing lite mod apk 1.1
-carx drift racing lite mod apk rexdl
-carx drift racing lite mod apk pure
-carx drift racing lite mod apk 2023
-carx drift racing lite mod apk happymod.com
-carx drift racing lite mod apk all cars unlocked
-carx drift racing lite mod apk unlimited everything
-carx drift racing lite mod apk for pc
-carx drift racing lite mod apk ios
-carx drift racing lite cheat codes mod apk
-carx drift racing lite hack tool mod apk
-carx drift racing lite premium mod apk
-carx drift racing lite pro mod apk
-carx drift racing lite full version mod apk
-carx drift racing lite mega mod apk
-carx drift racing lite vip mod apk
-carx drift racing lite gold mod apk
-carx drift racing lite cracked mod apk
-carx drift racing lite original mod apk
-download game carx drift racing lite mod apk
-how to install carx drift racing lite mod apk
-cara download carx drift racing lite mod apk
-descargar carx drift racing lite mod apk
-telecharger carx drift racing lite mod apk
-baixar carx drift racing lite mod apk
-indir carx drift racing lite mod apk
-scaricare carx drift racing lite mod apk
-unduh carx drift racing lite mod apk
-скачать carx drift racing lite мод апк
- How to download and install CarX Drift Racing Lite Mod APK?
-If you want to download and install CarX Drift Racing Lite Mod APK, you need to follow these simple steps:
- Download the mod apk file from a trusted source
-You can find many websites that offer CarX Drift Racing Lite Mod APK files for free download. However, not all of them are safe and reliable. Some of them may contain viruses, malware, or spyware that can harm your device or steal your personal information. Therefore, you need to be careful when choosing a source for downloading the mod apk file. One of the trusted sources that we recommend is HappyMod. It is a platform that provides modded versions of various games and apps, with 3x-speed downloads and mods verified by users.
- Enable unknown sources on your device
-Before you can install the mod apk file, you need to enable unknown sources on your device. This is a security setting that prevents the installation of apps from sources other than the official app store. To enable unknown sources, you need to go to your device's settings, then security, then toggle on the option that says "allow installation of apps from unknown sources". This may vary depending on your device model and Android version.
- Install the mod apk file and enjoy the game
-Once you have downloaded the mod apk file and enabled unknown sources, you can install the mod apk file by tapping on it and following the instructions. After the installation is complete, you can launch the game and enjoy the modded features. You don't need to uninstall the original game or create a new account. You can use your existing account and data with the mod apk version.
- Why use CarX Drift Racing Lite Mod APK?
-You might be wondering why you should use CarX Drift Racing Lite Mod APK instead of the original game. Well, there are many benefits of using the mod apk version that will enhance your gaming experience. Here are some of them:
- Benefits of using the mod apk version
-The mod apk version of CarX Drift Racing Lite has many advantages over the original game. Some of them are:
- Unlimited coins and unlocked scenes
-One of the main benefits of using the mod apk version is that you get unlimited coins and unlocked scenes in the game. Coins are the currency that you use to buy and upgrade cars, as well as unlock new tracks and scenes. Normally, you have to earn coins by drifting or watching ads, which can be time-consuming and annoying. But with the mod apk version, you get unlimited coins that you can spend as you wish. You also get access to all the scenes that are otherwise locked in the original game. You can enjoy different environments and challenges without any restrictions.
- No ads and no root required
-Another benefit of using the mod apk version is that you don't have to deal with any ads or root your device. Ads are annoying and distracting, especially when they pop up in the middle of your game. They can also consume your data and battery. But with the mod apk version, you don't have to worry about any ads interrupting your game. You can play smoothly and peacefully without any interruptions. You also don't need to root your device to use the mod apk version. Rooting is a process that gives you full control over your device, but it also voids your warranty and exposes your device to risks. But with the mod apk version, you don't need to root your device at all. You can use it safely and easily without any complications.
- Compatible with most devices and easy to use
-The last benefit of using the mod apk version is that it is compatible with most devices and easy to use. The mod apk version is optimized for low-end devices, which means it runs smoothly and efficiently on most Android devices. You don't need a high-end device or a lot of storage space to play the game. You also don't need any special skills or knowledge to use the mod apk version. You just need to follow the simple steps mentioned above and you are good to go.
- Conclusion
-CarX Drift Racing Lite is a fun and free racing game that lets you drift on different tracks with realistic physics and graphics. It has many features that make it an enjoyable and addictive game for racing enthusiasts. However, if you want to enjoy the game without any limitations or interruptions, you should use CarX Drift Racing Lite Mod APK. It is a modified version of the original game that gives you unlimited coins, unlocked scenes, no ads, no root required, and compatibility with most devices. You can download and install it easily from HappyMod and enjoy the game with more fun and freedom.
- I hope this article was helpful and informative for you. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading!
- FAQs
-Here are some frequently asked questions about CarX Drift Racing Lite Mod APK:
-
-- Is CarX Drift Racing Lite Mod APK safe?
-Yes, CarX Drift Racing Lite Mod APK is safe to use as long as you download it from a trusted source like HappyMod. It does not contain any viruses, malware, or spyware that can harm your device or steal your personal information.
-- Is CarX Drift Racing Lite Mod APK legal?
-Yes, CarX Drift Racing Lite Mod APK is legal to use as long as you don't use it for any illegal or unethical purposes, such as cheating, hacking, or pirating. You should also respect the rights and interests of the original developers and publishers of the game.
-- Can I update CarX Drift Racing Lite Mod APK?
-Yes, you can update CarX Drift Racing Lite Mod APK whenever there is a new version available. However, you need to download and install the new mod apk file from the same source that you used before. You don't need to uninstall the previous mod apk file or lose your data. You can simply overwrite the old file with the new one and enjoy the updated features.
-- Can I play CarX Drift Racing Lite Mod APK online?
-Yes, you can play CarX Drift Racing Lite Mod APK online with other players. However, you need to be careful when playing online, as some players may report you for using the mod apk version. This may result in your account being banned or suspended by the game's servers. Therefore, you should use the mod apk version at your own risk and discretion when playing online.
-- Can I use CarX Drift Racing Lite Mod APK on iOS devices?
-No, CarX Drift Racing Lite Mod APK is only compatible with Android devices. It is not available for iOS devices, such as iPhones or iPads. If you want to play CarX Drift Racing Lite on iOS devices, you need to download and install the original game from the App Store.
-
197e85843d
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Age of History APK - Download the Best Strategy Game for Android.md b/spaces/1phancelerku/anime-remove-background/Age of History APK - Download the Best Strategy Game for Android.md
deleted file mode 100644
index 5155c2fbcfcaeaa35e4cd014f96c3c2b9a730a5b..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Age of History APK - Download the Best Strategy Game for Android.md
+++ /dev/null
@@ -1,155 +0,0 @@
-
-Age of History APK Indir: A Grand Strategy Game for Android
- If you are a fan of strategy games and history, you might want to check out Age of History APK indir, a grand strategy wargame that is simple to learn yet hard to master. In this game, you can use military tactics and cunning diplomacy to either unify the world or conquer it. You can also create your own history using in-game editors and play with as many players as there are civilizations in a scenario. In this article, we will tell you everything you need to know about this game, including how to download and install it, how to play it, and how it compares to other strategy games.
-age of history apk indir
Download Zip ✫ https://jinyurl.com/2uNLz0
- What is Age of History?
- Age of History is a turn-based strategy game that covers the whole history of humanity, from the dawn of civilization to the far future. You can play as any civilization, from the largest empire to the smallest tribe, and lead your people to glory in a campaign spanning thousands of years. You can also choose from different ages, such as the Age of Civilizations, the Age of Feudalism, the Age of Industrialization, the Age of World War II, and more.
- One of the most interesting features of this game is that it lets you create your own scenarios and civilizations using in-game editors. You can customize everything from the map, the provinces, the terrain types, the growth rate, the cities, the flags, the alliances, the diplomacy colors, and more. You can also share your creations with other players online and download their scenarios as well.
- Another feature that makes this game unique is that it has two maps: Earth and Kepler-22b. Earth is a detailed map of the world with many historical borders and realistic geography. Kepler-22b is a fictional planet that has 404 provinces and different terrain types. You can explore both maps and see how different civilizations interact with each other.
- How to download and install Age of History APK?
- If you want to play this game on your Android device, you will need to download and install its APK file from a reliable source. One of the best sources for this is APKCombo, a website that offers free APK downloads for various apps and games. Here are the steps to download and install Age of History APK from APKCombo:
-
-- Go to https://apkcombo.com/tr/age-of-history-ii/age.of.civilizations2.jakowski.lukasz.lite/ on your browser.
-- Click on the green "Download APK" button on the page.
-- Select your preferred version (the latest one is recommended) and click on "Download".
-- Wait for the download to finish and then open the APK file on your device.
-- Follow the instructions on your screen to install the game on your device.
-
- Some of the requirements and permissions for the game are:
-
-- Android version: 4.4 or higher
-- Storage: 87 MB or more
-- Internet: required for online features
-- Other permissions: access network state, access wifi state, internet, read external storage, write external storage
-
- Some of the advantages of downloading the game from APKCombo are:
-
-- You can get the latest version of the game without waiting for updates from Google Play Store.
-- You can download the game for free without any ads or in-app purchases.
-- You can download the game safely and securely without any viruses or malware.
-
- How to play Age of History?
- Age of History is a game that requires both strategic thinking and historical knowledge. You will need to manage your economy, your military, your diplomacy, and your population as you expand your territory and influence. Here are some of the basics of the game that you should know before you start playing:
- The game is divided into turns, each representing a certain amount of time depending on the age you are playing. Each turn, you can issue orders to your provinces using movement points (MP). You can move your troops, build buildings, recruit units, declare war, make peace, form alliances, and more. You can also use diplomacy points (DP) to influence other civilizations and improve your relations with them. You can also use gold to buy more MP or DP, or to bribe other leaders.
-age of history apk download free
-age of history apk full version
-age of history apk mod unlimited money
-age of history apk android oyun club
-age of history apk latest version
-age of history apk hile indir
-age of history apk obb
-age of history apk revdl
-age of history apk hack
-age of history apk 1.1548a
-age of history apk uptodown
-age of history apk rexdl
-age of history apk pure
-age of history apk 2023
-age of history apk para hilesi
-age of history apk pc
-age of history apk tam sürüm indir
-age of history apk son sürüm indir
-age of history apk güncel indir
-age of history apk bedava indir
-age of history apk mega hile indir
-age of history apk altın hilesi indir
-age of history apk kurulumu
-age of history apk nasıl indirilir
-age of history apk nasıl yüklenir
-age of history lite apk indir
-age of history 2 apk indir
-age of history europe apk indir
-age of history world war 2 apk indir
-age of history world war 1 apk indir
-age of history civil war apk indir
-age of history asia apk indir
-age of history africa apk indir
-age of history america apk indir
-age of civilizations 2 apk indir
-age of civilizations 2 lite apk indir
-age of civilizations 2 europe apk indir
-age of civilizations 2 world war 2 apk indir
-age of civilizations 2 world war 1 apk indir
-age of civilizations 2 civil war apk indir
-age of civilizations 2 asia apk indir
-age of civilizations 2 africa apk indir
-age of civilizations 2 america apk indir
-download game age of history mod apk android 1.com
-download game android offline mod terbaik - Age Of History APK
-download game perang offline mod - Age Of History APK
-download game strategi offline mod - Age Of History APK
-download game sejarah offline mod - Age Of History APK
- The combat system in the game is based on dice rolls and modifiers. Each unit has a certain attack and defense value, as well as a morale value that affects its performance. When two units clash, they roll dice to determine the outcome of the battle. The modifiers depend on factors such as terrain type, fortification level, technology level, and leader skill. The winner of the battle is the one who inflicts more casualties on the enemy or forces them to retreat. The loser may lose some units or provinces as a result.
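-As a rough illustration of how such a dice-plus-modifiers resolution can work, here is a simplified Python sketch; the die size, unit stats, and bonus values are assumptions for illustration, not the game's actual formula:
-import random
-
-def resolve_battle(attacker, defender, terrain_bonus=0.1, fort_bonus=0.2):
-    # Each side rolls a six-sided die and scales it by its combat stat and morale;
-    # the defender also benefits from terrain and fortification modifiers.
-    attack_score = random.randint(1, 6) * attacker["attack"] * attacker["morale"]
-    defense_roll = random.randint(1, 6) * defender["defense"] * defender["morale"]
-    defense_score = defense_roll * (1 + terrain_bonus + fort_bonus)
-    return "attacker wins" if attack_score > defense_score else "defender holds"
-
-# Example units with assumed stats.
-print(resolve_battle({"attack": 12, "morale": 1.0}, {"defense": 10, "morale": 0.9}))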
- Some of the tips and tricks for playing the game effectively are:
-
-- Plan ahead and prioritize your goals. Do you want to conquer the world or unify your region? Do you want to focus on military or economic development? Do you want to ally with other civilizations or go solo?
-- Balance your budget and resources. Do not spend more than you earn or you will go into debt. Do not overextend your army or you will lose morale and supply. Do not neglect your population or you will face rebellions and unrest.
-- Research new technologies and upgrade your units. Technology is the key to progress and power in this game. You can research new technologies using research points (RP) that you gain from buildings, events, or achievements. You can also upgrade your units using gold or RP to improve their stats and abilities.
-- Explore the map and discover new lands and civilizations. The map is full of secrets and surprises that can benefit or harm you. You can find new resources, events, wonders, relics, and more. You can also encounter new civilizations that can be friendly or hostile to you.
-
- Some of the features and modes of the game are:
-
-- Historical grand campaign: This is the main mode of the game where you can play as any civilization from any age and try to achieve your objectives.
-- Scenario editor: This is where you can create your own scenarios using in-game editors. You can customize everything from the map, the provinces, the terrain types, the growth rate, the cities, the flags, the alliances, the diplomacy colors, and more.
-- Civilization creator: This is where you can create your own civilizations using in-game editors. You can customize everything from the name, the flag, the color, the leader, the ideology, the government type, the religion, the culture, and more.
-- Sandbox mode: This is where you can play with as many players as there are civilizations in a scenario. You can set up your own rules and conditions for the game.
-- Online mode: This is where you can play with other players online using multiplayer servers. You can chat with them, cooperate with them, or compete with them.
-
- How does Age of History compare to other strategy games?
- Age of History is a game that has many similarities and differences with other strategy games in terms of gameplay, graphics, sound, and content. Here are some of them:
- The similarities and differences between Age of History and Age of Empires
- Age of Empires is a real-time strategy game that covers different historical periods from the Stone Age to the Iron Age. You can play as different civilizations and build your empire by collecting resources, training units, constructing buildings, and fighting enemies. You can also advance through different ages and unlock new technologies and units.
- Some of the similarities between Age of History and Age of Empires are:
-
-- Both games are strategy games that involve historical civilizations and scenarios.
-- Both games have different ages and technologies that affect the gameplay and the units.
-- Both games have editors that allow you to create your own maps and scenarios.
-
- Some of the differences between Age of History and Age of Empires are:
-
-- Age of History is a turn-based game while Age of Empires is a real-time game.
-- Age of History covers the whole history of humanity while Age of Empires covers only a few historical periods.
-- Age of History has more civilizations and provinces than Age of Empires.
-
- The similarities and differences between Age of History and Hearts of Iron IV
- Hearts of Iron IV is a grand strategy game that focuses on the World War II era. You can play as any country in the world and lead it to victory or defeat in the global conflict. You can also customize your country's political, economic, military, and diplomatic aspects. You can also join or create factions, declare war, make peace, research new technologies, and more.
- Some of the similarities between Age of History and Hearts of Iron IV are:
-
-- Both games are grand strategy games that involve historical scenarios and events.
-- Both games have a detailed map of the world with many provinces and regions.
-- Both games have a complex combat system that involves dice rolls, modifiers, and morale.
-
- Some of the differences between Age of History and Hearts of Iron IV are:
-
-- Age of History covers the whole history of humanity while Hearts of Iron IV covers only the World War II era.
-- Age of History has more civilizations and ages than Hearts of Iron IV.
-- Hearts of Iron IV has more features and mechanics than Age of History, such as air warfare, naval warfare, espionage, resistance, supply lines, etc.
-
- The similarities and differences between Age of History and Civilization VI
- Civilization VI is a turn-based strategy game that lets you build your own civilization from scratch. You can choose from different leaders and civilizations, each with their own unique abilities and bonuses. You can also explore the map, found cities, develop districts, build wonders, research technologies, adopt policies, engage in diplomacy, wage war, and more. You can also win the game by achieving one of several victory conditions, such as science, culture, religion, domination, or diplomacy.
- Some of the similarities between Age of History and Civilization VI are:
-
-- Both games are turn-based strategy games that involve historical civilizations and leaders.
-- Both games have different ages and technologies that affect the gameplay and the units.
-- Both games have diplomacy points and options that allow you to interact with other civilizations.
-
- Some of the differences between Age of History and Civilization VI are:
-
-- Age of History covers the whole history of humanity while Civilization VI covers only a few historical periods.
-- Civilization VI has more features and mechanics than Age of History, such as city management, district placement, wonder construction, policy cards, religion system, loyalty system, etc.
-- Civilization VI has different victory conditions while Age of History has only one: world domination.
-
- Conclusion
- In conclusion, Age of History APK indir is a grand strategy game for Android that covers the whole history of humanity, from the dawn of civilization to the far future. You can play as any civilization and lead it to glory or ruin in a campaign spanning thousands of years. You can also create your own scenarios and civilizations using in-game editors and share them with other players online. You can also explore two maps: Earth and Kepler-22b, and see how different civilizations interact with each other. Age of History is a game that is simple to learn yet hard to master, and it will challenge your strategic thinking and historical knowledge. If you are looking for a game that combines history, strategy, and creativity, you should definitely give Age of History a try.
- If you want to download and install Age of History APK on your Android device, you can do so easily and safely from APKCombo, a website that offers free APK downloads for various apps and games. You can get the latest version of the game without any ads or in-app purchases, and enjoy its features and modes without any hassle. You can also compare this game to other strategy games, such as Age of Empires, Hearts of Iron IV, and Civilization VI, and see how it differs from them in terms of gameplay, graphics, sound, and content.
- So what are you waiting for? Download Age of History APK indir today and start creating your own history!
- Five unique FAQs about the game
-
-- Q: How many civilizations are there in Age of History?
A: There are over 250 civilizations in the game, each with their own flag, leader, ideology, government type, religion, culture, and more.
-- Q: How can I change the language of the game?
A: You can change the language of the game from the settings menu. The game supports 11 languages: English, Polish, French, German, Russian, Spanish, Portuguese, Turkish, Italian, Chinese, and Japanese.
-- Q: How can I play with other players online?
A: You can play with other players online using multiplayer servers. You can join or create a server from the online mode menu. You can also chat with other players using the chat feature.
-- Q: How can I save and load my game progress?
A: You can save and load your game progress from the pause menu. You can have up to 10 save slots for each scenario. You can also autosave your game every turn or every 10 turns.
-- Q: How can I get more gold, MP, DP, or RP in the game?
A: You can get more gold by collecting taxes from your provinces or by trading with other civilizations. You can get more MP by building roads or ports in your provinces or by researching new technologies. You can get more DP by improving your relations with other civilizations or by completing achievements. You can get more RP by building universities or libraries in your provinces or by researching new technologies.
-
197e85843d
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Download Tag After School APK for Android - ThaiAPK.md b/spaces/1phancelerku/anime-remove-background/Download Tag After School APK for Android - ThaiAPK.md
deleted file mode 100644
index 9a582f12105eccce3b6cfe02d9ba01a785f82e1e..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Download Tag After School APK for Android - ThaiAPK.md
+++ /dev/null
@@ -1,150 +0,0 @@
-
-Tag After School APK: A Horror School Life Simulation Game
-If you are looking for a thrilling and exciting game that combines horror, romance, and mystery, then you should try Tag After School APK. This is a game developed by Genius Studio Japan Inc., a company that specializes in creating anime-style games for Android devices. In this game, you will play as Shota-Kun, a high school student who gets involved in a deadly game of tag with ghostly girls. You will have to make choices that will affect the outcome of the story and your relationships with the girls. Are you ready to face the horrors of Tag After School APK? Read on to find out more about this game.
-tag after school apk thaiapk
Download Zip --->>> https://jinyurl.com/2uNK1K
- What is Tag After School APK?
-Tag After School APK is a horror school life simulation game that was released in January 2023. It is available for free download on various websites, such as ThaiAPK, APKCombo, and others. The game has an age rating of 18+, as it contains mature visuals and themes that are not suitable for younger audiences. The game has a file size of about 100 MB, and it requires Android 5.0 or higher to run smoothly.
- The story and the characters of Tag After School APK
-The game follows the story of Shota-Kun, a normal high school student who has a crush on his childhood friend, Yui-Chan. One day, he decides to confess his feelings to her after school, but he gets interrupted by a mysterious voice that invites him to play a game of tag. He soon realizes that he is trapped in a haunted school with four ghostly girls who are after him. He has to survive until dawn by hiding from them or fighting them back. However, he also discovers that each girl has a tragic backstory that explains why they became ghosts. He can choose to help them or ignore them, depending on his actions and decisions.
-The four ghostly girls are:
-
-- Ayumi-Chan: She is the first girl that Shota-Kun encounters in the game. She is a cheerful and energetic girl who loves sports and music. She died in a car accident while going to a concert with her friends.
-- Miyuki-Chan: She is the second girl that Shota-Kun meets in the game. She is a shy and timid girl who loves books and animals. She died from an illness that made her unable to breathe properly.
-- Sakura-Chan: She is the third girl that Shota-Kun runs into in the game. She is a sweet and kind girl who loves flowers and gardening. She died from a fire that burned down her house.
-- Rin-Chan: She is the fourth and final girl that Shota-Kun faces in the game. She is a cold and aloof girl who hates everyone and everything. She died from suicide after being bullied at school.
-
- The gameplay and the features of Tag After School APK
-The game is divided into several chapters, each focusing on one of the ghostly girls. The game has two modes: story mode and free mode. In story mode, you will follow the main plot and make choices that will affect the ending of each chapter. You will also have to interact with the girls by talking to them, giving them gifts, or fighting them. In free mode, you can replay any chapter you have completed and explore different outcomes.
-The game also has several features that make it more enjoyable and challenging. For example, Tag After School APK has a timer, a map, an inventory, and a status bar. You can use these tools to plan your strategy and manage your resources. You can also collect items and clues that will help you solve the mystery of the school and the girls. Some items can also be used as weapons or gifts for the girls. You can also unlock achievements and gallery images as you progress through the game.
-tag after school android game thaiapk
-tag after school apk download thaiapk
-tag after school apk latest version thaiapk
-tag after school apk mod thaiapk
-tag after school apk offline thaiapk
-tag after school apk update thaiapk
-tag after school app thaiapk
-tag after school game thaiapk
-tag after school gameplay thaiapk
-tag after school guide thaiapk
-tag after school review thaiapk
-tag after school tips thaiapk
-tag after school walkthrough thaiapk
-thaiapk tag after school 18+
-thaiapk tag after school android
-thaiapk tag after school apk 2023
-thaiapk tag after school apk free
-thaiapk tag after school apk full
-thaiapk tag after school apk v5.0
-thaiapk tag after school cheats
-thaiapk tag after school codes
-thaiapk tag after school hack
-thaiapk tag after school how to play
-thaiapk tag after school install
-thaiapk tag after school link
-thaiapk tag after school online
-thaiapk tag after school video
-ดาวน์โหลด tag after school apk thaiapk
-วิธีเล่น tag after school apk thaiapk
-สอนเล่น tag after school apk thaiapk
-เกม tag after school apk thaiapk
-เกมส์ tag after school apk thaiapk
-เล่นเกมส์ tag after school apk thaiapk
-เวอร์ชั่นล่าสุดของเกมส์ tag after school apk thaiapk
-แจกเกมส์ tag after school apk thaiapk
-แนะนำเกมส์ tag after school apk thaiapk
-โปรเกมส์ tag after school apk thaiapk
-โหลดเกมส์ tag after school apk thaiapk
-ไทยแอปคอม เกมส์tagafter-school-apk-thai-apk
-ไทยแอปคอม เกมส์tag-after-school-apk-thai-apk
-ไทยแอปคอม เกมส์tagafterschool-apk-thai-apk
-ไทยแอปคอม เกมส์tagafterschoolandroid-thai-apk
- The graphics and the sound of Tag After School APK
-The game has stunning graphics that create a realistic and immersive atmosphere. The game uses 3D models and animations for the characters and the environments. The game also has a dark and gloomy color scheme that enhances the horror vibe. The game also has a great sound design that adds to the tension and suspense. The game has voice acting for the main characters, as well as sound effects and background music that match the mood of each scene.
- How to download and install Tag After School APK on Android devices?
-If you want to play Tag After School APK on your Android device, you will need to download and install it from a third-party source, as it is not available on the Google Play Store. Here are the steps you need to follow:
- Requirements and compatibility of Tag After School APK
-Before you download and install Tag After School APK, you need to make sure that your device meets the following requirements:
-
-- Your device must have Android 5.0 or higher.
-- Your device must have at least 1 GB of RAM and 200 MB of free storage space.
-- Your device must have a stable internet connection.
-- Your device must allow installation of apps from unknown sources. You can enable this option by going to Settings > Security > Unknown Sources.
-
- Steps to download and install Tag After School APK
-Once you have checked the requirements and compatibility of Tag After School APK, you can follow these steps to download and install it:
-
-- Go to a reliable website that offers Tag After School APK for free download, such as ThaiAPK, APKCombo, or others.
-- Click on the download button and wait for the file to be downloaded on your device.
-- Locate the downloaded file in your file manager and tap on it to start the installation process.
-- Follow the instructions on the screen and wait for the installation to be completed.
-- Launch the game from your app drawer and enjoy playing Tag After School APK.
-
- Tips and tricks for playing Tag After School APK
-If you want to have a better gaming experience with Tag After School APK, you can use these tips and tricks:
-
-- Save your game frequently, as you may encounter different endings depending on your choices.
-- Explore every corner of the school, as you may find hidden items and secrets that will help you in your quest.
-- Pay attention to the timer, as you only have until dawn to survive and escape from the school.
-- Use your map wisely, as it will show you where you are and where the girls are.
-- Use your inventory smartly, as it will store your items and clues. You can also use some items as weapons or gifts for the girls.
-- Use your status bar carefully, as it will show you your health and stamina. You need to keep them high by resting, eating, or drinking.
-- Talk to the girls whenever you can, as it will affect your relationship with them. You can also give them gifts to increase their affection towards you.
-- Fight back when necessary, as some girls may attack you if they catch you. You can use items or skills to defend yourself or escape from them.
-- Be careful with your decisions, as they will have consequences on the story and the ending. You can also replay any chapter in free mode to see different outcomes.
-
- Why should you play Tag After School APK?
-If you are still wondering whether Tag After School APK is worth playing or not, here are some reasons why you should give it a try:
- The pros and cons of Tag After School APK
-Like any other game, Tag After School APK has its pros and cons. Here are some of them:
-
-
-| Pros | Cons |
-| --- | --- |
-| It has a captivating and original story that will keep you hooked until the end. | It has some mature and disturbing scenes that may not be suitable for everyone. |
-| It has beautiful and realistic graphics that create an immersive atmosphere. | It has a large file size that may take up a lot of storage space on your device. |
-| It has a great sound design that adds to the tension and suspense. | It has some bugs and glitches that may affect the gameplay and performance. |
-| It has multiple endings and outcomes that depend on your choices and actions. | It has some repetitive and tedious tasks that may bore you after a while. |
-| It has a variety of items and clues that will help you in your quest. | It has a limited inventory space that may force you to discard some items. |
-| It has fun and challenging gameplay that will test your skills and strategy. | Its gameplay can be difficult and unforgiving, which may frustrate you at times. |
-
- The ratings and reviews of Tag After School APK
-Tag After School APK has received positive ratings and reviews from many players who have tried it. The game has an average rating of 4.5 out of 5 stars on ThaiAPK, based on more than 1000 votes. The game also has more than 500 comments from satisfied users who have praised the game for its story, graphics, sound, gameplay, and features. Here are some of the comments from the users:
-
-"This game is amazing! I love the story and the characters. It is so scary and exciting at the same time. I can't wait to see what happens next."
-"This game is awesome! I love the graphics and the sound. It is so realistic and immersive. I feel like I am really in the haunted school."
-"This game is fantastic! I love the gameplay and the features. It is so fun and challenging. I have to think carefully before making any decision."
-
- The alternatives and similar games to Tag After School APK
-If you like Tag After School APK, you may also like these games that are similar to it in terms of genre, theme, or style:
-
-- High School Simulator: This is a game developed by KUMA GAMES, a company that also creates anime-style games for Android devices. In this game, you will play as a high school student who can do whatever you want in a realistic school environment. You can interact with other students, teachers, or objects, as well as use weapons or vehicles. You can also customize your character and your school.
-- Horrorfield: This is a game developed by Skytec Games, Inc., a company that specializes in creating horror games for Android devices. In this game, you will play as either a survivor or a killer in a multiplayer mode. You will have to cooperate with other survivors or hunt them down as a killer in various maps. You can also upgrade your skills and equipment.
-- School Days: This is a game developed by MDickie, a company that produces simulation games for Android devices. In this game, you will play as a student who has to survive the drama and chaos of school life. You can interact with other students, teachers, or objects, as well as fight or romance them. You can also customize your character and your school.
-
- Conclusion
-Tag After School APK is a horror school life simulation game that will give you a thrilling and exciting gaming experience. You will play as Shota-Kun, a high school student who gets trapped in a haunted school with four ghostly girls who are after him. You will have to make choices that will affect the story and the ending of each chapter. You will also have to interact with the girls by talking to them, giving them gifts, or fighting them. You will also have to use your skills and strategy to survive until dawn by hiding from them or escaping from them. The game has stunning graphics, great sound, multiple endings, and various features that make it more enjoyable and challenging. You can download and install Tag After School APK from a third-party source, as it is not available on the Google Play Store. You can also use some tips and tricks to have a better gaming experience with Tag After School APK. If you like this game, you may also like some alternatives and similar games that are also available for Android devices.
-Tag After School APK is a game that will keep you on the edge of your seat and make you feel a range of emotions. It is a game that will make you laugh, cry, scream, and smile. It is a game that will make you think, feel, and act. It is a game that will make you love, hate, and fear. It is a game that will make you live a horror school life simulation.
- FAQs
-Here are some frequently asked questions about Tag After School APK:
-
-- Q: Is Tag After School APK safe to download and install?
-- A: Yes, Tag After School APK is safe to download and install, as long as you use a reliable website that offers the original and virus-free file. However, you should always be careful when downloading and installing apps from unknown sources, as they may contain malware or spyware that can harm your device or compromise your privacy.
-- Q: Is Tag After School APK free to play?
-- A: Yes, Tag After School APK is free to play, as it does not require any payment or subscription to access the full content of the game. However, the game may contain some ads or in-app purchases that can enhance your gaming experience or support the developers.
-- Q: How long does it take to finish Tag After School APK?
-- A: The length of Tag After School APK depends on your choices and actions, as well as the mode and the difficulty level you choose. However, on average, it may take you about 5 to 10 hours to complete the game.
-- Q: How many endings does Tag After School APK have?
-- A: Tag After School APK has multiple endings that vary depending on your choices and actions throughout the game. There are four main endings for each girl, as well as a true ending that reveals the whole truth behind the game of tag.
-- Q: How can I get the true ending of Tag After School APK?
-- A: To get the true ending of Tag After School APK, you need to complete all the chapters with all the girls and unlock all the achievements and gallery images. You also need to make the right choices that will lead you to the true ending.
-
401be4b1e0
-
-
\ No newline at end of file
diff --git a/spaces/1toTree/lora_test/ppdiffusers/schedulers/scheduling_dpmsolver_singlestep.py b/spaces/1toTree/lora_test/ppdiffusers/schedulers/scheduling_dpmsolver_singlestep.py
deleted file mode 100644
index 6a8f8cae80259114aa4fd46117fa0c2e3bd3b617..0000000000000000000000000000000000000000
--- a/spaces/1toTree/lora_test/ppdiffusers/schedulers/scheduling_dpmsolver_singlestep.py
+++ /dev/null
@@ -1,592 +0,0 @@
-# Copyright 2022 TSAIL Team and The HuggingFace Team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# DISCLAIMER: This file is strongly influenced by https://github.com/LuChengTHU/dpm-solver
-
-import math
-from typing import List, Optional, Tuple, Union
-
-import numpy as np
-import paddle
-
-from ..configuration_utils import ConfigMixin, register_to_config
-from ..utils import _COMPATIBLE_STABLE_DIFFUSION_SCHEDULERS
-from .scheduling_utils import SchedulerMixin, SchedulerOutput
-
-
-def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999):
- """
- Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
- (1-beta) over time from t = [0,1].
-
- Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up
- to that part of the diffusion process.
-
-
- Args:
- num_diffusion_timesteps (`int`): the number of betas to produce.
- max_beta (`float`): the maximum beta to use; use values lower than 1 to
- prevent singularities.
-
- Returns:
-        betas (`paddle.Tensor`): the betas used by the scheduler to step the model outputs
- """
-
- def alpha_bar(time_step):
- return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2
-
- betas = []
- for i in range(num_diffusion_timesteps):
- t1 = i / num_diffusion_timesteps
- t2 = (i + 1) / num_diffusion_timesteps
- betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
- return paddle.to_tensor(betas, dtype=paddle.float32)
-
-
-class DPMSolverSinglestepScheduler(SchedulerMixin, ConfigMixin):
- """
- DPM-Solver (and the improved version DPM-Solver++) is a fast dedicated high-order solver for diffusion ODEs with
- the convergence order guarantee. Empirically, sampling by DPM-Solver with only 20 steps can generate high-quality
- samples, and it can generate quite good samples even in only 10 steps.
-
- For more details, see the original paper: https://arxiv.org/abs/2206.00927 and https://arxiv.org/abs/2211.01095
-
- Currently, we support the singlestep DPM-Solver for both noise prediction models and data prediction models. We
- recommend to use `solver_order=2` for guided sampling, and `solver_order=3` for unconditional sampling.
-
- We also support the "dynamic thresholding" method in Imagen (https://arxiv.org/abs/2205.11487). For pixel-space
- diffusion models, you can set both `algorithm_type="dpmsolver++"` and `thresholding=True` to use the dynamic
- thresholding. Note that the thresholding method is unsuitable for latent-space diffusion models (such as
- stable-diffusion).
-
- [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__`
- function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`.
- [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and
- [`~SchedulerMixin.from_pretrained`] functions.
-
- Args:
- num_train_timesteps (`int`): number of diffusion steps used to train the model.
- beta_start (`float`): the starting `beta` value of inference.
- beta_end (`float`): the final `beta` value.
- beta_schedule (`str`):
- the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
- `linear`, `scaled_linear`, or `squaredcos_cap_v2`.
- trained_betas (`np.ndarray`, optional):
- option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc.
- solver_order (`int`, default `2`):
- the order of DPM-Solver; can be `1` or `2` or `3`. We recommend to use `solver_order=2` for guided
- sampling, and `solver_order=3` for unconditional sampling.
- prediction_type (`str`, default `epsilon`):
- indicates whether the model predicts the noise (epsilon), or the data / `x0`. One of `epsilon`, `sample`,
- or `v-prediction`.
- thresholding (`bool`, default `False`):
- whether to use the "dynamic thresholding" method (introduced by Imagen, https://arxiv.org/abs/2205.11487).
- For pixel-space diffusion models, you can set both `algorithm_type=dpmsolver++` and `thresholding=True` to
- use the dynamic thresholding. Note that the thresholding method is unsuitable for latent-space diffusion
- models (such as stable-diffusion).
- dynamic_thresholding_ratio (`float`, default `0.995`):
- the ratio for the dynamic thresholding method. Default is `0.995`, the same as Imagen
- (https://arxiv.org/abs/2205.11487).
- sample_max_value (`float`, default `1.0`):
- the threshold value for dynamic thresholding. Valid only when `thresholding=True` and
-            `algorithm_type="dpmsolver++"`.
- algorithm_type (`str`, default `dpmsolver++`):
- the algorithm type for the solver. Either `dpmsolver` or `dpmsolver++`. The `dpmsolver` type implements the
- algorithms in https://arxiv.org/abs/2206.00927, and the `dpmsolver++` type implements the algorithms in
- https://arxiv.org/abs/2211.01095. We recommend to use `dpmsolver++` with `solver_order=2` for guided
- sampling (e.g. stable-diffusion).
- solver_type (`str`, default `midpoint`):
- the solver type for the second-order solver. Either `midpoint` or `heun`. The solver type slightly affects
- the sample quality, especially for small number of steps. We empirically find that `midpoint` solvers are
- slightly better, so we recommend to use the `midpoint` type.
- lower_order_final (`bool`, default `True`):
- whether to use lower-order solvers in the final steps. For singlestep schedulers, we recommend to enable
- this to use up all the function evaluations.
-
- """
-
- _compatibles = _COMPATIBLE_STABLE_DIFFUSION_SCHEDULERS.copy()
- order = 1
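-    # Illustrative usage (a minimal sketch; `unet` and `latents` stand in for a denoising
-    # model and its latent input provided by the surrounding pipeline, not by this file):
-    #
-    #     scheduler = DPMSolverSinglestepScheduler()
-    #     scheduler.set_timesteps(num_inference_steps=20)
-    #     for t in scheduler.timesteps:
-    #         noise_pred = unet(latents, t)  # model output (noise prediction by default)
-    #         latents = scheduler.step(noise_pred, t, latents).prev_sample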
-
- @register_to_config
- def __init__(
- self,
- num_train_timesteps: int = 1000,
- beta_start: float = 0.0001,
- beta_end: float = 0.02,
- beta_schedule: str = "linear",
- trained_betas: Optional[np.ndarray] = None,
- solver_order: int = 2,
- prediction_type: str = "epsilon",
- thresholding: bool = False,
- dynamic_thresholding_ratio: float = 0.995,
- sample_max_value: float = 1.0,
- algorithm_type: str = "dpmsolver++",
- solver_type: str = "midpoint",
- lower_order_final: bool = True,
- ):
- if trained_betas is not None:
- self.betas = paddle.to_tensor(trained_betas, dtype=paddle.float32)
- elif beta_schedule == "linear":
- self.betas = paddle.linspace(beta_start, beta_end, num_train_timesteps, dtype=paddle.float32)
- elif beta_schedule == "scaled_linear":
- # this schedule is very specific to the latent diffusion model.
- self.betas = (
- paddle.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=paddle.float32) ** 2
- )
- elif beta_schedule == "squaredcos_cap_v2":
- # Glide cosine schedule
- self.betas = betas_for_alpha_bar(num_train_timesteps)
- else:
-            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")
-
- self.alphas = 1.0 - self.betas
- self.alphas_cumprod = paddle.cumprod(self.alphas, 0)
- # Currently we only support VP-type noise schedule
- self.alpha_t = paddle.sqrt(self.alphas_cumprod)
- self.sigma_t = paddle.sqrt(1 - self.alphas_cumprod)
- self.lambda_t = paddle.log(self.alpha_t) - paddle.log(self.sigma_t)
-
- # standard deviation of the initial noise distribution
- self.init_noise_sigma = 1.0
-
- # settings for DPM-Solver
- if algorithm_type not in ["dpmsolver", "dpmsolver++"]:
-            raise NotImplementedError(f"{algorithm_type} is not implemented for {self.__class__}")
- if solver_type not in ["midpoint", "heun"]:
-            raise NotImplementedError(f"{solver_type} is not implemented for {self.__class__}")
-
- # setable values
- self.num_inference_steps = None
- timesteps = np.linspace(0, num_train_timesteps - 1, num_train_timesteps, dtype=np.float32)[::-1].copy()
- self.timesteps = paddle.to_tensor(timesteps)
- self.model_outputs = [None] * solver_order
- self.sample = None
- self.order_list = self.get_order_list(num_train_timesteps)
-
- def get_order_list(self, num_inference_steps: int) -> List[int]:
- """
- Computes the solver order at each time step.
-
- Args:
- num_inference_steps (`int`):
- the number of diffusion steps used when generating samples with a pre-trained model.
- """
- steps = num_inference_steps
- order = self.solver_order
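-        # For example, num_inference_steps=5 with solver_order=2 and lower_order_final=True
-        # yields [1, 2, 1, 2, 1]: pairs of (first-order, second-order) steps, ending with a
-        # first-order step so that every model evaluation is used.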
- if self.lower_order_final:
- if order == 3:
- if steps % 3 == 0:
- orders = [1, 2, 3] * (steps // 3 - 1) + [1, 2] + [1]
- elif steps % 3 == 1:
- orders = [1, 2, 3] * (steps // 3) + [1]
- else:
- orders = [1, 2, 3] * (steps // 3) + [1, 2]
- elif order == 2:
- if steps % 2 == 0:
- orders = [1, 2] * (steps // 2)
- else:
- orders = [1, 2] * (steps // 2) + [1]
- elif order == 1:
- orders = [1] * steps
- else:
- if order == 3:
- orders = [1, 2, 3] * (steps // 3)
- elif order == 2:
- orders = [1, 2] * (steps // 2)
- elif order == 1:
- orders = [1] * steps
- return orders
-
- def set_timesteps(self, num_inference_steps: int):
- """
- Sets the timesteps used for the diffusion chain. Supporting function to be run before inference.
-
- Args:
- num_inference_steps (`int`):
- the number of diffusion steps used when generating samples with a pre-trained model.
- """
- self.num_inference_steps = num_inference_steps
- timesteps = (
- np.linspace(0, self.num_train_timesteps - 1, num_inference_steps + 1)
- .round()[::-1][:-1]
- .copy()
- .astype(np.int64)
- )
- self.timesteps = paddle.to_tensor(timesteps)
- self.model_outputs = [None] * self.config.solver_order
- self.sample = None
- self.orders = self.get_order_list(num_inference_steps)
-
- def convert_model_output(self, model_output: paddle.Tensor, timestep: int, sample: paddle.Tensor) -> paddle.Tensor:
- """
- Convert the model output to the corresponding type that the algorithm (DPM-Solver / DPM-Solver++) needs.
-
- DPM-Solver is designed to discretize an integral of the noise prediction model, and DPM-Solver++ is designed to
- discretize an integral of the data prediction model. So we need to first convert the model output to the
- corresponding type to match the algorithm.
-
- Note that the algorithm type and the model type are decoupled. That is to say, we can use either DPM-Solver or
- DPM-Solver++ for both noise prediction model and data prediction model.
-
- Args:
- model_output (`paddle.Tensor`): direct output from learned diffusion model.
- timestep (`int`): current discrete timestep in the diffusion chain.
- sample (`paddle.Tensor`):
- current instance of sample being created by diffusion process.
-
- Returns:
- `paddle.Tensor`: the converted model output.
- """
- # DPM-Solver++ needs to solve an integral of the data prediction model.
- if self.config.algorithm_type == "dpmsolver++":
- if self.config.prediction_type == "epsilon":
- alpha_t, sigma_t = self.alpha_t[timestep], self.sigma_t[timestep]
- x0_pred = (sample - sigma_t * model_output) / alpha_t
- elif self.config.prediction_type == "sample":
- x0_pred = model_output
- elif self.config.prediction_type == "v_prediction":
- alpha_t, sigma_t = self.alpha_t[timestep], self.sigma_t[timestep]
- x0_pred = alpha_t * sample - sigma_t * model_output
- else:
- raise ValueError(
- f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
- " `v_prediction` for the DPMSolverSinglestepScheduler."
- )
-
- if self.config.thresholding:
- # Dynamic thresholding in https://arxiv.org/abs/2205.11487
- dtype = x0_pred.dtype
- dynamic_max_val = paddle.quantile(
- paddle.abs(x0_pred).reshape((x0_pred.shape[0], -1)).cast("float32"),
- self.config.dynamic_thresholding_ratio,
- axis=1,
- )
- dynamic_max_val = paddle.maximum(
- dynamic_max_val,
- self.config.sample_max_value * paddle.ones_like(dynamic_max_val),
- )[(...,) + (None,) * (x0_pred.ndim - 1)]
- x0_pred = paddle.clip(x0_pred, -dynamic_max_val, dynamic_max_val) / dynamic_max_val
- x0_pred = x0_pred.cast(dtype)
- return x0_pred
- # DPM-Solver needs to solve an integral of the noise prediction model.
- elif self.config.algorithm_type == "dpmsolver":
- if self.config.prediction_type == "epsilon":
- return model_output
- elif self.config.prediction_type == "sample":
- alpha_t, sigma_t = self.alpha_t[timestep], self.sigma_t[timestep]
- epsilon = (sample - alpha_t * model_output) / sigma_t
- return epsilon
- elif self.config.prediction_type == "v_prediction":
- alpha_t, sigma_t = self.alpha_t[timestep], self.sigma_t[timestep]
- epsilon = alpha_t * model_output + sigma_t * sample
- return epsilon
- else:
- raise ValueError(
- f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
- " `v_prediction` for the DPMSolverSinglestepScheduler."
- )
-
- def dpm_solver_first_order_update(
- self,
- model_output: paddle.Tensor,
- timestep: int,
- prev_timestep: int,
- sample: paddle.Tensor,
- ) -> paddle.Tensor:
- """
- One step for the first-order DPM-Solver (equivalent to DDIM).
-
- See https://arxiv.org/abs/2206.00927 for the detailed derivation.
-
- Args:
- model_output (`paddle.Tensor`): direct output from learned diffusion model.
- timestep (`int`): current discrete timestep in the diffusion chain.
- prev_timestep (`int`): previous discrete timestep in the diffusion chain.
- sample (`paddle.Tensor`):
- current instance of sample being created by diffusion process.
-
- Returns:
- `paddle.Tensor`: the sample tensor at the previous timestep.
- """
- lambda_t, lambda_s = self.lambda_t[prev_timestep], self.lambda_t[timestep]
- alpha_t, alpha_s = self.alpha_t[prev_timestep], self.alpha_t[timestep]
- sigma_t, sigma_s = self.sigma_t[prev_timestep], self.sigma_t[timestep]
- h = lambda_t - lambda_s
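- # h is the step size in log-SNR (lambda) space between the previous and current timestep.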
- if self.config.algorithm_type == "dpmsolver++":
- x_t = (sigma_t / sigma_s) * sample - (alpha_t * (paddle.exp(-h) - 1.0)) * model_output
- elif self.config.algorithm_type == "dpmsolver":
- x_t = (alpha_t / alpha_s) * sample - (sigma_t * (paddle.exp(h) - 1.0)) * model_output
- return x_t
-
- def singlestep_dpm_solver_second_order_update(
- self,
- model_output_list: List[paddle.Tensor],
- timestep_list: List[int],
- prev_timestep: int,
- sample: paddle.Tensor,
- ) -> paddle.Tensor:
- """
- One step for the second-order singlestep DPM-Solver.
-
- It computes the solution at time `prev_timestep` from the time `timestep_list[-2]`.
-
- Args:
- model_output_list (`List[paddle.Tensor]`):
- direct outputs from learned diffusion model at current and latter timesteps.
- timestep_list (`List[int]`): current and latter discrete timesteps in the diffusion chain.
- prev_timestep (`int`): previous discrete timestep in the diffusion chain.
- sample (`paddle.Tensor`):
- current instance of sample being created by diffusion process.
-
- Returns:
- `paddle.Tensor`: the sample tensor at the previous timestep.
- """
- t, s0, s1 = prev_timestep, timestep_list[-1], timestep_list[-2]
- m0, m1 = model_output_list[-1], model_output_list[-2]
- lambda_t, lambda_s0, lambda_s1 = self.lambda_t[t], self.lambda_t[s0], self.lambda_t[s1]
- alpha_t, alpha_s1 = self.alpha_t[t], self.alpha_t[s1]
- sigma_t, sigma_s1 = self.sigma_t[t], self.sigma_t[s1]
- h, h_0 = lambda_t - lambda_s1, lambda_s0 - lambda_s1
- r0 = h_0 / h
- D0, D1 = m1, (1.0 / r0) * (m0 - m1)
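- # D0 and D1 are the zeroth- and first-order finite differences of the converted model outputs in lambda space.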
- if self.config.algorithm_type == "dpmsolver++":
- # See https://arxiv.org/abs/2211.01095 for detailed derivations
- if self.config.solver_type == "midpoint":
- x_t = (
- (sigma_t / sigma_s1) * sample
- - (alpha_t * (paddle.exp(-h) - 1.0)) * D0
- - 0.5 * (alpha_t * (paddle.exp(-h) - 1.0)) * D1
- )
- elif self.config.solver_type == "heun":
- x_t = (
- (sigma_t / sigma_s1) * sample
- - (alpha_t * (paddle.exp(-h) - 1.0)) * D0
- + (alpha_t * ((paddle.exp(-h) - 1.0) / h + 1.0)) * D1
- )
- elif self.config.algorithm_type == "dpmsolver":
- # See https://arxiv.org/abs/2206.00927 for detailed derivations
- if self.config.solver_type == "midpoint":
- x_t = (
- (alpha_t / alpha_s1) * sample
- - (sigma_t * (paddle.exp(h) - 1.0)) * D0
- - 0.5 * (sigma_t * (paddle.exp(h) - 1.0)) * D1
- )
- elif self.config.solver_type == "heun":
- x_t = (
- (alpha_t / alpha_s1) * sample
- - (sigma_t * (paddle.exp(h) - 1.0)) * D0
- - (sigma_t * ((paddle.exp(h) - 1.0) / h - 1.0)) * D1
- )
- return x_t
-
- def singlestep_dpm_solver_third_order_update(
- self,
- model_output_list: List[paddle.Tensor],
- timestep_list: List[int],
- prev_timestep: int,
- sample: paddle.Tensor,
- ) -> paddle.Tensor:
- """
- One step for the third-order singlestep DPM-Solver.
-
- It computes the solution at time `prev_timestep` from the time `timestep_list[-3]`.
-
- Args:
- model_output_list (`List[paddle.Tensor]`):
- direct outputs from learned diffusion model at current and latter timesteps.
- timestep_list (`List[int]`): current and latter discrete timesteps in the diffusion chain.
- prev_timestep (`int`): previous discrete timestep in the diffusion chain.
- sample (`paddle.Tensor`):
- current instance of sample being created by diffusion process.
-
- Returns:
- `paddle.Tensor`: the sample tensor at the previous timestep.
- """
- t, s0, s1, s2 = prev_timestep, timestep_list[-1], timestep_list[-2], timestep_list[-3]
- m0, m1, m2 = model_output_list[-1], model_output_list[-2], model_output_list[-3]
- lambda_t, lambda_s0, lambda_s1, lambda_s2 = (
- self.lambda_t[t],
- self.lambda_t[s0],
- self.lambda_t[s1],
- self.lambda_t[s2],
- )
- alpha_t, alpha_s2 = self.alpha_t[t], self.alpha_t[s2]
- sigma_t, sigma_s2 = self.sigma_t[t], self.sigma_t[s2]
- h, h_0, h_1 = lambda_t - lambda_s2, lambda_s0 - lambda_s2, lambda_s1 - lambda_s2
- r0, r1 = h_0 / h, h_1 / h
- D0 = m2
- D1_0, D1_1 = (1.0 / r1) * (m1 - m2), (1.0 / r0) * (m0 - m2)
- D1 = (r0 * D1_0 - r1 * D1_1) / (r0 - r1)
- D2 = 2.0 * (D1_1 - D1_0) / (r0 - r1)
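- # D1 and D2 approximate the first and second derivatives of the converted model output in lambda space via finite differences.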
- if self.config.algorithm_type == "dpmsolver++":
- # See https://arxiv.org/abs/2206.00927 for detailed derivations
- if self.config.solver_type == "midpoint":
- x_t = (
- (sigma_t / sigma_s2) * sample
- - (alpha_t * (paddle.exp(-h) - 1.0)) * D0
- + (alpha_t * ((paddle.exp(-h) - 1.0) / h + 1.0)) * D1_1
- )
- elif self.config.solver_type == "heun":
- x_t = (
- (sigma_t / sigma_s2) * sample
- - (alpha_t * (paddle.exp(-h) - 1.0)) * D0
- + (alpha_t * ((paddle.exp(-h) - 1.0) / h + 1.0)) * D1
- - (alpha_t * ((paddle.exp(-h) - 1.0 + h) / h**2 - 0.5)) * D2
- )
- elif self.config.algorithm_type == "dpmsolver":
- # See https://arxiv.org/abs/2206.00927 for detailed derivations
- if self.config.solver_type == "midpoint":
- x_t = (
- (alpha_t / alpha_s2) * sample
- - (sigma_t * (paddle.exp(h) - 1.0)) * D0
- - (sigma_t * ((paddle.exp(h) - 1.0) / h - 1.0)) * D1_1
- )
- elif self.config.solver_type == "heun":
- x_t = (
- (alpha_t / alpha_s2) * sample
- - (sigma_t * (paddle.exp(h) - 1.0)) * D0
- - (sigma_t * ((paddle.exp(h) - 1.0) / h - 1.0)) * D1
- - (sigma_t * ((paddle.exp(h) - 1.0 - h) / h**2 - 0.5)) * D2
- )
- return x_t
-
- def singlestep_dpm_solver_update(
- self,
- model_output_list: List[paddle.Tensor],
- timestep_list: List[int],
- prev_timestep: int,
- sample: paddle.Tensor,
- order: int,
- ) -> paddle.Tensor:
- """
- One step for the singlestep DPM-Solver.
-
- Args:
- model_output_list (`List[paddle.Tensor]`):
- direct outputs from learned diffusion model at current and latter timesteps.
- timestep_list (`List[int]`): current and latter discrete timesteps in the diffusion chain.
- prev_timestep (`int`): previous discrete timestep in the diffusion chain.
- sample (`paddle.Tensor`):
- current instance of sample being created by diffusion process.
- order (`int`):
- the solver order at this step.
-
- Returns:
- `paddle.Tensor`: the sample tensor at the previous timestep.
- """
- if order == 1:
- return self.dpm_solver_first_order_update(model_output_list[-1], timestep_list[-1], prev_timestep, sample)
- elif order == 2:
- return self.singlestep_dpm_solver_second_order_update(
- model_output_list, timestep_list, prev_timestep, sample
- )
- elif order == 3:
- return self.singlestep_dpm_solver_third_order_update(
- model_output_list, timestep_list, prev_timestep, sample
- )
- else:
- raise ValueError(f"Order must be 1, 2, 3, got {order}")
-
- def step(
- self,
- model_output: paddle.Tensor,
- timestep: int,
- sample: paddle.Tensor,
- return_dict: bool = True,
- ) -> Union[SchedulerOutput, Tuple]:
- """
- Step function propagating the sample with the singlestep DPM-Solver.
-
- Args:
- model_output (`paddle.Tensor`): direct output from learned diffusion model.
- timestep (`int`): current discrete timestep in the diffusion chain.
- sample (`paddle.Tensor`):
- current instance of sample being created by diffusion process.
- return_dict (`bool`): option for returning tuple rather than SchedulerOutput class
-
- Returns:
- [`~scheduling_utils.SchedulerOutput`] or `tuple`: [`~scheduling_utils.SchedulerOutput`] if `return_dict` is
- True, otherwise a `tuple`. When returning a tuple, the first element is the sample tensor.
-
- """
- if self.num_inference_steps is None:
- raise ValueError(
- "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
- )
-
- step_index = (self.timesteps == timestep).nonzero()
- if len(step_index) == 0:
- step_index = len(self.timesteps) - 1
- else:
- step_index = step_index.item()
- prev_timestep = 0 if step_index == len(self.timesteps) - 1 else self.timesteps[step_index + 1]
-
- model_output = self.convert_model_output(model_output, timestep, sample)
- for i in range(self.config.solver_order - 1):
- self.model_outputs[i] = self.model_outputs[i + 1]
- self.model_outputs[-1] = model_output
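- # self.model_outputs keeps a rolling buffer of the last solver_order converted outputs for the higher-order updates.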
-
- order = self.order_list[step_index]
- # For single-step solvers, we use the initial value at each time with order = 1.
- if order == 1:
- self.sample = sample
-
- timestep_list = [self.timesteps[step_index - i] for i in range(order - 1, 0, -1)] + [timestep]
- prev_sample = self.singlestep_dpm_solver_update(
- self.model_outputs, timestep_list, prev_timestep, self.sample, order
- )
-
- if not return_dict:
- return (prev_sample,)
-
- return SchedulerOutput(prev_sample=prev_sample)
-
- def scale_model_input(self, sample: paddle.Tensor, *args, **kwargs) -> paddle.Tensor:
- """
- Ensures interchangeability with schedulers that need to scale the denoising model input depending on the
- current timestep.
-
- Args:
- sample (`paddle.Tensor`): input sample
-
- Returns:
- `paddle.Tensor`: scaled input sample
- """
- return sample
-
- def add_noise(
- self,
- original_samples: paddle.Tensor,
- noise: paddle.Tensor,
- timesteps: paddle.Tensor,
- ) -> paddle.Tensor:
- # Make sure alphas_cumprod and timestep have same device and dtype as original_samples
- self.alphas_cumprod = self.alphas_cumprod.cast(original_samples.dtype)
-
- sqrt_alpha_prod = self.alphas_cumprod[timesteps] ** 0.5
- sqrt_alpha_prod = sqrt_alpha_prod.flatten()
- while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
- sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)
-
- sqrt_one_minus_alpha_prod = (1 - self.alphas_cumprod[timesteps]) ** 0.5
- sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
- while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
- sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)
-
- noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
- return noisy_samples
-
- def __len__(self):
- return self.config.num_train_timesteps
diff --git a/spaces/44ov41za8i/FreeVC/speaker_encoder/config.py b/spaces/44ov41za8i/FreeVC/speaker_encoder/config.py
deleted file mode 100644
index 1c21312f3de971bfa008254c6035cebc09f05e4c..0000000000000000000000000000000000000000
--- a/spaces/44ov41za8i/FreeVC/speaker_encoder/config.py
+++ /dev/null
@@ -1,45 +0,0 @@
-librispeech_datasets = {
- "train": {
- "clean": ["LibriSpeech/train-clean-100", "LibriSpeech/train-clean-360"],
- "other": ["LibriSpeech/train-other-500"]
- },
- "test": {
- "clean": ["LibriSpeech/test-clean"],
- "other": ["LibriSpeech/test-other"]
- },
- "dev": {
- "clean": ["LibriSpeech/dev-clean"],
- "other": ["LibriSpeech/dev-other"]
- },
-}
-libritts_datasets = {
- "train": {
- "clean": ["LibriTTS/train-clean-100", "LibriTTS/train-clean-360"],
- "other": ["LibriTTS/train-other-500"]
- },
- "test": {
- "clean": ["LibriTTS/test-clean"],
- "other": ["LibriTTS/test-other"]
- },
- "dev": {
- "clean": ["LibriTTS/dev-clean"],
- "other": ["LibriTTS/dev-other"]
- },
-}
-voxceleb_datasets = {
- "voxceleb1" : {
- "train": ["VoxCeleb1/wav"],
- "test": ["VoxCeleb1/test_wav"]
- },
- "voxceleb2" : {
- "train": ["VoxCeleb2/dev/aac"],
- "test": ["VoxCeleb2/test_wav"]
- }
-}
-
-other_datasets = [
- "LJSpeech-1.1",
- "VCTK-Corpus/wav48",
-]
-
-anglophone_nationalites = ["australia", "canada", "ireland", "uk", "usa"]
diff --git a/spaces/AB-TW/team-ai/agents/tools/smart_domain/persistent_layer_code_tool.py b/spaces/AB-TW/team-ai/agents/tools/smart_domain/persistent_layer_code_tool.py
deleted file mode 100644
index 1aeb3688503afdedddd6bd3f31b41afb7e5a8bba..0000000000000000000000000000000000000000
--- a/spaces/AB-TW/team-ai/agents/tools/smart_domain/persistent_layer_code_tool.py
+++ /dev/null
@@ -1,55 +0,0 @@
-from langchain import LLMChain, PromptTemplate
-from langchain.agents import tool
-
-from models import llm
-from agents.tools.smart_domain.common import getPrefix
-from agents.tools.smart_domain.db_entity_repository import db_entity_architecture, db_entity_test_strategy
-from agents.tools.smart_domain.association_impl import association_impl_architecture, association_impl_test_strategy
-
-
-persistent_task = """"Your task is to generate the persistent layer tests and product code."""
-persistent_tech_stack = """Java17、reactor、lombok、Junit5、reactor test、Mockito、 Spring Data Reactive Couchbase、Testcontainers、Couchbase、WebClient"""
-persistent_architecture = f"""the persistent layer inclue 3 componets:
-{db_entity_architecture}
-{association_impl_architecture}"""
-
-persistent_test_strategy = f"""{db_entity_test_strategy}
-{association_impl_test_strategy}"""
-
-PERSISTENT_LAYER = getPrefix(persistent_task, persistent_tech_stack, persistent_architecture, persistent_test_strategy) + """
-
-Use the following format:
-request: the request that you need to fulfill, including the Entity and Association of the domain layer
-
-DBEntity:
-```
-the DBEntity code that you write to fulfill the request, follow TechStack and Architecture
-```
-
-Repository:
-```
-the Repository code that you write to fulfill the request, follow TechStack and Architecture
-```
-
-Association Implementation:
-```
-the Association Implementation code that you write to fulfill the request, follow TechStack and Architecture
-```
-
-Test:
-```
-the test code that you write to fulfill the request, follow TechStack Architecture and TestStrategy
-```
-
-request: {input}"""
-
-PERSISTENT_LAYER_PROMPT = PromptTemplate(input_variables=["input"], template=PERSISTENT_LAYER,)
-
-persistentChain = LLMChain(llm = llm(temperature=0.1), prompt=PERSISTENT_LAYER_PROMPT)
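-# The low temperature (0.1) keeps the generated code nearly deterministic and closely tied to the prompt's architecture constraints.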
-
-
-@tool("Generate Persistent Layer Code", return_direct=True)
-def persistentLayerCodeGenerator(input: str) -> str:
- '''useful for when you need to generate persistent layer code'''
- response = persistentChain.run(input)
- return response
\ No newline at end of file
diff --git a/spaces/AE-NV/sentiment-productreview/app.py b/spaces/AE-NV/sentiment-productreview/app.py
deleted file mode 100644
index a7773296bf63bb6d5497645ffaf5bd63a7d1f074..0000000000000000000000000000000000000000
--- a/spaces/AE-NV/sentiment-productreview/app.py
+++ /dev/null
@@ -1,20 +0,0 @@
-import gradio as gr
-alias = "Sentiment Analysis on product reviews"
-description = "Add a product review you can find on the internet. The model is trained on multiple languages so you can also test for that!"
-name = "models/nlptown/bert-base-multilingual-uncased-sentiment"
-examples = [
- ['''We vinden het aanbod heel lekker maar ...
- We vinden het aanbod heel lekker.
- Wat we wel heel erg spijtig vinden dat is dat er bij zoveel gerechten nog eens een supplement wordt gevraagd.
- Jullie prijzen stijgen al regelmatig!
- Jullie geven ook wel cadeaus maar nooit voor de gebruikers. Geef ons ook eens af en toe een bonus i.p.v. te proberen méér klanten te krijgen!
-'''],
- ['''Slechte kwaliteit
- De maaltijden zijn veel te Nederlands getint, groenten zijn niet vers als ze geleverd worden, vlees is van slechte en goedkope kwaliteit, broodjes die bijgeleverd worden zijn niet lekker.. structuur van een spons..
- Ik hoop dat ik zonder probleem het contract kan stopzetten…'''
- ],
- ]
-gr.Interface.load(name=name,
- alias=alias,
- description=description,
- examples=examples).launch()
\ No newline at end of file
diff --git a/spaces/AIConsultant/MusicGen/model_cards/MUSICGEN_MODEL_CARD.md b/spaces/AIConsultant/MusicGen/model_cards/MUSICGEN_MODEL_CARD.md
deleted file mode 100644
index 10ba9f9790841be06cd3e459cf667c1af6291343..0000000000000000000000000000000000000000
--- a/spaces/AIConsultant/MusicGen/model_cards/MUSICGEN_MODEL_CARD.md
+++ /dev/null
@@ -1,90 +0,0 @@
-# MusicGen Model Card
-
-## Model details
-
-**Organization developing the model:** The FAIR team of Meta AI.
-
-**Model date:** MusicGen was trained between April 2023 and May 2023.
-
-**Model version:** This is the version 1 of the model.
-
-**Model type:** MusicGen consists of an EnCodec model for audio tokenization and an auto-regressive, transformer-based language model for music modeling. The model comes in different sizes: 300M, 1.5B and 3.3B parameters; and in two variants: a model trained for the text-to-music generation task and a model trained for melody-guided music generation.
-
-**Paper or resources for more information:** More information can be found in the paper [Simple and Controllable Music Generation][arxiv].
-
-**Citation details:** See [our paper][arxiv]
-
-**License:** Code is released under MIT, model weights are released under CC-BY-NC 4.0.
-
-**Where to send questions or comments about the model:** Questions and comments about MusicGen can be sent via the [GitHub repository](https://github.com/facebookresearch/audiocraft) of the project, or by opening an issue.
-
-## Intended use
-**Primary intended use:** The primary use of MusicGen is research on AI-based music generation, including:
-
-- Research efforts, such as probing and better understanding the limitations of generative models to further improve the state of science
-- Generation of music guided by text or melody to understand current abilities of generative AI models by machine learning amateurs
-
-**Primary intended users:** The primary intended users of the model are researchers in audio, machine learning and artificial intelligence, as well as amateurs seeking to better understand those models.
-
-**Out-of-scope use cases:** The model should not be used in downstream applications without further risk evaluation and mitigation. The model should not be used to intentionally create or disseminate music pieces that create hostile or alienating environments for people. This includes generating music that people would foreseeably find disturbing, distressing, or offensive; or content that propagates historical or current stereotypes.
-
-## Metrics
-
-**Model performance measures:** We used the following objective measures to evaluate the model on a standard music benchmark:
-
-- Frechet Audio Distance computed on features extracted from a pre-trained audio classifier (VGGish)
-- Kullback-Leibler Divergence on label distributions extracted from a pre-trained audio classifier (PaSST)
-- CLAP Score between audio embedding and text embedding extracted from a pre-trained CLAP model
-
-Additionally, we run qualitative studies with human participants, evaluating the performance of the model with the following axes:
-
-- Overall quality of the music samples;
-- Text relevance to the provided text input;
-- Adherence to the melody for melody-guided music generation.
-
-More details on performance measures and human studies can be found in the paper.
-
-**Decision thresholds:** Not applicable.
-
-## Evaluation datasets
-
-The model was evaluated on the [MusicCaps benchmark](https://www.kaggle.com/datasets/googleai/musiccaps) and on an in-domain held-out evaluation set, with no artist overlap with the training set.
-
-## Training datasets
-
-The model was trained on licensed data using the following sources: the [Meta Music Initiative Sound Collection](https://www.fb.com/sound), [Shutterstock music collection](https://www.shutterstock.com/music) and the [Pond5 music collection](https://www.pond5.com/). See the paper for more details about the training set and corresponding preprocessing.
-
-## Evaluation results
-
-Below are the objective metrics obtained on MusicCaps with the released model. Note that for the publicly released models, we had all the datasets go through a state-of-the-art music source separation method, namely using the open source [Hybrid Transformer for Music Source Separation](https://github.com/facebookresearch/demucs) (HT-Demucs), in order to keep only the instrumental part. This explains the difference in objective metrics with the models used in the paper.
-
-| Model | Frechet Audio Distance | KLD | Text Consistency | Chroma Cosine Similarity |
-|---|---|---|---|---|
-| facebook/musicgen-small | 4.88 | 1.28 | 0.27 | - |
-| facebook/musicgen-medium | 5.14 | 1.24 | 0.28 | - |
-| facebook/musicgen-large | 5.48 | 1.22 | 0.28 | - |
-| facebook/musicgen-melody | 4.93 | 1.26 | 0.27 | 0.44 |
-
-More information can be found in the paper [Simple and Controllable Music Generation][arxiv], in the Results section.
-
-## Limitations and biases
-
-**Data:** The data sources used to train the model are created by music professionals and covered by legal agreements with the right holders. The model is trained on 20K hours of data; we believe that scaling the model to larger datasets could further improve its performance.
-
-**Mitigations:** Vocals have been removed from the data source using corresponding tags, and then using a state-of-the-art music source separation method, namely using the open source [Hybrid Transformer for Music Source Separation](https://github.com/facebookresearch/demucs) (HT-Demucs).
-
-**Limitations:**
-
-- The model is not able to generate realistic vocals.
-- The model has been trained with English descriptions and will not perform as well in other languages.
-- The model does not perform equally well for all music styles and cultures.
-- The model sometimes generates song endings that collapse into silence.
-- It is sometimes difficult to assess what types of text descriptions provide the best generations. Prompt engineering may be required to obtain satisfying results.
-
-**Biases:** The data source potentially lacks diversity, and not all music cultures are equally represented in the dataset. The model may not perform equally well across the wide variety of music genres that exist. The generated samples from the model will reflect the biases from the training data. Further work on this model should include methods for balanced and just representations of cultures, for example, by scaling the training data to be both diverse and inclusive.
-
-**Risks and harms:** Biases and limitations of the model may lead to generation of samples that may be considered biased, inappropriate or offensive. We believe that providing the code to reproduce the research and train new models will help broaden the application to new and more representative data.
-
-**Use cases:** Users must be aware of the biases, limitations and risks of the model. MusicGen is a model developed for artificial intelligence research on controllable music generation. As such, it should not be used for downstream applications without further investigation and mitigation of risks.
-
-[arxiv]: https://arxiv.org/abs/2306.05284
diff --git a/spaces/AIGC-Audio/AudioGPT/NeuralSeq/data_gen/tts/base_binarizer_emotion.py b/spaces/AIGC-Audio/AudioGPT/NeuralSeq/data_gen/tts/base_binarizer_emotion.py
deleted file mode 100644
index 2193a39417781a4673c11210cecb146401be2d09..0000000000000000000000000000000000000000
--- a/spaces/AIGC-Audio/AudioGPT/NeuralSeq/data_gen/tts/base_binarizer_emotion.py
+++ /dev/null
@@ -1,352 +0,0 @@
-import os
-
-os.environ["OMP_NUM_THREADS"] = "1"
-import torch
-from collections import Counter
-from utils.text_encoder import TokenTextEncoder
-from data_gen.tts.emotion import inference as EmotionEncoder
-from data_gen.tts.emotion.inference import embed_utterance as Embed_utterance
-from data_gen.tts.emotion.inference import preprocess_wav
-from utils.multiprocess_utils import chunked_multiprocess_run
-import random
-import traceback
-import json
-from resemblyzer import VoiceEncoder
-from tqdm import tqdm
-from data_gen.tts.data_gen_utils import get_mel2ph, get_pitch, build_phone_encoder, is_sil_phoneme
-from utils.hparams import hparams, set_hparams
-import numpy as np
-from utils.indexed_datasets import IndexedDatasetBuilder
-from vocoders.base_vocoder import get_vocoder_cls
-import pandas as pd
-
-
-class BinarizationError(Exception):
- pass
-
-
-class EmotionBinarizer:
- def __init__(self, processed_data_dir=None):
- if processed_data_dir is None:
- processed_data_dir = hparams['processed_data_dir']
- self.processed_data_dirs = processed_data_dir.split(",")
- self.binarization_args = hparams['binarization_args']
- self.pre_align_args = hparams['pre_align_args']
- self.item2txt = {}
- self.item2ph = {}
- self.item2wavfn = {}
- self.item2tgfn = {}
- self.item2spk = {}
- self.item2emo = {}
-
- def load_meta_data(self):
- for ds_id, processed_data_dir in enumerate(self.processed_data_dirs):
- self.meta_df = pd.read_csv(f"{processed_data_dir}/metadata_phone.csv", dtype=str)
- for r_idx, r in tqdm(self.meta_df.iterrows(), desc='Loading meta data.'):
- item_name = raw_item_name = r['item_name']
- if len(self.processed_data_dirs) > 1:
- item_name = f'ds{ds_id}_{item_name}'
- self.item2txt[item_name] = r['txt']
- self.item2ph[item_name] = r['ph']
- self.item2wavfn[item_name] = r['wav_fn']
- self.item2spk[item_name] = r.get('spk_name', 'SPK1') \
- if self.binarization_args['with_spk_id'] else 'SPK1'
- if len(self.processed_data_dirs) > 1:
- self.item2spk[item_name] = f"ds{ds_id}_{self.item2spk[item_name]}"
- self.item2tgfn[item_name] = f"{processed_data_dir}/mfa_outputs/{raw_item_name}.TextGrid"
- self.item2emo[item_name] = r.get('others', '"Neutral"')
- self.item_names = sorted(list(self.item2txt.keys()))
- if self.binarization_args['shuffle']:
- random.seed(1234)
- random.shuffle(self.item_names)
-
- @property
- def train_item_names(self):
- return self.item_names[hparams['test_num']:]
-
- @property
- def valid_item_names(self):
- return self.item_names[:hparams['test_num']]
-
- @property
- def test_item_names(self):
- return self.valid_item_names
-
- def build_spk_map(self):
- spk_map = set()
- for item_name in self.item_names:
- spk_name = self.item2spk[item_name]
- spk_map.add(spk_name)
- spk_map = {x: i for i, x in enumerate(sorted(list(spk_map)))}
- print("| #Spk: ", len(spk_map))
- assert len(spk_map) == 0 or len(spk_map) <= hparams['num_spk'], len(spk_map)
- return spk_map
-
- def build_emo_map(self):
- emo_map = set()
- for item_name in self.item_names:
- emo_name = self.item2emo[item_name]
- emo_map.add(emo_name)
- emo_map = {x: i for i, x in enumerate(sorted(list(emo_map)))}
- print("| #Emo: ", len(emo_map))
- return emo_map
-
- def item_name2spk_id(self, item_name):
- return self.spk_map[self.item2spk[item_name]]
-
- def item_name2emo_id(self, item_name):
- return self.emo_map[self.item2emo[item_name]]
-
- def _phone_encoder(self):
- ph_set_fn = f"{hparams['binary_data_dir']}/phone_set.json"
- ph_set = []
- if self.binarization_args['reset_phone_dict'] or not os.path.exists(ph_set_fn):
- for ph_sent in self.item2ph.values():
- ph_set += ph_sent.split(' ')
- ph_set = sorted(set(ph_set))
- json.dump(ph_set, open(ph_set_fn, 'w'))
- print("| Build phone set: ", ph_set)
- else:
- ph_set = json.load(open(ph_set_fn, 'r'))
- print("| Load phone set: ", ph_set)
- return build_phone_encoder(hparams['binary_data_dir'])
-
- def _word_encoder(self):
- fn = f"{hparams['binary_data_dir']}/word_set.json"
- word_set = []
- if self.binarization_args['reset_word_dict']:
- for word_sent in self.item2txt.values():
- word_set += [x for x in word_sent.split(' ') if x != '']
- word_set = Counter(word_set)
- total_words = sum(word_set.values())
- word_set = word_set.most_common(hparams['word_size'])
- num_unk_words = total_words - sum([x[1] for x in word_set])
- word_set = [x[0] for x in word_set]
- json.dump(word_set, open(fn, 'w'))
- print(f"| Build word set. Size: {len(word_set)}, #total words: {total_words},"
- f" #unk_words: {num_unk_words}, word_set[:10]:, {word_set[:10]}.")
- else:
- word_set = json.load(open(fn, 'r'))
- print("| Load word set. Size: ", len(word_set), word_set[:10])
- return TokenTextEncoder(None, vocab_list=word_set, replace_oov='')
-
- def meta_data(self, prefix):
- if prefix == 'valid':
- item_names = self.valid_item_names
- elif prefix == 'test':
- item_names = self.test_item_names
- else:
- item_names = self.train_item_names
- for item_name in item_names:
- ph = self.item2ph[item_name]
- txt = self.item2txt[item_name]
- tg_fn = self.item2tgfn.get(item_name)
- wav_fn = self.item2wavfn[item_name]
- spk_id = self.item_name2spk_id(item_name)
- emotion = self.item_name2emo_id(item_name)
- yield item_name, ph, txt, tg_fn, wav_fn, spk_id, emotion
-
- def process(self):
- self.load_meta_data()
- os.makedirs(hparams['binary_data_dir'], exist_ok=True)
- self.spk_map = self.build_spk_map()
- print("| spk_map: ", self.spk_map)
- spk_map_fn = f"{hparams['binary_data_dir']}/spk_map.json"
- json.dump(self.spk_map, open(spk_map_fn, 'w'))
-
- self.emo_map = self.build_emo_map()
- print("| emo_map: ", self.emo_map)
- emo_map_fn = f"{hparams['binary_data_dir']}/emo_map.json"
- json.dump(self.emo_map, open(emo_map_fn, 'w'))
-
- self.phone_encoder = self._phone_encoder()
- self.word_encoder = None
- EmotionEncoder.load_model(hparams['emotion_encoder_path'])
-
- if self.binarization_args['with_word']:
- self.word_encoder = self._word_encoder()
- self.process_data('valid')
- self.process_data('test')
- self.process_data('train')
-
- def process_data(self, prefix):
- data_dir = hparams['binary_data_dir']
- args = []
- builder = IndexedDatasetBuilder(f'{data_dir}/{prefix}')
- ph_lengths = []
- mel_lengths = []
- f0s = []
- total_sec = 0
- if self.binarization_args['with_spk_embed']:
- voice_encoder = VoiceEncoder().cuda()
-
- meta_data = list(self.meta_data(prefix))
- for m in meta_data:
- args.append(list(m) + [(self.phone_encoder, self.word_encoder), self.binarization_args])
- num_workers = self.num_workers
- for f_id, (_, item) in enumerate(
- zip(tqdm(meta_data), chunked_multiprocess_run(self.process_item, args, num_workers=num_workers))):
- if item is None:
- continue
- item['spk_embed'] = voice_encoder.embed_utterance(item['wav']) \
- if self.binarization_args['with_spk_embed'] else None
- processed_wav = preprocess_wav(item['wav_fn'])
- item['emo_embed'] = Embed_utterance(processed_wav)
- if not self.binarization_args['with_wav'] and 'wav' in item:
- del item['wav']
- builder.add_item(item)
- mel_lengths.append(item['len'])
- if 'ph_len' in item:
- ph_lengths.append(item['ph_len'])
- total_sec += item['sec']
- if item.get('f0') is not None:
- f0s.append(item['f0'])
- builder.finalize()
- np.save(f'{data_dir}/{prefix}_lengths.npy', mel_lengths)
- if len(ph_lengths) > 0:
- np.save(f'{data_dir}/{prefix}_ph_lengths.npy', ph_lengths)
- if len(f0s) > 0:
- f0s = np.concatenate(f0s, 0)
- f0s = f0s[f0s != 0]
- np.save(f'{data_dir}/{prefix}_f0s_mean_std.npy', [np.mean(f0s).item(), np.std(f0s).item()])
- print(f"| {prefix} total duration: {total_sec:.3f}s")
-
- @classmethod
- def process_item(cls, item_name, ph, txt, tg_fn, wav_fn, spk_id, emotion, encoder, binarization_args):
- res = {'item_name': item_name, 'txt': txt, 'ph': ph, 'wav_fn': wav_fn, 'spk_id': spk_id, 'emotion': emotion}
- if binarization_args['with_linear']:
- wav, mel, linear_stft = get_vocoder_cls(hparams).wav2spec(wav_fn) # , return_linear=True
- res['linear'] = linear_stft
- else:
- wav, mel = get_vocoder_cls(hparams).wav2spec(wav_fn)
- wav = wav.astype(np.float16)
- res.update({'mel': mel, 'wav': wav,
- 'sec': len(wav) / hparams['audio_sample_rate'], 'len': mel.shape[0]})
- try:
- if binarization_args['with_f0']:
- cls.get_pitch(res)
- if binarization_args['with_f0cwt']:
- cls.get_f0cwt(res)
- if binarization_args['with_txt']:
- ph_encoder, word_encoder = encoder
- try:
- res['phone'] = ph_encoder.encode(ph)
- res['ph_len'] = len(res['phone'])
- except:
- traceback.print_exc()
- raise BinarizationError(f"Empty phoneme")
- if binarization_args['with_align']:
- cls.get_align(tg_fn, res)
- if binarization_args['trim_eos_bos']:
- bos_dur = res['dur'][0]
- eos_dur = res['dur'][-1]
- res['mel'] = mel[bos_dur:-eos_dur]
- res['f0'] = res['f0'][bos_dur:-eos_dur]
- res['pitch'] = res['pitch'][bos_dur:-eos_dur]
- res['mel2ph'] = res['mel2ph'][bos_dur:-eos_dur]
- res['wav'] = wav[bos_dur * hparams['hop_size']:-eos_dur * hparams['hop_size']]
- res['dur'] = res['dur'][1:-1]
- res['len'] = res['mel'].shape[0]
- if binarization_args['with_word']:
- cls.get_word(res, word_encoder)
- except BinarizationError as e:
- print(f"| Skip item ({e}). item_name: {item_name}, wav_fn: {wav_fn}")
- return None
- except Exception as e:
- traceback.print_exc()
- print(f"| Skip item. item_name: {item_name}, wav_fn: {wav_fn}")
- return None
- return res
-
- @staticmethod
- def get_align(tg_fn, res):
- ph = res['ph']
- mel = res['mel']
- phone_encoded = res['phone']
- if tg_fn is not None and os.path.exists(tg_fn):
- mel2ph, dur = get_mel2ph(tg_fn, ph, mel, hparams)
- else:
- raise BinarizationError(f"Align not found")
- if mel2ph.max() - 1 >= len(phone_encoded):
- raise BinarizationError(
- f"Align does not match: mel2ph.max() - 1: {mel2ph.max() - 1}, len(phone_encoded): {len(phone_encoded)}")
- res['mel2ph'] = mel2ph
- res['dur'] = dur
-
- @staticmethod
- def get_pitch(res):
- wav, mel = res['wav'], res['mel']
- f0, pitch_coarse = get_pitch(wav, mel, hparams)
- if sum(f0) == 0:
- raise BinarizationError("Empty f0")
- res['f0'] = f0
- res['pitch'] = pitch_coarse
-
- @staticmethod
- def get_f0cwt(res):
- from utils.cwt import get_cont_lf0, get_lf0_cwt
- f0 = res['f0']
- uv, cont_lf0_lpf = get_cont_lf0(f0)
- logf0s_mean_org, logf0s_std_org = np.mean(cont_lf0_lpf), np.std(cont_lf0_lpf)
- cont_lf0_lpf_norm = (cont_lf0_lpf - logf0s_mean_org) / logf0s_std_org
- Wavelet_lf0, scales = get_lf0_cwt(cont_lf0_lpf_norm)
- if np.any(np.isnan(Wavelet_lf0)):
- raise BinarizationError("NaN CWT")
- res['cwt_spec'] = Wavelet_lf0
- res['cwt_scales'] = scales
- res['f0_mean'] = logf0s_mean_org
- res['f0_std'] = logf0s_std_org
-
- @staticmethod
- def get_word(res, word_encoder):
- ph_split = res['ph'].split(" ")
- # ph side mapping to word
- ph_words = [] # ['', 'N_AW1_', ',', 'AE1_Z_|', 'AO1_L_|', 'B_UH1_K_S_|', 'N_AA1_T_|', ....]
- ph2word = np.zeros([len(ph_split)], dtype=int)
- last_ph_idx_for_word = [] # [2, 11, ...]
- for i, ph in enumerate(ph_split):
- if ph == '|':
- last_ph_idx_for_word.append(i)
- elif not ph[0].isalnum():
- if ph not in ['']:
- last_ph_idx_for_word.append(i - 1)
- last_ph_idx_for_word.append(i)
- start_ph_idx_for_word = [0] + [i + 1 for i in last_ph_idx_for_word[:-1]]
- for i, (s_w, e_w) in enumerate(zip(start_ph_idx_for_word, last_ph_idx_for_word)):
- ph_words.append(ph_split[s_w:e_w + 1])
- ph2word[s_w:e_w + 1] = i
- ph2word = ph2word.tolist()
- ph_words = ["_".join(w) for w in ph_words]
-
- # mel side mapping to word
- mel2word = []
- dur_word = [0 for _ in range(len(ph_words))]
- for i, m2p in enumerate(res['mel2ph']):
- word_idx = ph2word[m2p - 1]
- mel2word.append(ph2word[m2p - 1])
- dur_word[word_idx] += 1
- ph2word = [x + 1 for x in ph2word] # index 0 is reserved for padding
- mel2word = [x + 1 for x in mel2word] # index 0 is reserved for padding
- res['ph_words'] = ph_words # [T_word]
- res['ph2word'] = ph2word # [T_ph]
- res['mel2word'] = mel2word # [T_mel]
- res['dur_word'] = dur_word # [T_word]
- words = [x for x in res['txt'].split(" ") if x != '']
- while len(words) > 0 and is_sil_phoneme(words[0]):
- words = words[1:]
- while len(words) > 0 and is_sil_phoneme(words[-1]):
- words = words[:-1]
- words = [''] + words + ['']
- word_tokens = word_encoder.encode(" ".join(words))
- res['words'] = words
- res['word_tokens'] = word_tokens
- assert len(words) == len(ph_words), [words, ph_words]
-
- @property
- def num_workers(self):
- return int(os.getenv('N_PROC', hparams.get('N_PROC', os.cpu_count())))
-
-
-if __name__ == "__main__":
- set_hparams()
- EmotionBinarizer().process()
diff --git a/spaces/AP123/Upside-Down-Diffusion/README.md b/spaces/AP123/Upside-Down-Diffusion/README.md
deleted file mode 100644
index 638c7295a8df879bdd9a23d432c6fa831b80e97a..0000000000000000000000000000000000000000
--- a/spaces/AP123/Upside-Down-Diffusion/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-title: Upside-Down-Diffusion
-emoji: 🙃
-colorFrom: red
-colorTo: indigo
-sdk: gradio
-sdk_version: 3.44.4
-app_file: app.py
-pinned: false
-license: openrail
-hf_oauth: true
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
\ No newline at end of file
diff --git a/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/_base_/models/resnet18.py b/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/_base_/models/resnet18.py
deleted file mode 100644
index 7c66758ee4aadced38c815e98af68b74aa310a2e..0000000000000000000000000000000000000000
--- a/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/_base_/models/resnet18.py
+++ /dev/null
@@ -1,17 +0,0 @@
-# model settings
-model = dict(
- type='ImageClassifier',
- backbone=dict(
- type='ResNet',
- depth=18,
- num_stages=4,
- out_indices=(3, ),
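- # only the last stage's feature map (stage index 3) is fed to the neck and head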
- style='pytorch'),
- neck=dict(type='GlobalAveragePooling'),
- head=dict(
- type='LinearClsHead',
- num_classes=1000,
- in_channels=512,
- loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
- topk=(1, 5),
- ))
diff --git a/spaces/Abhilashvj/planogram-compliance/utils/loggers/comet/__init__.py b/spaces/Abhilashvj/planogram-compliance/utils/loggers/comet/__init__.py
deleted file mode 100644
index 8ac3ac44ca8de912f2c8c46277a50e2406674d7c..0000000000000000000000000000000000000000
--- a/spaces/Abhilashvj/planogram-compliance/utils/loggers/comet/__init__.py
+++ /dev/null
@@ -1,615 +0,0 @@
-import glob
-import json
-import logging
-import os
-import sys
-from pathlib import Path
-
-logger = logging.getLogger(__name__)
-
-FILE = Path(__file__).resolve()
-ROOT = FILE.parents[3] # YOLOv5 root directory
-if str(ROOT) not in sys.path:
- sys.path.append(str(ROOT)) # add ROOT to PATH
-
-try:
- import comet_ml
-
- # Project Configuration
- config = comet_ml.config.get_config()
- COMET_PROJECT_NAME = config.get_string(
- os.getenv("COMET_PROJECT_NAME"), "comet.project_name", default="yolov5"
- )
-except (ModuleNotFoundError, ImportError):
- comet_ml = None
- COMET_PROJECT_NAME = None
-
-import PIL
-import torch
-import torchvision.transforms as T
-import yaml
-
-from utils.dataloaders import img2label_paths
-from utils.general import check_dataset, scale_boxes, xywh2xyxy
-from utils.metrics import box_iou
-
-COMET_PREFIX = "comet://"
-
-COMET_MODE = os.getenv("COMET_MODE", "online")
-
-# Model Saving Settings
-COMET_MODEL_NAME = os.getenv("COMET_MODEL_NAME", "yolov5")
-
-# Dataset Artifact Settings
-COMET_UPLOAD_DATASET = (
- os.getenv("COMET_UPLOAD_DATASET", "false").lower() == "true"
-)
-
-# Evaluation Settings
-COMET_LOG_CONFUSION_MATRIX = (
- os.getenv("COMET_LOG_CONFUSION_MATRIX", "true").lower() == "true"
-)
-COMET_LOG_PREDICTIONS = (
- os.getenv("COMET_LOG_PREDICTIONS", "true").lower() == "true"
-)
-COMET_MAX_IMAGE_UPLOADS = int(os.getenv("COMET_MAX_IMAGE_UPLOADS", 100))
-
-# Confusion Matrix Settings
-CONF_THRES = float(os.getenv("CONF_THRES", 0.001))
-IOU_THRES = float(os.getenv("IOU_THRES", 0.6))
-
-# Batch Logging Settings
-COMET_LOG_BATCH_METRICS = (
- os.getenv("COMET_LOG_BATCH_METRICS", "false").lower() == "true"
-)
-COMET_BATCH_LOGGING_INTERVAL = os.getenv("COMET_BATCH_LOGGING_INTERVAL", 1)
-COMET_PREDICTION_LOGGING_INTERVAL = os.getenv(
- "COMET_PREDICTION_LOGGING_INTERVAL", 1
-)
-COMET_LOG_PER_CLASS_METRICS = (
- os.getenv("COMET_LOG_PER_CLASS_METRICS", "false").lower() == "true"
-)
-
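-# Distributed training rank; -1 means this process is not part of a DDP run.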
-RANK = int(os.getenv("RANK", -1))
-
-to_pil = T.ToPILImage()
-
-
-class CometLogger:
- """Log metrics, parameters, source code, models and much more
- with Comet
- """
-
- def __init__(
- self, opt, hyp, run_id=None, job_type="Training", **experiment_kwargs
- ) -> None:
- self.job_type = job_type
- self.opt = opt
- self.hyp = hyp
-
- # Comet Flags
- self.comet_mode = COMET_MODE
-
- self.save_model = opt.save_period > -1
- self.model_name = COMET_MODEL_NAME
-
- # Batch Logging Settings
- self.log_batch_metrics = COMET_LOG_BATCH_METRICS
- self.comet_log_batch_interval = COMET_BATCH_LOGGING_INTERVAL
-
- # Dataset Artifact Settings
- self.upload_dataset = (
- self.opt.upload_dataset
- if self.opt.upload_dataset
- else COMET_UPLOAD_DATASET
- )
- self.resume = self.opt.resume
-
- # Default parameters to pass to Experiment objects
- self.default_experiment_kwargs = {
- "log_code": False,
- "log_env_gpu": True,
- "log_env_cpu": True,
- "project_name": COMET_PROJECT_NAME,
- }
- self.default_experiment_kwargs.update(experiment_kwargs)
- self.experiment = self._get_experiment(self.comet_mode, run_id)
-
- self.data_dict = self.check_dataset(self.opt.data)
- self.class_names = self.data_dict["names"]
- self.num_classes = self.data_dict["nc"]
-
- self.logged_images_count = 0
- self.max_images = COMET_MAX_IMAGE_UPLOADS
-
- if run_id is None:
- self.experiment.log_other("Created from", "YOLOv5")
- if not isinstance(self.experiment, comet_ml.OfflineExperiment):
- (
- workspace,
- project_name,
- experiment_id,
- ) = self.experiment.url.split("/")[-3:]
- self.experiment.log_other(
- "Run Path",
- f"{workspace}/{project_name}/{experiment_id}",
- )
- self.log_parameters(vars(opt))
- self.log_parameters(self.opt.hyp)
- self.log_asset_data(
- self.opt.hyp,
- name="hyperparameters.json",
- metadata={"type": "hyp-config-file"},
- )
- self.log_asset(
- f"{self.opt.save_dir}/opt.yaml",
- metadata={"type": "opt-config-file"},
- )
-
- self.comet_log_confusion_matrix = COMET_LOG_CONFUSION_MATRIX
-
- if hasattr(self.opt, "conf_thres"):
- self.conf_thres = self.opt.conf_thres
- else:
- self.conf_thres = CONF_THRES
- if hasattr(self.opt, "iou_thres"):
- self.iou_thres = self.opt.iou_thres
- else:
- self.iou_thres = IOU_THRES
-
- self.log_parameters(
- {
- "val_iou_threshold": self.iou_thres,
- "val_conf_threshold": self.conf_thres,
- }
- )
-
- self.comet_log_predictions = COMET_LOG_PREDICTIONS
- if self.opt.bbox_interval == -1:
- self.comet_log_prediction_interval = (
- 1 if self.opt.epochs < 10 else self.opt.epochs // 10
- )
- else:
- self.comet_log_prediction_interval = self.opt.bbox_interval
-
- if self.comet_log_predictions:
- self.metadata_dict = {}
- self.logged_image_names = []
-
- self.comet_log_per_class_metrics = COMET_LOG_PER_CLASS_METRICS
-
- self.experiment.log_others(
- {
- "comet_mode": COMET_MODE,
- "comet_max_image_uploads": COMET_MAX_IMAGE_UPLOADS,
- "comet_log_per_class_metrics": COMET_LOG_PER_CLASS_METRICS,
- "comet_log_batch_metrics": COMET_LOG_BATCH_METRICS,
- "comet_log_confusion_matrix": COMET_LOG_CONFUSION_MATRIX,
- "comet_model_name": COMET_MODEL_NAME,
- }
- )
-
- # Check if running the Experiment with the Comet Optimizer
- if hasattr(self.opt, "comet_optimizer_id"):
- self.experiment.log_other(
- "optimizer_id", self.opt.comet_optimizer_id
- )
- self.experiment.log_other(
- "optimizer_objective", self.opt.comet_optimizer_objective
- )
- self.experiment.log_other(
- "optimizer_metric", self.opt.comet_optimizer_metric
- )
- self.experiment.log_other(
- "optimizer_parameters", json.dumps(self.hyp)
- )
-
- def _get_experiment(self, mode, experiment_id=None):
- if mode == "offline":
- if experiment_id is not None:
- return comet_ml.ExistingOfflineExperiment(
- previous_experiment=experiment_id,
- **self.default_experiment_kwargs,
- )
-
- return comet_ml.OfflineExperiment(
- **self.default_experiment_kwargs,
- )
-
- else:
- try:
- if experiment_id is not None:
- return comet_ml.ExistingExperiment(
- previous_experiment=experiment_id,
- **self.default_experiment_kwargs,
- )
-
- return comet_ml.Experiment(**self.default_experiment_kwargs)
-
- except ValueError:
- logger.warning(
- "COMET WARNING: "
- "Comet credentials have not been set. "
- "Comet will default to offline logging. "
- "Please set your credentials to enable online logging."
- )
- return self._get_experiment("offline", experiment_id)
-
- return
-
- def log_metrics(self, log_dict, **kwargs):
- self.experiment.log_metrics(log_dict, **kwargs)
-
- def log_parameters(self, log_dict, **kwargs):
- self.experiment.log_parameters(log_dict, **kwargs)
-
- def log_asset(self, asset_path, **kwargs):
- self.experiment.log_asset(asset_path, **kwargs)
-
- def log_asset_data(self, asset, **kwargs):
- self.experiment.log_asset_data(asset, **kwargs)
-
- def log_image(self, img, **kwargs):
- self.experiment.log_image(img, **kwargs)
-
- def log_model(self, path, opt, epoch, fitness_score, best_model=False):
- if not self.save_model:
- return
-
- model_metadata = {
- "fitness_score": fitness_score[-1],
- "epochs_trained": epoch + 1,
- "save_period": opt.save_period,
- "total_epochs": opt.epochs,
- }
-
- model_files = glob.glob(f"{path}/*.pt")
- for model_path in model_files:
- name = Path(model_path).name
-
- self.experiment.log_model(
- self.model_name,
- file_or_folder=model_path,
- file_name=name,
- metadata=model_metadata,
- overwrite=True,
- )
-
- def check_dataset(self, data_file):
- with open(data_file) as f:
- data_config = yaml.safe_load(f)
-
- if data_config["path"].startswith(COMET_PREFIX):
- path = data_config["path"].replace(COMET_PREFIX, "")
- data_dict = self.download_dataset_artifact(path)
-
- return data_dict
-
- self.log_asset(self.opt.data, metadata={"type": "data-config-file"})
-
- return check_dataset(data_file)
-
- def log_predictions(self, image, labelsn, path, shape, predn):
- if self.logged_images_count >= self.max_images:
- return
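- # Keep predictions above the confidence threshold, then log only those that overlap a ground-truth box by more than the IoU threshold.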
- detections = predn[predn[:, 4] > self.conf_thres]
- iou = box_iou(labelsn[:, 1:], detections[:, :4])
- mask, _ = torch.where(iou > self.iou_thres)
- if len(mask) == 0:
- return
-
- filtered_detections = detections[mask]
- filtered_labels = labelsn[mask]
-
- image_id = path.split("/")[-1].split(".")[0]
- image_name = f"{image_id}_curr_epoch_{self.experiment.curr_epoch}"
- if image_name not in self.logged_image_names:
- native_scale_image = PIL.Image.open(path)
- self.log_image(native_scale_image, name=image_name)
- self.logged_image_names.append(image_name)
-
- metadata = []
- for cls, *xyxy in filtered_labels.tolist():
- metadata.append(
- {
- "label": f"{self.class_names[int(cls)]}-gt",
- "score": 100,
- "box": {
- "x": xyxy[0],
- "y": xyxy[1],
- "x2": xyxy[2],
- "y2": xyxy[3],
- },
- }
- )
- for *xyxy, conf, cls in filtered_detections.tolist():
- metadata.append(
- {
- "label": f"{self.class_names[int(cls)]}",
- "score": conf * 100,
- "box": {
- "x": xyxy[0],
- "y": xyxy[1],
- "x2": xyxy[2],
- "y2": xyxy[3],
- },
- }
- )
-
- self.metadata_dict[image_name] = metadata
- self.logged_images_count += 1
-
- return
-
- def preprocess_prediction(self, image, labels, shape, pred):
- nl, _ = labels.shape[0], pred.shape[0]
-
- # Predictions
- if self.opt.single_cls:
- pred[:, 5] = 0
-
- predn = pred.clone()
- scale_boxes(image.shape[1:], predn[:, :4], shape[0], shape[1])
-
- labelsn = None
- if nl:
- tbox = xywh2xyxy(labels[:, 1:5]) # target boxes
- scale_boxes(
- image.shape[1:], tbox, shape[0], shape[1]
- ) # native-space labels
- labelsn = torch.cat(
- (labels[:, 0:1], tbox), 1
- ) # native-space labels
- scale_boxes(
- image.shape[1:], predn[:, :4], shape[0], shape[1]
- ) # native-space pred
-
- return predn, labelsn
-
- def add_assets_to_artifact(self, artifact, path, asset_path, split):
- img_paths = sorted(glob.glob(f"{asset_path}/*"))
- label_paths = img2label_paths(img_paths)
-
- for image_file, label_file in zip(img_paths, label_paths):
- image_logical_path, label_logical_path = map(
- lambda x: os.path.relpath(x, path), [image_file, label_file]
- )
-
- try:
- artifact.add(
- image_file,
- logical_path=image_logical_path,
- metadata={"split": split},
- )
- artifact.add(
- label_file,
- logical_path=label_logical_path,
- metadata={"split": split},
- )
- except ValueError as e:
- logger.error(
- "COMET ERROR: Error adding file to Artifact. Skipping file."
- )
- logger.error(f"COMET ERROR: {e}")
- continue
-
- return artifact
-
- def upload_dataset_artifact(self):
- dataset_name = self.data_dict.get("dataset_name", "yolov5-dataset")
- path = str((ROOT / Path(self.data_dict["path"])).resolve())
-
- metadata = self.data_dict.copy()
- for key in ["train", "val", "test"]:
- split_path = metadata.get(key)
- if split_path is not None:
- metadata[key] = split_path.replace(path, "")
-
- artifact = comet_ml.Artifact(
- name=dataset_name, artifact_type="dataset", metadata=metadata
- )
- for key in metadata.keys():
- if key in ["train", "val", "test"]:
- if isinstance(self.upload_dataset, str) and (
- key != self.upload_dataset
- ):
- continue
-
- asset_path = self.data_dict.get(key)
- if asset_path is not None:
- artifact = self.add_assets_to_artifact(
- artifact, path, asset_path, key
- )
-
- self.experiment.log_artifact(artifact)
-
- return
-
- def download_dataset_artifact(self, artifact_path):
- logged_artifact = self.experiment.get_artifact(artifact_path)
- artifact_save_dir = str(Path(self.opt.save_dir) / logged_artifact.name)
- logged_artifact.download(artifact_save_dir)
-
- metadata = logged_artifact.metadata
- data_dict = metadata.copy()
- data_dict["path"] = artifact_save_dir
-
- metadata_names = metadata.get("names")
- if isinstance(metadata_names, dict):
- data_dict["names"] = {
- int(k): v for k, v in metadata.get("names").items()
- }
- elif isinstance(metadata_names, list):
- data_dict["names"] = {
- int(k): v
- for k, v in zip(range(len(metadata_names)), metadata_names)
- }
- else:
- raise "Invalid 'names' field in dataset yaml file. Please use a list or dictionary"
-
- data_dict = self.update_data_paths(data_dict)
- return data_dict
-
- def update_data_paths(self, data_dict):
- path = data_dict.get("path", "")
-
- for split in ["train", "val", "test"]:
- if data_dict.get(split):
- split_path = data_dict.get(split)
- data_dict[split] = (
- f"{path}/{split_path}"
- if isinstance(split_path, str)
- else [f"{path}/{x}" for x in split_path]
- )
-
- return data_dict
-
- def on_pretrain_routine_end(self, paths):
- if self.opt.resume:
- return
-
- for path in paths:
- self.log_asset(str(path))
-
- if self.upload_dataset:
- if not self.resume:
- self.upload_dataset_artifact()
-
- return
-
- def on_train_start(self):
- self.log_parameters(self.hyp)
-
- def on_train_epoch_start(self):
- return
-
- def on_train_epoch_end(self, epoch):
- self.experiment.curr_epoch = epoch
-
- return
-
- def on_train_batch_start(self):
- return
-
- def on_train_batch_end(self, log_dict, step):
- self.experiment.curr_step = step
- if self.log_batch_metrics and (
- step % self.comet_log_batch_interval == 0
- ):
- self.log_metrics(log_dict, step=step)
-
- return
-
- def on_train_end(self, files, save_dir, last, best, epoch, results):
- if self.comet_log_predictions:
- curr_epoch = self.experiment.curr_epoch
- self.experiment.log_asset_data(
- self.metadata_dict, "image-metadata.json", epoch=curr_epoch
- )
-
- for f in files:
- self.log_asset(f, metadata={"epoch": epoch})
- self.log_asset(f"{save_dir}/results.csv", metadata={"epoch": epoch})
-
- if not self.opt.evolve:
- model_path = str(best if best.exists() else last)
- name = Path(model_path).name
- if self.save_model:
- self.experiment.log_model(
- self.model_name,
- file_or_folder=model_path,
- file_name=name,
- overwrite=True,
- )
-
- # Check if running Experiment with Comet Optimizer
- if hasattr(self.opt, "comet_optimizer_id"):
- metric = results.get(self.opt.comet_optimizer_metric)
- self.experiment.log_other("optimizer_metric_value", metric)
-
- self.finish_run()
-
- def on_val_start(self):
- return
-
- def on_val_batch_start(self):
- return
-
- def on_val_batch_end(
- self, batch_i, images, targets, paths, shapes, outputs
- ):
- if not (
- self.comet_log_predictions
- and ((batch_i + 1) % self.comet_log_prediction_interval == 0)
- ):
- return
-
- for si, pred in enumerate(outputs):
- if len(pred) == 0:
- continue
-
- image = images[si]
- labels = targets[targets[:, 0] == si, 1:]
- shape = shapes[si]
- path = paths[si]
- predn, labelsn = self.preprocess_prediction(
- image, labels, shape, pred
- )
- if labelsn is not None:
- self.log_predictions(image, labelsn, path, shape, predn)
-
- return
-
- def on_val_end(
- self, nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix
- ):
- if self.comet_log_per_class_metrics:
- if self.num_classes > 1:
- for i, c in enumerate(ap_class):
- class_name = self.class_names[c]
- self.experiment.log_metrics(
- {
- "mAP@.5": ap50[i],
- "mAP@.5:.95": ap[i],
- "precision": p[i],
- "recall": r[i],
- "f1": f1[i],
- "true_positives": tp[i],
- "false_positives": fp[i],
- "support": nt[c],
- },
- prefix=class_name,
- )
-
- if self.comet_log_confusion_matrix:
- epoch = self.experiment.curr_epoch
- class_names = list(self.class_names.values())
- class_names.append("background")
- num_classes = len(class_names)
-
- self.experiment.log_confusion_matrix(
- matrix=confusion_matrix.matrix,
- max_categories=num_classes,
- labels=class_names,
- epoch=epoch,
- column_label="Actual Category",
- row_label="Predicted Category",
- file_name=f"confusion-matrix-epoch-{epoch}.json",
- )
-
- def on_fit_epoch_end(self, result, epoch):
- self.log_metrics(result, epoch=epoch)
-
- def on_model_save(self, last, epoch, final_epoch, best_fitness, fi):
- if (
- (epoch + 1) % self.opt.save_period == 0 and not final_epoch
- ) and self.opt.save_period != -1:
- self.log_model(
- last.parent, self.opt, epoch, fi, best_model=best_fitness == fi
- )
-
- def on_params_update(self, params):
- self.log_parameters(params)
-
- def finish_run(self):
- self.experiment.end()
diff --git a/spaces/Abrish-Aadi/Chest-Xray-anomaly-detection/app.py b/spaces/Abrish-Aadi/Chest-Xray-anomaly-detection/app.py
deleted file mode 100644
index 4d592b1cb11dd700ab06a6b8ec4e1ede009a3158..0000000000000000000000000000000000000000
--- a/spaces/Abrish-Aadi/Chest-Xray-anomaly-detection/app.py
+++ /dev/null
@@ -1,40 +0,0 @@
-import gradio as gr
-import tensorflow as tf
-import os
-import numpy as np
-
-model=tf.keras.models.load_model('model.h5')
-
-LABELS = ['NORMAL', 'TUBERCULOSIS', 'PNEUMONIA', 'COVID19']
-
-def predict_input_image(img):
- img_4d=img.reshape(-1,128,128,3)/255.0
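- # img_4d: a batch of one 128x128 RGB image with pixel values scaled to [0, 1]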
- print(img_4d.min())
- print(img_4d.max())
- prediction=model.predict(img_4d)[0]
- return {LABELS[i]: float(prediction[i]) for i in range(4)}
-
-def k():
- return gr.update(value=None)
-
-with gr.Blocks(title="Chest X-Ray Anomaly Detection", css="") as demo:
- with gr.Row():
- textmd = gr.Markdown('''
- # Chest X-Ray Anomaly Detection
- ''')
- with gr.Row():
- with gr.Column(scale=1, min_width=600):
- image = gr.inputs.Image(shape=(128,128))
- with gr.Row():
- clear_btn = gr.Button("Clear")
- submit_btn = gr.Button("Submit", elem_id="warningk", variant='primary')
- examples = gr.Examples(examples=["COVID19(573).jpg",
- "NORMAL2-IM-1345-0001-0002.jpeg",
- "person1946_bacteria_4875.jpeg",
- "Tuberculosis-658.png"], inputs=image)
- label = gr.outputs.Label(num_top_classes=4)
-
- clear_btn.click(k, inputs=[], outputs=image)
- submit_btn.click(predict_input_image, inputs=image, outputs=label)
-
-demo.launch()
\ No newline at end of file
diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/FadeMethods.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/FadeMethods.js
deleted file mode 100644
index 8db7ef9321ce045a0483a84f34a4bbdd35ee1f58..0000000000000000000000000000000000000000
--- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/FadeMethods.js
+++ /dev/null
@@ -1,86 +0,0 @@
-import { FadeIn, FadeOutDestroy } from '../fade/Fade.js';
-import { WaitComplete } from '../utils/WaitEvent.js';
-import GetParentSizerMethods from './GetParentSizerMethods.js';
-
-const IsPlainObject = Phaser.Utils.Objects.IsPlainObject;
-
-var OnInitFade = function (gameObject, fade) {
- // Route 'complete' of fade to gameObject
- fade.completeEventName = undefined;
- fade.on('complete', function () {
- if (fade.completeEventName) {
- gameObject.emit(fade.completeEventName, gameObject);
- fade.completeEventName = undefined;
- }
- })
-
- // Update local state
- fade.on('update', function () {
- var parent = GetParentSizerMethods.getParentSizer(gameObject);
- if (parent) {
- parent.resetChildAlphaState(gameObject);
- }
- })
-}
-
-export default {
- fadeIn(duration, alpha) {
- if (IsPlainObject(duration)) {
- var config = duration;
- duration = config.duration;
- alpha = config.alpha;
- }
-
- var isInit = (this._fade === undefined);
-
- this._fade = FadeIn(this, duration, alpha, this._fade);
-
- if (isInit) {
- OnInitFade(this, this._fade);
- }
-
- this._fade.completeEventName = 'fadein.complete';
-
- return this;
- },
-
- fadeInPromise(duration, alpha) {
- this.fadeIn(duration, alpha);
- return WaitComplete(this._fade);
- },
-
- fadeOutDestroy(duration, destroyMode) {
- if (IsPlainObject(duration)) {
- var config = duration;
- duration = config.duration;
- destroyMode = config.destroy;
- }
-
- var isInit = (this._fade === undefined);
-
- this._fade = FadeOutDestroy(this, duration, destroyMode, this._fade);
-
- if (isInit) {
- OnInitFade(this, this._fade);
- }
-
- this._fade.completeEventName = 'fadeout.complete';
-
- return this;
- },
-
- fadeOutDestroyPromise(duration, destroyMode) {
- this.fadeOutDestroy(duration, destroyMode);
- return WaitComplete(this._fade);
- },
-
- fadeOut(duration) {
- this.fadeOutDestroy(duration, false);
- return this;
- },
-
- fadeOutPromise(duration) {
- this.fadeOut(duration);
- return WaitComplete(this._fade);
- }
-}
\ No newline at end of file
diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/scrollbar/Factory.d.ts b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/scrollbar/Factory.d.ts
deleted file mode 100644
index 8b50b67d1d959b7477e390dc0b1d81ff918eb619..0000000000000000000000000000000000000000
--- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/scrollbar/Factory.d.ts
+++ /dev/null
@@ -1,5 +0,0 @@
-import ScrollBar from './ScrollBar';
-
-export default function (
- config?: ScrollBar.IConfig
-): ScrollBar;
\ No newline at end of file
diff --git a/spaces/Amrrs/DragGan-Inversion/PTI/torch_utils/ops/bias_act.cpp b/spaces/Amrrs/DragGan-Inversion/PTI/torch_utils/ops/bias_act.cpp
deleted file mode 100644
index 5d2425d8054991a8e8b6f7a940fd0ff7fa0bb330..0000000000000000000000000000000000000000
--- a/spaces/Amrrs/DragGan-Inversion/PTI/torch_utils/ops/bias_act.cpp
+++ /dev/null
@@ -1,99 +0,0 @@
-// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
-//
-// NVIDIA CORPORATION and its licensors retain all intellectual property
-// and proprietary rights in and to this software, related documentation
-// and any modifications thereto. Any use, reproduction, disclosure or
-// distribution of this software and related documentation without an express
-// license agreement from NVIDIA CORPORATION is strictly prohibited.
-
-#include <torch/extension.h>
-#include <ATen/cuda/CUDAContext.h>
-#include <c10/cuda/CUDAGuard.h>
-#include "bias_act.h"
-
-//------------------------------------------------------------------------
-
-static bool has_same_layout(torch::Tensor x, torch::Tensor y)
-{
- if (x.dim() != y.dim())
- return false;
- for (int64_t i = 0; i < x.dim(); i++)
- {
- if (x.size(i) != y.size(i))
- return false;
- if (x.size(i) >= 2 && x.stride(i) != y.stride(i))
- return false;
- }
- return true;
-}
-
-//------------------------------------------------------------------------
-
-static torch::Tensor bias_act(torch::Tensor x, torch::Tensor b, torch::Tensor xref, torch::Tensor yref, torch::Tensor dy, int grad, int dim, int act, float alpha, float gain, float clamp)
-{
- // Validate arguments.
- TORCH_CHECK(x.is_cuda(), "x must reside on CUDA device");
- TORCH_CHECK(b.numel() == 0 || (b.dtype() == x.dtype() && b.device() == x.device()), "b must have the same dtype and device as x");
- TORCH_CHECK(xref.numel() == 0 || (xref.sizes() == x.sizes() && xref.dtype() == x.dtype() && xref.device() == x.device()), "xref must have the same shape, dtype, and device as x");
- TORCH_CHECK(yref.numel() == 0 || (yref.sizes() == x.sizes() && yref.dtype() == x.dtype() && yref.device() == x.device()), "yref must have the same shape, dtype, and device as x");
- TORCH_CHECK(dy.numel() == 0 || (dy.sizes() == x.sizes() && dy.dtype() == x.dtype() && dy.device() == x.device()), "dy must have the same dtype and device as x");
- TORCH_CHECK(x.numel() <= INT_MAX, "x is too large");
- TORCH_CHECK(b.dim() == 1, "b must have rank 1");
- TORCH_CHECK(b.numel() == 0 || (dim >= 0 && dim < x.dim()), "dim is out of bounds");
- TORCH_CHECK(b.numel() == 0 || b.numel() == x.size(dim), "b has wrong number of elements");
- TORCH_CHECK(grad >= 0, "grad must be non-negative");
-
- // Validate layout.
- TORCH_CHECK(x.is_non_overlapping_and_dense(), "x must be non-overlapping and dense");
- TORCH_CHECK(b.is_contiguous(), "b must be contiguous");
- TORCH_CHECK(xref.numel() == 0 || has_same_layout(xref, x), "xref must have the same layout as x");
- TORCH_CHECK(yref.numel() == 0 || has_same_layout(yref, x), "yref must have the same layout as x");
- TORCH_CHECK(dy.numel() == 0 || has_same_layout(dy, x), "dy must have the same layout as x");
-
- // Create output tensor.
- const at::cuda::OptionalCUDAGuard device_guard(device_of(x));
- torch::Tensor y = torch::empty_like(x);
- TORCH_CHECK(has_same_layout(y, x), "y must have the same layout as x");
-
- // Initialize CUDA kernel parameters.
- bias_act_kernel_params p;
- p.x = x.data_ptr();
- p.b = (b.numel()) ? b.data_ptr() : NULL;
- p.xref = (xref.numel()) ? xref.data_ptr() : NULL;
- p.yref = (yref.numel()) ? yref.data_ptr() : NULL;
- p.dy = (dy.numel()) ? dy.data_ptr() : NULL;
- p.y = y.data_ptr();
- p.grad = grad;
- p.act = act;
- p.alpha = alpha;
- p.gain = gain;
- p.clamp = clamp;
- p.sizeX = (int)x.numel();
- p.sizeB = (int)b.numel();
- p.stepB = (b.numel()) ? (int)x.stride(dim) : 1;
-
- // Choose CUDA kernel.
- void* kernel;
- AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "upfirdn2d_cuda", [&]
- {
- kernel = choose_bias_act_kernel(p);
- });
- TORCH_CHECK(kernel, "no CUDA kernel found for the specified activation func");
-
- // Launch CUDA kernel.
- p.loopX = 4;
- int blockSize = 4 * 32;
- int gridSize = (p.sizeX - 1) / (p.loopX * blockSize) + 1;
- void* args[] = {&p};
- AT_CUDA_CHECK(cudaLaunchKernel(kernel, gridSize, blockSize, args, 0, at::cuda::getCurrentCUDAStream()));
- return y;
-}
-
-//------------------------------------------------------------------------
-
-PYBIND11_MODULE(TORCH_EXTENSION_NAME, m)
-{
- m.def("bias_act", &bias_act);
-}
-
-//------------------------------------------------------------------------
diff --git a/spaces/Amrrs/DragGan-Inversion/stylegan_human/torch_utils/op_edit/upfirdn2d.py b/spaces/Amrrs/DragGan-Inversion/stylegan_human/torch_utils/op_edit/upfirdn2d.py
deleted file mode 100644
index ecdcabbe20d2405b71d049d0bf94ae576fe58493..0000000000000000000000000000000000000000
--- a/spaces/Amrrs/DragGan-Inversion/stylegan_human/torch_utils/op_edit/upfirdn2d.py
+++ /dev/null
@@ -1,206 +0,0 @@
-# Copyright (c) SenseTime Research. All rights reserved.
-
-import os
-
-import torch
-from torch.nn import functional as F
-from torch.autograd import Function
-from torch.utils.cpp_extension import load
-
-
-module_path = os.path.dirname(__file__)
-upfirdn2d_op = load(
- "upfirdn2d",
- sources=[
- os.path.join(module_path, "upfirdn2d.cpp"),
- os.path.join(module_path, "upfirdn2d_kernel.cu"),
- ],
-)
-
-
-class UpFirDn2dBackward(Function):
- @staticmethod
- def forward(
- ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, in_size, out_size
- ):
-
- up_x, up_y = up
- down_x, down_y = down
- g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad
-
- grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1)
-
- grad_input = upfirdn2d_op.upfirdn2d(
- grad_output,
- grad_kernel,
- down_x,
- down_y,
- up_x,
- up_y,
- g_pad_x0,
- g_pad_x1,
- g_pad_y0,
- g_pad_y1,
- )
- grad_input = grad_input.view(
- in_size[0], in_size[1], in_size[2], in_size[3])
-
- ctx.save_for_backward(kernel)
-
- pad_x0, pad_x1, pad_y0, pad_y1 = pad
-
- ctx.up_x = up_x
- ctx.up_y = up_y
- ctx.down_x = down_x
- ctx.down_y = down_y
- ctx.pad_x0 = pad_x0
- ctx.pad_x1 = pad_x1
- ctx.pad_y0 = pad_y0
- ctx.pad_y1 = pad_y1
- ctx.in_size = in_size
- ctx.out_size = out_size
-
- return grad_input
-
- @staticmethod
- def backward(ctx, gradgrad_input):
- (kernel,) = ctx.saved_tensors
-
- gradgrad_input = gradgrad_input.reshape(-1,
- ctx.in_size[2], ctx.in_size[3], 1)
-
- gradgrad_out = upfirdn2d_op.upfirdn2d(
- gradgrad_input,
- kernel,
- ctx.up_x,
- ctx.up_y,
- ctx.down_x,
- ctx.down_y,
- ctx.pad_x0,
- ctx.pad_x1,
- ctx.pad_y0,
- ctx.pad_y1,
- )
- # gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.out_size[0], ctx.out_size[1], ctx.in_size[3])
- gradgrad_out = gradgrad_out.view(
- ctx.in_size[0], ctx.in_size[1], ctx.out_size[0], ctx.out_size[1]
- )
-
- return gradgrad_out, None, None, None, None, None, None, None, None
-
-
-class UpFirDn2d(Function):
- @staticmethod
- def forward(ctx, input, kernel, up, down, pad):
- up_x, up_y = up
- down_x, down_y = down
- pad_x0, pad_x1, pad_y0, pad_y1 = pad
-
- kernel_h, kernel_w = kernel.shape
- batch, channel, in_h, in_w = input.shape
- ctx.in_size = input.shape
-
- input = input.reshape(-1, in_h, in_w, 1)
-
- ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1]))
-
- out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1
- out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1
- ctx.out_size = (out_h, out_w)
-
- ctx.up = (up_x, up_y)
- ctx.down = (down_x, down_y)
- ctx.pad = (pad_x0, pad_x1, pad_y0, pad_y1)
-
- g_pad_x0 = kernel_w - pad_x0 - 1
- g_pad_y0 = kernel_h - pad_y0 - 1
- g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1
- g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1
-
- ctx.g_pad = (g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1)
-
- out = upfirdn2d_op.upfirdn2d(
- input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1
- )
- # out = out.view(major, out_h, out_w, minor)
- out = out.view(-1, channel, out_h, out_w)
-
- return out
-
- @staticmethod
- def backward(ctx, grad_output):
- kernel, grad_kernel = ctx.saved_tensors
-
- grad_input = UpFirDn2dBackward.apply(
- grad_output,
- kernel,
- grad_kernel,
- ctx.up,
- ctx.down,
- ctx.pad,
- ctx.g_pad,
- ctx.in_size,
- ctx.out_size,
- )
-
- return grad_input, None, None, None, None
-
-
-def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)):
- if input.device.type == "cpu":
- out = upfirdn2d_native(
- input, kernel, up, up, down, down, pad[0], pad[1], pad[0], pad[1]
- )
-
- else:
- out = UpFirDn2d.apply(
- input, kernel, (up, up), (down,
- down), (pad[0], pad[1], pad[0], pad[1])
- )
-
- return out
-
-
-def upfirdn2d_native(
- input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1
-):
- _, channel, in_h, in_w = input.shape
- input = input.reshape(-1, in_h, in_w, 1)
-
- _, in_h, in_w, minor = input.shape
- kernel_h, kernel_w = kernel.shape
-
- out = input.view(-1, in_h, 1, in_w, 1, minor)
- out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1])
- out = out.view(-1, in_h * up_y, in_w * up_x, minor)
-
- out = F.pad(
- out, [0, 0, max(pad_x0, 0), max(pad_x1, 0),
- max(pad_y0, 0), max(pad_y1, 0)]
- )
- out = out[
- :,
- max(-pad_y0, 0): out.shape[1] - max(-pad_y1, 0),
- max(-pad_x0, 0): out.shape[2] - max(-pad_x1, 0),
- :,
- ]
-
- out = out.permute(0, 3, 1, 2)
- out = out.reshape(
- [-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1]
- )
- w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w)
- out = F.conv2d(out, w)
- out = out.reshape(
- -1,
- minor,
- in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1,
- in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1,
- )
- out = out.permute(0, 2, 3, 1)
- out = out[:, ::down_y, ::down_x, :]
-
- out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1
- out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1
-
- return out.view(-1, channel, out_h, out_w)
diff --git a/spaces/Andy1621/uniformer_image_detection/configs/gcnet/mask_rcnn_r50_fpn_r16_gcb_c3-c5_1x_coco.py b/spaces/Andy1621/uniformer_image_detection/configs/gcnet/mask_rcnn_r50_fpn_r16_gcb_c3-c5_1x_coco.py
deleted file mode 100644
index d299b69f576a2547de1f7d9edd171d56ab002d0a..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_detection/configs/gcnet/mask_rcnn_r50_fpn_r16_gcb_c3-c5_1x_coco.py
+++ /dev/null
@@ -1,8 +0,0 @@
-_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
-model = dict(
- backbone=dict(plugins=[
- dict(
- cfg=dict(type='ContextBlock', ratio=1. / 16),
- stages=(False, True, True, True),
- position='after_conv3')
- ]))
diff --git a/spaces/Andy1621/uniformer_image_detection/configs/scratch/README.md b/spaces/Andy1621/uniformer_image_detection/configs/scratch/README.md
deleted file mode 100644
index a338dc5d2c7c30a954b927d748afa3d7067542f4..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_detection/configs/scratch/README.md
+++ /dev/null
@@ -1,25 +0,0 @@
-# Rethinking ImageNet Pre-training
-
-## Introduction
-
-[ALGORITHM]
-
-```latex
-@article{he2018rethinking,
- title={Rethinking imagenet pre-training},
- author={He, Kaiming and Girshick, Ross and Doll{\'a}r, Piotr},
- journal={arXiv preprint arXiv:1811.08883},
- year={2018}
-}
-```
-
-## Results and Models
-
-| Model | Backbone | Style | Lr schd | box AP | mask AP | Config | Download |
-|:------------:|:---------:|:-------:|:-------:|:------:|:-------:|:------:|:--------:|
-| Faster R-CNN | R-50-FPN | pytorch | 6x | 40.7 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/scratch/faster_rcnn_r50_fpn_gn-all_scratch_6x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/scratch/faster_rcnn_r50_fpn_gn-all_scratch_6x_coco/scratch_faster_rcnn_r50_fpn_gn_6x_bbox_mAP-0.407_20200201_193013-90813d01.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/scratch/faster_rcnn_r50_fpn_gn-all_scratch_6x_coco/scratch_faster_rcnn_r50_fpn_gn_6x_20200201_193013.log.json) |
-| Mask R-CNN | R-50-FPN | pytorch | 6x | 41.2 | 37.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/scratch/mask_rcnn_r50_fpn_gn-all_scratch_6x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/scratch/mask_rcnn_r50_fpn_gn-all_scratch_6x_coco/scratch_mask_rcnn_r50_fpn_gn_6x_bbox_mAP-0.412__segm_mAP-0.374_20200201_193051-1e190a40.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/scratch/mask_rcnn_r50_fpn_gn-all_scratch_6x_coco/scratch_mask_rcnn_r50_fpn_gn_6x_20200201_193051.log.json) |
-
-Note:
-
-- The above models are trained with 16 GPUs.
diff --git a/spaces/Andy1621/uniformer_image_detection/mmdet/models/losses/smooth_l1_loss.py b/spaces/Andy1621/uniformer_image_detection/mmdet/models/losses/smooth_l1_loss.py
deleted file mode 100644
index ec9c98a52d1932d6ccff18938c17c36755bf1baf..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_detection/mmdet/models/losses/smooth_l1_loss.py
+++ /dev/null
@@ -1,139 +0,0 @@
-import mmcv
-import torch
-import torch.nn as nn
-
-from ..builder import LOSSES
-from .utils import weighted_loss
-
-
-@mmcv.jit(derivate=True, coderize=True)
-@weighted_loss
-def smooth_l1_loss(pred, target, beta=1.0):
- """Smooth L1 loss.
-
- Args:
- pred (torch.Tensor): The prediction.
- target (torch.Tensor): The learning target of the prediction.
- beta (float, optional): The threshold in the piecewise function.
- Defaults to 1.0.
-
- Returns:
- torch.Tensor: Calculated loss
- """
- assert beta > 0
- assert pred.size() == target.size() and target.numel() > 0
- diff = torch.abs(pred - target)
- loss = torch.where(diff < beta, 0.5 * diff * diff / beta,
- diff - 0.5 * beta)
- return loss
-
-
-@mmcv.jit(derivate=True, coderize=True)
-@weighted_loss
-def l1_loss(pred, target):
- """L1 loss.
-
- Args:
- pred (torch.Tensor): The prediction.
- target (torch.Tensor): The learning target of the prediction.
-
- Returns:
- torch.Tensor: Calculated loss
- """
- assert pred.size() == target.size() and target.numel() > 0
- loss = torch.abs(pred - target)
- return loss
-
-
-@LOSSES.register_module()
-class SmoothL1Loss(nn.Module):
- """Smooth L1 loss.
-
- Args:
- beta (float, optional): The threshold in the piecewise function.
- Defaults to 1.0.
- reduction (str, optional): The method to reduce the loss.
- Options are "none", "mean" and "sum". Defaults to "mean".
- loss_weight (float, optional): The weight of loss.
- """
-
- def __init__(self, beta=1.0, reduction='mean', loss_weight=1.0):
- super(SmoothL1Loss, self).__init__()
- self.beta = beta
- self.reduction = reduction
- self.loss_weight = loss_weight
-
- def forward(self,
- pred,
- target,
- weight=None,
- avg_factor=None,
- reduction_override=None,
- **kwargs):
- """Forward function.
-
- Args:
- pred (torch.Tensor): The prediction.
- target (torch.Tensor): The learning target of the prediction.
- weight (torch.Tensor, optional): The weight of loss for each
- prediction. Defaults to None.
- avg_factor (int, optional): Average factor that is used to average
- the loss. Defaults to None.
- reduction_override (str, optional): The reduction method used to
- override the original reduction method of the loss.
- Defaults to None.
- """
- assert reduction_override in (None, 'none', 'mean', 'sum')
- reduction = (
- reduction_override if reduction_override else self.reduction)
- loss_bbox = self.loss_weight * smooth_l1_loss(
- pred,
- target,
- weight,
- beta=self.beta,
- reduction=reduction,
- avg_factor=avg_factor,
- **kwargs)
- return loss_bbox
-
-
-@LOSSES.register_module()
-class L1Loss(nn.Module):
- """L1 loss.
-
- Args:
- reduction (str, optional): The method to reduce the loss.
- Options are "none", "mean" and "sum".
- loss_weight (float, optional): The weight of loss.
- """
-
- def __init__(self, reduction='mean', loss_weight=1.0):
- super(L1Loss, self).__init__()
- self.reduction = reduction
- self.loss_weight = loss_weight
-
- def forward(self,
- pred,
- target,
- weight=None,
- avg_factor=None,
- reduction_override=None):
- """Forward function.
-
- Args:
- pred (torch.Tensor): The prediction.
- target (torch.Tensor): The learning target of the prediction.
- weight (torch.Tensor, optional): The weight of loss for each
- prediction. Defaults to None.
- avg_factor (int, optional): Average factor that is used to average
- the loss. Defaults to None.
- reduction_override (str, optional): The reduction method used to
- override the original reduction method of the loss.
- Defaults to None.
- """
- assert reduction_override in (None, 'none', 'mean', 'sum')
- reduction = (
- reduction_override if reduction_override else self.reduction)
- loss_bbox = self.loss_weight * l1_loss(
- pred, target, weight, reduction=reduction, avg_factor=avg_factor)
- return loss_bbox
diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/fcn/fcn_r101-d8_512x512_80k_ade20k.py b/spaces/Andy1621/uniformer_image_segmentation/configs/fcn/fcn_r101-d8_512x512_80k_ade20k.py
deleted file mode 100644
index 6d0294530f4c817b352cb020d111e3248690ae1f..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_segmentation/configs/fcn/fcn_r101-d8_512x512_80k_ade20k.py
+++ /dev/null
@@ -1,2 +0,0 @@
-_base_ = './fcn_r50-d8_512x512_80k_ade20k.py'
-model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
diff --git a/spaces/AnjaneyuluChinni/AnjiChinniGenAIAvatar/app.py b/spaces/AnjaneyuluChinni/AnjiChinniGenAIAvatar/app.py
deleted file mode 100644
index a362dcc7d0ddd1eee86961f1bc3db6d894fbd3d5..0000000000000000000000000000000000000000
--- a/spaces/AnjaneyuluChinni/AnjiChinniGenAIAvatar/app.py
+++ /dev/null
@@ -1,34 +0,0 @@
-import os
-import gradio as gr
-from langchain.chat_models import ChatOpenAI
-from langchain import LLMChain, PromptTemplate
-from langchain.memory import ConversationBufferMemory
-
-OPENAI_API_KEY=os.getenv('OPENAI_API_KEY')
-
-template = """You are a helpful assistant to answer all user queries.
-{chat_history}
-User: {user_message}
-Chatbot:"""
-
-prompt = PromptTemplate(
- input_variables=["chat_history", "user_message"], template=template
-)
-
-memory = ConversationBufferMemory(memory_key="chat_history")
-
-llm_chain = LLMChain(
- llm=ChatOpenAI(temperature='0.5', model_name="gpt-3.5-turbo"),
- prompt=prompt,
- verbose=True,
- memory=memory,
-)
-
-def get_text_response(user_message,history):
- response = llm_chain.predict(user_message = user_message)
- return response
-
-demo = gr.ChatInterface(get_text_response)
-
-if __name__ == "__main__":
- demo.launch() #To create a public link, set `share=True` in `launch()`. To enable errors and logs, set `debug=True` in `launch()`.
diff --git a/spaces/Arnaudding001/OpenAI_whisperLive/app-network.py b/spaces/Arnaudding001/OpenAI_whisperLive/app-network.py
deleted file mode 100644
index 7605c4b126dfc7dac188dce38551ca8ae84d67db..0000000000000000000000000000000000000000
--- a/spaces/Arnaudding001/OpenAI_whisperLive/app-network.py
+++ /dev/null
@@ -1,3 +0,0 @@
-# Run the app with no audio file restrictions, and make it available on the network
-from app import create_ui
-create_ui(-1, server_name="0.0.0.0")
\ No newline at end of file
diff --git a/spaces/Artrajz/vits-simple-api/bert_vits2/text/japanese.py b/spaces/Artrajz/vits-simple-api/bert_vits2/text/japanese.py
deleted file mode 100644
index 2081f7f63807ca7be02016b46dcf8dc16745f6c6..0000000000000000000000000000000000000000
--- a/spaces/Artrajz/vits-simple-api/bert_vits2/text/japanese.py
+++ /dev/null
@@ -1,585 +0,0 @@
-# Convert Japanese text to phonemes which is
-# compatible with Julius https://github.com/julius-speech/segmentation-kit
-import re
-import unicodedata
-
-from transformers import AutoTokenizer
-
-from bert_vits2.text.symbols import *
-from bert_vits2.text.japanese_bert import tokenizer
-
-try:
- import MeCab
-except ImportError as e:
- raise ImportError("Japanese requires mecab-python3 and unidic-lite.") from e
-from num2words import num2words
-
-_CONVRULES = [
- # Conversion of 2 letters
- "アァ/ a a",
- "イィ/ i i",
- "イェ/ i e",
- "イャ/ y a",
- "ウゥ/ u:",
- "エェ/ e e",
- "オォ/ o:",
- "カァ/ k a:",
- "キィ/ k i:",
- "クゥ/ k u:",
- "クャ/ ky a",
- "クュ/ ky u",
- "クョ/ ky o",
- "ケェ/ k e:",
- "コォ/ k o:",
- "ガァ/ g a:",
- "ギィ/ g i:",
- "グゥ/ g u:",
- "グャ/ gy a",
- "グュ/ gy u",
- "グョ/ gy o",
- "ゲェ/ g e:",
- "ゴォ/ g o:",
- "サァ/ s a:",
- "シィ/ sh i:",
- "スゥ/ s u:",
- "スャ/ sh a",
- "スュ/ sh u",
- "スョ/ sh o",
- "セェ/ s e:",
- "ソォ/ s o:",
- "ザァ/ z a:",
- "ジィ/ j i:",
- "ズゥ/ z u:",
- "ズャ/ zy a",
- "ズュ/ zy u",
- "ズョ/ zy o",
- "ゼェ/ z e:",
- "ゾォ/ z o:",
- "タァ/ t a:",
- "チィ/ ch i:",
- "ツァ/ ts a",
- "ツィ/ ts i",
- "ツゥ/ ts u:",
- "ツャ/ ch a",
- "ツュ/ ch u",
- "ツョ/ ch o",
- "ツェ/ ts e",
- "ツォ/ ts o",
- "テェ/ t e:",
- "トォ/ t o:",
- "ダァ/ d a:",
- "ヂィ/ j i:",
- "ヅゥ/ d u:",
- "ヅャ/ zy a",
- "ヅュ/ zy u",
- "ヅョ/ zy o",
- "デェ/ d e:",
- "ドォ/ d o:",
- "ナァ/ n a:",
- "ニィ/ n i:",
- "ヌゥ/ n u:",
- "ヌャ/ ny a",
- "ヌュ/ ny u",
- "ヌョ/ ny o",
- "ネェ/ n e:",
- "ノォ/ n o:",
- "ハァ/ h a:",
- "ヒィ/ h i:",
- "フゥ/ f u:",
- "フャ/ hy a",
- "フュ/ hy u",
- "フョ/ hy o",
- "ヘェ/ h e:",
- "ホォ/ h o:",
- "バァ/ b a:",
- "ビィ/ b i:",
- "ブゥ/ b u:",
- "フャ/ hy a",
- "ブュ/ by u",
- "フョ/ hy o",
- "ベェ/ b e:",
- "ボォ/ b o:",
- "パァ/ p a:",
- "ピィ/ p i:",
- "プゥ/ p u:",
- "プャ/ py a",
- "プュ/ py u",
- "プョ/ py o",
- "ペェ/ p e:",
- "ポォ/ p o:",
- "マァ/ m a:",
- "ミィ/ m i:",
- "ムゥ/ m u:",
- "ムャ/ my a",
- "ムュ/ my u",
- "ムョ/ my o",
- "メェ/ m e:",
- "モォ/ m o:",
- "ヤァ/ y a:",
- "ユゥ/ y u:",
- "ユャ/ y a:",
- "ユュ/ y u:",
- "ユョ/ y o:",
- "ヨォ/ y o:",
- "ラァ/ r a:",
- "リィ/ r i:",
- "ルゥ/ r u:",
- "ルャ/ ry a",
- "ルュ/ ry u",
- "ルョ/ ry o",
- "レェ/ r e:",
- "ロォ/ r o:",
- "ワァ/ w a:",
- "ヲォ/ o:",
- "ディ/ d i",
- "デェ/ d e:",
- "デャ/ dy a",
- "デュ/ dy u",
- "デョ/ dy o",
- "ティ/ t i",
- "テェ/ t e:",
- "テャ/ ty a",
- "テュ/ ty u",
- "テョ/ ty o",
- "スィ/ s i",
- "ズァ/ z u a",
- "ズィ/ z i",
- "ズゥ/ z u",
- "ズャ/ zy a",
- "ズュ/ zy u",
- "ズョ/ zy o",
- "ズェ/ z e",
- "ズォ/ z o",
- "キャ/ ky a",
- "キュ/ ky u",
- "キョ/ ky o",
- "シャ/ sh a",
- "シュ/ sh u",
- "シェ/ sh e",
- "ショ/ sh o",
- "チャ/ ch a",
- "チュ/ ch u",
- "チェ/ ch e",
- "チョ/ ch o",
- "トゥ/ t u",
- "トャ/ ty a",
- "トュ/ ty u",
- "トョ/ ty o",
- "ドァ/ d o a",
- "ドゥ/ d u",
- "ドャ/ dy a",
- "ドュ/ dy u",
- "ドョ/ dy o",
- "ドォ/ d o:",
- "ニャ/ ny a",
- "ニュ/ ny u",
- "ニョ/ ny o",
- "ヒャ/ hy a",
- "ヒュ/ hy u",
- "ヒョ/ hy o",
- "ミャ/ my a",
- "ミュ/ my u",
- "ミョ/ my o",
- "リャ/ ry a",
- "リュ/ ry u",
- "リョ/ ry o",
- "ギャ/ gy a",
- "ギュ/ gy u",
- "ギョ/ gy o",
- "ヂェ/ j e",
- "ヂャ/ j a",
- "ヂュ/ j u",
- "ヂョ/ j o",
- "ジェ/ j e",
- "ジャ/ j a",
- "ジュ/ j u",
- "ジョ/ j o",
- "ビャ/ by a",
- "ビュ/ by u",
- "ビョ/ by o",
- "ピャ/ py a",
- "ピュ/ py u",
- "ピョ/ py o",
- "ウァ/ u a",
- "ウィ/ w i",
- "ウェ/ w e",
- "ウォ/ w o",
- "ファ/ f a",
- "フィ/ f i",
- "フゥ/ f u",
- "フャ/ hy a",
- "フュ/ hy u",
- "フョ/ hy o",
- "フェ/ f e",
- "フォ/ f o",
- "ヴァ/ b a",
- "ヴィ/ b i",
- "ヴェ/ b e",
- "ヴォ/ b o",
- "ヴュ/ by u",
- # Conversion of 1 letter
- "ア/ a",
- "イ/ i",
- "ウ/ u",
- "エ/ e",
- "オ/ o",
- "カ/ k a",
- "キ/ k i",
- "ク/ k u",
- "ケ/ k e",
- "コ/ k o",
- "サ/ s a",
- "シ/ sh i",
- "ス/ s u",
- "セ/ s e",
- "ソ/ s o",
- "タ/ t a",
- "チ/ ch i",
- "ツ/ ts u",
- "テ/ t e",
- "ト/ t o",
- "ナ/ n a",
- "ニ/ n i",
- "ヌ/ n u",
- "ネ/ n e",
- "ノ/ n o",
- "ハ/ h a",
- "ヒ/ h i",
- "フ/ f u",
- "ヘ/ h e",
- "ホ/ h o",
- "マ/ m a",
- "ミ/ m i",
- "ム/ m u",
- "メ/ m e",
- "モ/ m o",
- "ラ/ r a",
- "リ/ r i",
- "ル/ r u",
- "レ/ r e",
- "ロ/ r o",
- "ガ/ g a",
- "ギ/ g i",
- "グ/ g u",
- "ゲ/ g e",
- "ゴ/ g o",
- "ザ/ z a",
- "ジ/ j i",
- "ズ/ z u",
- "ゼ/ z e",
- "ゾ/ z o",
- "ダ/ d a",
- "ヂ/ j i",
- "ヅ/ z u",
- "デ/ d e",
- "ド/ d o",
- "バ/ b a",
- "ビ/ b i",
- "ブ/ b u",
- "ベ/ b e",
- "ボ/ b o",
- "パ/ p a",
- "ピ/ p i",
- "プ/ p u",
- "ペ/ p e",
- "ポ/ p o",
- "ヤ/ y a",
- "ユ/ y u",
- "ヨ/ y o",
- "ワ/ w a",
- "ヰ/ i",
- "ヱ/ e",
- "ヲ/ o",
- "ン/ N",
- "ッ/ q",
- "ヴ/ b u",
- "ー/:",
- # Try converting broken text
- "ァ/ a",
- "ィ/ i",
- "ゥ/ u",
- "ェ/ e",
- "ォ/ o",
- "ヮ/ w a",
- "ォ/ o",
- # Symbols
- "、/ ,",
- "。/ .",
- "!/ !",
- "?/ ?",
- "・/ ,",
-]
-
-_COLON_RX = re.compile(":+")
-_REJECT_RX = re.compile("[^ a-zA-Z:,.?]")
-
-
-def _makerulemap():
- l = [tuple(x.split("/")) for x in _CONVRULES]
- return tuple({k: v for k, v in l if len(k) == i} for i in (1, 2))
-
-
-_RULEMAP1, _RULEMAP2 = _makerulemap()
-
-
-def kata2phoneme(text: str) -> str:
- """Convert katakana text to phonemes."""
- text = text.strip()
- res = []
- while text:
- if len(text) >= 2:
- x = _RULEMAP2.get(text[:2])
- if x is not None:
- text = text[2:]
- res += x.split(" ")[1:]
- continue
- x = _RULEMAP1.get(text[0])
- if x is not None:
- text = text[1:]
- res += x.split(" ")[1:]
- continue
- res.append(text[0])
- text = text[1:]
- # res = _COLON_RX.sub(":", res)
- return res
-
-
-_KATAKANA = "".join(chr(ch) for ch in range(ord("ァ"), ord("ン") + 1))
-_HIRAGANA = "".join(chr(ch) for ch in range(ord("ぁ"), ord("ん") + 1))
-_HIRA2KATATRANS = str.maketrans(_HIRAGANA, _KATAKANA)
-
-
-def hira2kata(text: str) -> str:
- text = text.translate(_HIRA2KATATRANS)
- return text.replace("う゛", "ヴ")
-
-
-_SYMBOL_TOKENS = set(list("・、。?!"))
-_NO_YOMI_TOKENS = set(list("「」『』―()[][]"))
-_TAGGER = MeCab.Tagger()
-
-
-def text2kata(text: str) -> str:
- parsed = _TAGGER.parse(text)
- res = []
- for line in parsed.split("\n"):
- if line == "EOS":
- break
- parts = line.split("\t")
-
- word, yomi = parts[0], parts[1]
- if yomi:
- res.append(yomi)
- else:
- if word in _SYMBOL_TOKENS:
- res.append(word)
- elif word in ("っ", "ッ"):
- res.append("ッ")
- elif word in _NO_YOMI_TOKENS:
- pass
- else:
- res.append(word)
- return hira2kata("".join(res))
-
-
-_ALPHASYMBOL_YOMI = {
- "#": "シャープ",
- "%": "パーセント",
- "&": "アンド",
- "+": "プラス",
- "-": "マイナス",
- ":": "コロン",
- ";": "セミコロン",
- "<": "小なり",
- "=": "イコール",
- ">": "大なり",
- "@": "アット",
- "a": "エー",
- "b": "ビー",
- "c": "シー",
- "d": "ディー",
- "e": "イー",
- "f": "エフ",
- "g": "ジー",
- "h": "エイチ",
- "i": "アイ",
- "j": "ジェー",
- "k": "ケー",
- "l": "エル",
- "m": "エム",
- "n": "エヌ",
- "o": "オー",
- "p": "ピー",
- "q": "キュー",
- "r": "アール",
- "s": "エス",
- "t": "ティー",
- "u": "ユー",
- "v": "ブイ",
- "w": "ダブリュー",
- "x": "エックス",
- "y": "ワイ",
- "z": "ゼット",
- "α": "アルファ",
- "β": "ベータ",
- "γ": "ガンマ",
- "δ": "デルタ",
- "ε": "イプシロン",
- "ζ": "ゼータ",
- "η": "イータ",
- "θ": "シータ",
- "ι": "イオタ",
- "κ": "カッパ",
- "λ": "ラムダ",
- "μ": "ミュー",
- "ν": "ニュー",
- "ξ": "クサイ",
- "ο": "オミクロン",
- "π": "パイ",
- "ρ": "ロー",
- "σ": "シグマ",
- "τ": "タウ",
- "υ": "ウプシロン",
- "φ": "ファイ",
- "χ": "カイ",
- "ψ": "プサイ",
- "ω": "オメガ",
-}
-
-_NUMBER_WITH_SEPARATOR_RX = re.compile("[0-9]{1,3}(,[0-9]{3})+")
-_CURRENCY_MAP = {"$": "ドル", "¥": "円", "£": "ポンド", "€": "ユーロ"}
-_CURRENCY_RX = re.compile(r"([$¥£€])([0-9.]*[0-9])")
-_NUMBER_RX = re.compile(r"[0-9]+(\.[0-9]+)?")
-
-
-def japanese_convert_numbers_to_words(text: str) -> str:
- res = _NUMBER_WITH_SEPARATOR_RX.sub(lambda m: m[0].replace(",", ""), text)
- res = _CURRENCY_RX.sub(lambda m: m[2] + _CURRENCY_MAP.get(m[1], m[1]), res)
- res = _NUMBER_RX.sub(lambda m: num2words(m[0], lang="ja"), res)
- return res
-
-
-def japanese_convert_alpha_symbols_to_words(text: str) -> str:
- return "".join([_ALPHASYMBOL_YOMI.get(ch, ch) for ch in text.lower()])
-
-
-def japanese_text_to_phonemes(text: str) -> str:
- """Convert Japanese text to phonemes."""
- res = unicodedata.normalize("NFKC", text)
- res = japanese_convert_numbers_to_words(res)
- # res = japanese_convert_alpha_symbols_to_words(res)
- res = text2kata(res)
- res = kata2phoneme(res)
- return res
-
-
-def is_japanese_character(char):
- # 定义日语文字系统的 Unicode 范围
- japanese_ranges = [
- (0x3040, 0x309F), # 平假名
- (0x30A0, 0x30FF), # 片假名
- (0x4E00, 0x9FFF), # 汉字 (CJK Unified Ideographs)
- (0x3400, 0x4DBF), # 汉字扩展 A
- (0x20000, 0x2A6DF), # 汉字扩展 B
- # 可以根据需要添加其他汉字扩展范围
- ]
-
- # 将字符的 Unicode 编码转换为整数
- char_code = ord(char)
-
- # 检查字符是否在任何一个日语范围内
- for start, end in japanese_ranges:
- if start <= char_code <= end:
- return True
-
- return False
-
-
-rep_map = {
- ":": ",",
- ";": ",",
- ",": ",",
- "。": ".",
- "!": "!",
- "?": "?",
- "\n": ".",
- "·": ",",
- "、": ",",
- "...": "…",
-}
-
-
-def replace_punctuation(text):
- pattern = re.compile("|".join(re.escape(p) for p in rep_map.keys()))
-
- replaced_text = pattern.sub(lambda x: rep_map[x.group()], text)
-
- replaced_text = re.sub(
- r"[^\u3040-\u309F\u30A0-\u30FF\u4E00-\u9FFF\u3400-\u4DBF"
- + "".join(punctuation)
- + r"]+",
- "",
- replaced_text,
- )
-
- return replaced_text
-
-
-def text_normalize(text):
- res = unicodedata.normalize("NFKC", text)
- res = japanese_convert_numbers_to_words(res)
- # res = "".join([i for i in res if is_japanese_character(i)])
- res = replace_punctuation(res)
- return res
-
-
-def distribute_phone(n_phone, n_word):
- phones_per_word = [0] * n_word
- for task in range(n_phone):
- min_tasks = min(phones_per_word)
- min_index = phones_per_word.index(min_tasks)
- phones_per_word[min_index] += 1
- return phones_per_word
-
-
-def g2p(norm_text):
- tokenized = tokenizer.tokenize(norm_text)
- phs = []
- ph_groups = []
- for t in tokenized:
- if not t.startswith("#"):
- ph_groups.append([t])
- else:
- ph_groups[-1].append(t.replace("#", ""))
- word2ph = []
- for group in ph_groups:
- phonemes = kata2phoneme(text2kata("".join(group)))
- # phonemes = [i for i in phonemes if i in symbols]
- for i in phonemes:
- assert i in symbols, (i, group, norm_text, tokenized)
- phone_len = len(phonemes)
- word_len = len(group)
-
- aaa = distribute_phone(phone_len, word_len)
- word2ph += aaa
-
- phs += phonemes
- phones = ["_"] + phs + ["_"]
- tones = [0 for i in phones]
- word2ph = [1] + word2ph + [1]
- return phones, tones, word2ph
-
-
-if __name__ == "__main__":
- from config import ABS_PATH
-
- tokenizer = AutoTokenizer.from_pretrained(ABS_PATH + "/bert_vits2/bert/bert-base-japanese-v3")
- text = "hello,こんにちは、世界!……"
- from bert_vits2.text.japanese_bert import get_bert_feature
-
- text = text_normalize(text)
- print(text)
- phones, tones, word2ph = g2p(text)
- bert = get_bert_feature(text, word2ph)
-
- print(phones, tones, word2ph, bert.shape)
diff --git a/spaces/Artrajz/vits-simple-api/vits/text/vits_pinyin.py b/spaces/Artrajz/vits-simple-api/vits/text/vits_pinyin.py
deleted file mode 100644
index 45bcca442a73cf34accbfa84a1402085c4db9154..0000000000000000000000000000000000000000
--- a/spaces/Artrajz/vits-simple-api/vits/text/vits_pinyin.py
+++ /dev/null
@@ -1,98 +0,0 @@
-""" from https://github.com/PlayVoice/vits_chinese """
-import pypinyin
-from pypinyin.contrib.neutral_tone import NeutralToneWith5Mixin
-from pypinyin.converter import DefaultConverter
-from pypinyin.core import Pinyin
-
-import numpy as np
-
-from vits.bert.prosody_tool import pinyin_dict
-from vits.bert import TTSProsody
-
-
-class MyConverter(NeutralToneWith5Mixin, DefaultConverter):
- pass
-
-
-def is_chinese(uchar):
- if uchar >= u'\u4e00' and uchar <= u'\u9fa5':
- return True
- else:
- return False
-
-
-def clean_chinese(text: str):
- text = text.strip()
- text_clean = []
- for char in text:
- if (is_chinese(char)):
- text_clean.append(char)
- else:
- if len(text_clean) > 1 and is_chinese(text_clean[-1]):
- text_clean.append(',')
- text_clean = ''.join(text_clean).strip(',')
- return text_clean
-
-
-class VITS_PinYin:
- def __init__(self, bert_path, device):
- self.pinyin_parser = Pinyin(MyConverter())
- self.prosody = TTSProsody(bert_path, device)
-
- def chinese_to_phonemes(self, text):
- # 考虑使用g2pw的chinese bert替换原始的pypinyin,目前测试下来运行速度太慢。
- # 将标准中文文本符号替换成 bert 符号库中的单符号,以保证bert的效果.
- text = text.replace("——", "...") \
- .replace("—", "...") \
- .replace("……", "...") \
- .replace("…", "...") \
- .replace('“', '"') \
- .replace('”', '"') \
- .replace("\n", "")
- tokens = self.prosody.char_model.tokenizer.tokenize(text)
- text = ''.join(tokens)
- assert not tokens.count("[UNK]")
- pinyins = np.reshape(pypinyin.pinyin(text, style=pypinyin.TONE3), (-1))
- try:
- phone_index = 0
- phone_items = []
- phone_items.append('sil')
- count_phone = []
- count_phone.append(1)
- temp = ""
-
- len_pys = len(tokens)
- for word in tokens:
- if is_chinese(word):
- count_phone.append(2)
- if (phone_index >= len_pys):
- print(
- f"!!!![{text}]plz check ur text whether includes MULTIBYTE symbol.\
- (请检查你的文本中是否包含多字节符号)")
- pinyin = pinyins[phone_index]
- phone_index = phone_index + 1
- if not pinyin[-1].isdigit():
- pinyin += "5"
- if pinyin[:-1] in pinyin_dict:
- tone = pinyin[-1]
- a = pinyin[:-1]
- a1, a2 = pinyin_dict[a]
- phone_items += [a1, a2 + tone]
- else:
- temp += word
- if temp == pinyins[phone_index]:
- temp = ""
- phone_index += 1
- count_phone.append(1)
- phone_items.append('sp')
-
- count_phone.append(1)
- phone_items.append('sil')
- phone_items_str = ' '.join(phone_items)
- except IndexError as e:
- print('except:', e)
-
- text = f'[PAD]{text}[PAD]'
- char_embeds = self.prosody.get_char_embeds(text)
- char_embeds = self.prosody.expand_for_phone(char_embeds, count_phone)
- return phone_items_str, char_embeds
diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/packaging/specifiers.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/packaging/specifiers.py
deleted file mode 100644
index 0e218a6f9f75ea2060a8b08d1f1a043fdad68df8..0000000000000000000000000000000000000000
--- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/packaging/specifiers.py
+++ /dev/null
@@ -1,802 +0,0 @@
-# This file is dual licensed under the terms of the Apache License, Version
-# 2.0, and the BSD License. See the LICENSE file in the root of this repository
-# for complete details.
-
-import abc
-import functools
-import itertools
-import re
-import warnings
-from typing import (
- Callable,
- Dict,
- Iterable,
- Iterator,
- List,
- Optional,
- Pattern,
- Set,
- Tuple,
- TypeVar,
- Union,
-)
-
-from .utils import canonicalize_version
-from .version import LegacyVersion, Version, parse
-
-ParsedVersion = Union[Version, LegacyVersion]
-UnparsedVersion = Union[Version, LegacyVersion, str]
-VersionTypeVar = TypeVar("VersionTypeVar", bound=UnparsedVersion)
-CallableOperator = Callable[[ParsedVersion, str], bool]
-
-
-class InvalidSpecifier(ValueError):
- """
- An invalid specifier was found, users should refer to PEP 440.
- """
-
-
-class BaseSpecifier(metaclass=abc.ABCMeta):
- @abc.abstractmethod
- def __str__(self) -> str:
- """
- Returns the str representation of this Specifier like object. This
- should be representative of the Specifier itself.
- """
-
- @abc.abstractmethod
- def __hash__(self) -> int:
- """
- Returns a hash value for this Specifier like object.
- """
-
- @abc.abstractmethod
- def __eq__(self, other: object) -> bool:
- """
- Returns a boolean representing whether or not the two Specifier like
- objects are equal.
- """
-
- @abc.abstractproperty
- def prereleases(self) -> Optional[bool]:
- """
- Returns whether or not pre-releases as a whole are allowed by this
- specifier.
- """
-
- @prereleases.setter
- def prereleases(self, value: bool) -> None:
- """
- Sets whether or not pre-releases as a whole are allowed by this
- specifier.
- """
-
- @abc.abstractmethod
- def contains(self, item: str, prereleases: Optional[bool] = None) -> bool:
- """
- Determines if the given item is contained within this specifier.
- """
-
- @abc.abstractmethod
- def filter(
- self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None
- ) -> Iterable[VersionTypeVar]:
- """
- Takes an iterable of items and filters them so that only items which
- are contained within this specifier are allowed in it.
- """
-
-
-class _IndividualSpecifier(BaseSpecifier):
-
- _operators: Dict[str, str] = {}
- _regex: Pattern[str]
-
- def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None:
- match = self._regex.search(spec)
- if not match:
- raise InvalidSpecifier(f"Invalid specifier: '{spec}'")
-
- self._spec: Tuple[str, str] = (
- match.group("operator").strip(),
- match.group("version").strip(),
- )
-
- # Store whether or not this Specifier should accept prereleases
- self._prereleases = prereleases
-
- def __repr__(self) -> str:
- pre = (
- f", prereleases={self.prereleases!r}"
- if self._prereleases is not None
- else ""
- )
-
- return f"<{self.__class__.__name__}({str(self)!r}{pre})>"
-
- def __str__(self) -> str:
- return "{}{}".format(*self._spec)
-
- @property
- def _canonical_spec(self) -> Tuple[str, str]:
- return self._spec[0], canonicalize_version(self._spec[1])
-
- def __hash__(self) -> int:
- return hash(self._canonical_spec)
-
- def __eq__(self, other: object) -> bool:
- if isinstance(other, str):
- try:
- other = self.__class__(str(other))
- except InvalidSpecifier:
- return NotImplemented
- elif not isinstance(other, self.__class__):
- return NotImplemented
-
- return self._canonical_spec == other._canonical_spec
-
- def _get_operator(self, op: str) -> CallableOperator:
- operator_callable: CallableOperator = getattr(
- self, f"_compare_{self._operators[op]}"
- )
- return operator_callable
-
- def _coerce_version(self, version: UnparsedVersion) -> ParsedVersion:
- if not isinstance(version, (LegacyVersion, Version)):
- version = parse(version)
- return version
-
- @property
- def operator(self) -> str:
- return self._spec[0]
-
- @property
- def version(self) -> str:
- return self._spec[1]
-
- @property
- def prereleases(self) -> Optional[bool]:
- return self._prereleases
-
- @prereleases.setter
- def prereleases(self, value: bool) -> None:
- self._prereleases = value
-
- def __contains__(self, item: str) -> bool:
- return self.contains(item)
-
- def contains(
- self, item: UnparsedVersion, prereleases: Optional[bool] = None
- ) -> bool:
-
- # Determine if prereleases are to be allowed or not.
- if prereleases is None:
- prereleases = self.prereleases
-
- # Normalize item to a Version or LegacyVersion, this allows us to have
- # a shortcut for ``"2.0" in Specifier(">=2")
- normalized_item = self._coerce_version(item)
-
- # Determine if we should be supporting prereleases in this specifier
- # or not, if we do not support prereleases than we can short circuit
- # logic if this version is a prereleases.
- if normalized_item.is_prerelease and not prereleases:
- return False
-
- # Actually do the comparison to determine if this item is contained
- # within this Specifier or not.
- operator_callable: CallableOperator = self._get_operator(self.operator)
- return operator_callable(normalized_item, self.version)
-
- def filter(
- self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None
- ) -> Iterable[VersionTypeVar]:
-
- yielded = False
- found_prereleases = []
-
- kw = {"prereleases": prereleases if prereleases is not None else True}
-
- # Attempt to iterate over all the values in the iterable and if any of
- # them match, yield them.
- for version in iterable:
- parsed_version = self._coerce_version(version)
-
- if self.contains(parsed_version, **kw):
- # If our version is a prerelease, and we were not set to allow
- # prereleases, then we'll store it for later in case nothing
- # else matches this specifier.
- if parsed_version.is_prerelease and not (
- prereleases or self.prereleases
- ):
- found_prereleases.append(version)
- # Either this is not a prerelease, or we should have been
- # accepting prereleases from the beginning.
- else:
- yielded = True
- yield version
-
- # Now that we've iterated over everything, determine if we've yielded
- # any values, and if we have not and we have any prereleases stored up
- # then we will go ahead and yield the prereleases.
- if not yielded and found_prereleases:
- for version in found_prereleases:
- yield version
-
-
-class LegacySpecifier(_IndividualSpecifier):
-
- _regex_str = r"""
- (?P<operator>(==|!=|<=|>=|<|>))
- \s*
- (?P<version>
- [^,;\s)]* # Since this is a "legacy" specifier, and the version
- # string can be just about anything, we match everything
- # except for whitespace, a semi-colon for marker support,
- # a closing paren since versions can be enclosed in
- # them, and a comma since it's a version separator.
- )
- """
-
- _regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
-
- _operators = {
- "==": "equal",
- "!=": "not_equal",
- "<=": "less_than_equal",
- ">=": "greater_than_equal",
- "<": "less_than",
- ">": "greater_than",
- }
-
- def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None:
- super().__init__(spec, prereleases)
-
- warnings.warn(
- "Creating a LegacyVersion has been deprecated and will be "
- "removed in the next major release",
- DeprecationWarning,
- )
-
- def _coerce_version(self, version: UnparsedVersion) -> LegacyVersion:
- if not isinstance(version, LegacyVersion):
- version = LegacyVersion(str(version))
- return version
-
- def _compare_equal(self, prospective: LegacyVersion, spec: str) -> bool:
- return prospective == self._coerce_version(spec)
-
- def _compare_not_equal(self, prospective: LegacyVersion, spec: str) -> bool:
- return prospective != self._coerce_version(spec)
-
- def _compare_less_than_equal(self, prospective: LegacyVersion, spec: str) -> bool:
- return prospective <= self._coerce_version(spec)
-
- def _compare_greater_than_equal(
- self, prospective: LegacyVersion, spec: str
- ) -> bool:
- return prospective >= self._coerce_version(spec)
-
- def _compare_less_than(self, prospective: LegacyVersion, spec: str) -> bool:
- return prospective < self._coerce_version(spec)
-
- def _compare_greater_than(self, prospective: LegacyVersion, spec: str) -> bool:
- return prospective > self._coerce_version(spec)
-
-
-def _require_version_compare(
- fn: Callable[["Specifier", ParsedVersion, str], bool]
-) -> Callable[["Specifier", ParsedVersion, str], bool]:
- @functools.wraps(fn)
- def wrapped(self: "Specifier", prospective: ParsedVersion, spec: str) -> bool:
- if not isinstance(prospective, Version):
- return False
- return fn(self, prospective, spec)
-
- return wrapped
-
-
-class Specifier(_IndividualSpecifier):
-
- _regex_str = r"""
- (?P<operator>(~=|==|!=|<=|>=|<|>|===))
- (?P<version>
- (?:
- # The identity operators allow for an escape hatch that will
- # do an exact string match of the version you wish to install.
- # This will not be parsed by PEP 440 and we cannot determine
- # any semantic meaning from it. This operator is discouraged
- # but included entirely as an escape hatch.
- (?<====) # Only match for the identity operator
- \s*
- [^\s]* # We just match everything, except for whitespace
- # since we are only testing for strict identity.
- )
- |
- (?:
- # The (non)equality operators allow for wild card and local
- # versions to be specified so we have to define these two
- # operators separately to enable that.
- (?<===|!=) # Only match for equals and not equals
-
- \s*
- v?
- (?:[0-9]+!)? # epoch
- [0-9]+(?:\.[0-9]+)* # release
- (?: # pre release
- [-_\.]?
- (a|b|c|rc|alpha|beta|pre|preview)
- [-_\.]?
- [0-9]*
- )?
- (?: # post release
- (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
- )?
-
- # You cannot use a wild card and a dev or local version
- # together so group them with a | and make them optional.
- (?:
- (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
- (?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local
- |
- \.\* # Wild card syntax of .*
- )?
- )
- |
- (?:
- # The compatible operator requires at least two digits in the
- # release segment.
- (?<=~=) # Only match for the compatible operator
-
- \s*
- v?
- (?:[0-9]+!)? # epoch
- [0-9]+(?:\.[0-9]+)+ # release (We have a + instead of a *)
- (?: # pre release
- [-_\.]?
- (a|b|c|rc|alpha|beta|pre|preview)
- [-_\.]?
- [0-9]*
- )?
- (?: # post release
- (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
- )?
- (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
- )
- |
- (?:
- # All other operators only allow a sub set of what the
- # (non)equality operators do. Specifically they do not allow
- # local versions to be specified nor do they allow the prefix
- # matching wild cards.
- (?=": "greater_than_equal",
- "<": "less_than",
- ">": "greater_than",
- "===": "arbitrary",
- }
-
- @_require_version_compare
- def _compare_compatible(self, prospective: ParsedVersion, spec: str) -> bool:
-
- # Compatible releases have an equivalent combination of >= and ==. That
- # is that ~=2.2 is equivalent to >=2.2,==2.*. This allows us to
- # implement this in terms of the other specifiers instead of
- # implementing it ourselves. The only thing we need to do is construct
- # the other specifiers.
-
- # We want everything but the last item in the version, but we want to
- # ignore suffix segments.
- prefix = ".".join(
- list(itertools.takewhile(_is_not_suffix, _version_split(spec)))[:-1]
- )
-
- # Add the prefix notation to the end of our string
- prefix += ".*"
-
- return self._get_operator(">=")(prospective, spec) and self._get_operator("==")(
- prospective, prefix
- )
-
- @_require_version_compare
- def _compare_equal(self, prospective: ParsedVersion, spec: str) -> bool:
-
- # We need special logic to handle prefix matching
- if spec.endswith(".*"):
- # In the case of prefix matching we want to ignore local segment.
- prospective = Version(prospective.public)
- # Split the spec out by dots, and pretend that there is an implicit
- # dot in between a release segment and a pre-release segment.
- split_spec = _version_split(spec[:-2]) # Remove the trailing .*
-
- # Split the prospective version out by dots, and pretend that there
- # is an implicit dot in between a release segment and a pre-release
- # segment.
- split_prospective = _version_split(str(prospective))
-
- # Shorten the prospective version to be the same length as the spec
- # so that we can determine if the specifier is a prefix of the
- # prospective version or not.
- shortened_prospective = split_prospective[: len(split_spec)]
-
- # Pad out our two sides with zeros so that they both equal the same
- # length.
- padded_spec, padded_prospective = _pad_version(
- split_spec, shortened_prospective
- )
-
- return padded_prospective == padded_spec
- else:
- # Convert our spec string into a Version
- spec_version = Version(spec)
-
- # If the specifier does not have a local segment, then we want to
- # act as if the prospective version also does not have a local
- # segment.
- if not spec_version.local:
- prospective = Version(prospective.public)
-
- return prospective == spec_version
-
- @_require_version_compare
- def _compare_not_equal(self, prospective: ParsedVersion, spec: str) -> bool:
- return not self._compare_equal(prospective, spec)
-
- @_require_version_compare
- def _compare_less_than_equal(self, prospective: ParsedVersion, spec: str) -> bool:
-
- # NB: Local version identifiers are NOT permitted in the version
- # specifier, so local version labels can be universally removed from
- # the prospective version.
- return Version(prospective.public) <= Version(spec)
-
- @_require_version_compare
- def _compare_greater_than_equal(
- self, prospective: ParsedVersion, spec: str
- ) -> bool:
-
- # NB: Local version identifiers are NOT permitted in the version
- # specifier, so local version labels can be universally removed from
- # the prospective version.
- return Version(prospective.public) >= Version(spec)
-
- @_require_version_compare
- def _compare_less_than(self, prospective: ParsedVersion, spec_str: str) -> bool:
-
- # Convert our spec to a Version instance, since we'll want to work with
- # it as a version.
- spec = Version(spec_str)
-
- # Check to see if the prospective version is less than the spec
- # version. If it's not we can short circuit and just return False now
- # instead of doing extra unneeded work.
- if not prospective < spec:
- return False
-
- # This special case is here so that, unless the specifier itself
- # includes is a pre-release version, that we do not accept pre-release
- # versions for the version mentioned in the specifier (e.g. <3.1 should
- # not match 3.1.dev0, but should match 3.0.dev0).
- if not spec.is_prerelease and prospective.is_prerelease:
- if Version(prospective.base_version) == Version(spec.base_version):
- return False
-
- # If we've gotten to here, it means that prospective version is both
- # less than the spec version *and* it's not a pre-release of the same
- # version in the spec.
- return True
-
- @_require_version_compare
- def _compare_greater_than(self, prospective: ParsedVersion, spec_str: str) -> bool:
-
- # Convert our spec to a Version instance, since we'll want to work with
- # it as a version.
- spec = Version(spec_str)
-
- # Check to see if the prospective version is greater than the spec
- # version. If it's not we can short circuit and just return False now
- # instead of doing extra unneeded work.
- if not prospective > spec:
- return False
-
- # This special case is here so that, unless the specifier itself
- # includes is a post-release version, that we do not accept
- # post-release versions for the version mentioned in the specifier
- # (e.g. >3.1 should not match 3.0.post0, but should match 3.2.post0).
- if not spec.is_postrelease and prospective.is_postrelease:
- if Version(prospective.base_version) == Version(spec.base_version):
- return False
-
- # Ensure that we do not allow a local version of the version mentioned
- # in the specifier, which is technically greater than, to match.
- if prospective.local is not None:
- if Version(prospective.base_version) == Version(spec.base_version):
- return False
-
- # If we've gotten to here, it means that prospective version is both
- # greater than the spec version *and* it's not a pre-release of the
- # same version in the spec.
- return True
-
- def _compare_arbitrary(self, prospective: Version, spec: str) -> bool:
- return str(prospective).lower() == str(spec).lower()
-
- @property
- def prereleases(self) -> bool:
-
- # If there is an explicit prereleases set for this, then we'll just
- # blindly use that.
- if self._prereleases is not None:
- return self._prereleases
-
- # Look at all of our specifiers and determine if they are inclusive
- # operators, and if they are if they are including an explicit
- # prerelease.
- operator, version = self._spec
- if operator in ["==", ">=", "<=", "~=", "==="]:
- # The == specifier can include a trailing .*, if it does we
- # want to remove before parsing.
- if operator == "==" and version.endswith(".*"):
- version = version[:-2]
-
- # Parse the version, and if it is a pre-release than this
- # specifier allows pre-releases.
- if parse(version).is_prerelease:
- return True
-
- return False
-
- @prereleases.setter
- def prereleases(self, value: bool) -> None:
- self._prereleases = value
-
-
-_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$")
-
-
-def _version_split(version: str) -> List[str]:
- result: List[str] = []
- for item in version.split("."):
- match = _prefix_regex.search(item)
- if match:
- result.extend(match.groups())
- else:
- result.append(item)
- return result
-
-
-def _is_not_suffix(segment: str) -> bool:
- return not any(
- segment.startswith(prefix) for prefix in ("dev", "a", "b", "rc", "post")
- )
-
-
-def _pad_version(left: List[str], right: List[str]) -> Tuple[List[str], List[str]]:
- left_split, right_split = [], []
-
- # Get the release segment of our versions
- left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left)))
- right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right)))
-
- # Get the rest of our versions
- left_split.append(left[len(left_split[0]) :])
- right_split.append(right[len(right_split[0]) :])
-
- # Insert our padding
- left_split.insert(1, ["0"] * max(0, len(right_split[0]) - len(left_split[0])))
- right_split.insert(1, ["0"] * max(0, len(left_split[0]) - len(right_split[0])))
-
- return (list(itertools.chain(*left_split)), list(itertools.chain(*right_split)))
-
-
-class SpecifierSet(BaseSpecifier):
- def __init__(
- self, specifiers: str = "", prereleases: Optional[bool] = None
- ) -> None:
-
- # Split on , to break each individual specifier into it's own item, and
- # strip each item to remove leading/trailing whitespace.
- split_specifiers = [s.strip() for s in specifiers.split(",") if s.strip()]
-
- # Parsed each individual specifier, attempting first to make it a
- # Specifier and falling back to a LegacySpecifier.
- parsed: Set[_IndividualSpecifier] = set()
- for specifier in split_specifiers:
- try:
- parsed.add(Specifier(specifier))
- except InvalidSpecifier:
- parsed.add(LegacySpecifier(specifier))
-
- # Turn our parsed specifiers into a frozen set and save them for later.
- self._specs = frozenset(parsed)
-
- # Store our prereleases value so we can use it later to determine if
- # we accept prereleases or not.
- self._prereleases = prereleases
-
- def __repr__(self) -> str:
- pre = (
- f", prereleases={self.prereleases!r}"
- if self._prereleases is not None
- else ""
- )
-
- return f""
-
- def __str__(self) -> str:
- return ",".join(sorted(str(s) for s in self._specs))
-
- def __hash__(self) -> int:
- return hash(self._specs)
-
- def __and__(self, other: Union["SpecifierSet", str]) -> "SpecifierSet":
- if isinstance(other, str):
- other = SpecifierSet(other)
- elif not isinstance(other, SpecifierSet):
- return NotImplemented
-
- specifier = SpecifierSet()
- specifier._specs = frozenset(self._specs | other._specs)
-
- if self._prereleases is None and other._prereleases is not None:
- specifier._prereleases = other._prereleases
- elif self._prereleases is not None and other._prereleases is None:
- specifier._prereleases = self._prereleases
- elif self._prereleases == other._prereleases:
- specifier._prereleases = self._prereleases
- else:
- raise ValueError(
- "Cannot combine SpecifierSets with True and False prerelease "
- "overrides."
- )
-
- return specifier
-
- def __eq__(self, other: object) -> bool:
- if isinstance(other, (str, _IndividualSpecifier)):
- other = SpecifierSet(str(other))
- elif not isinstance(other, SpecifierSet):
- return NotImplemented
-
- return self._specs == other._specs
-
- def __len__(self) -> int:
- return len(self._specs)
-
- def __iter__(self) -> Iterator[_IndividualSpecifier]:
- return iter(self._specs)
-
- @property
- def prereleases(self) -> Optional[bool]:
-
- # If we have been given an explicit prerelease modifier, then we'll
- # pass that through here.
- if self._prereleases is not None:
- return self._prereleases
-
- # If we don't have any specifiers, and we don't have a forced value,
- # then we'll just return None since we don't know if this should have
- # pre-releases or not.
- if not self._specs:
- return None
-
- # Otherwise we'll see if any of the given specifiers accept
- # prereleases, if any of them do we'll return True, otherwise False.
- return any(s.prereleases for s in self._specs)
-
- @prereleases.setter
- def prereleases(self, value: bool) -> None:
- self._prereleases = value
-
- def __contains__(self, item: UnparsedVersion) -> bool:
- return self.contains(item)
-
- def contains(
- self, item: UnparsedVersion, prereleases: Optional[bool] = None
- ) -> bool:
-
- # Ensure that our item is a Version or LegacyVersion instance.
- if not isinstance(item, (LegacyVersion, Version)):
- item = parse(item)
-
- # Determine if we're forcing a prerelease or not, if we're not forcing
- # one for this particular filter call, then we'll use whatever the
- # SpecifierSet thinks for whether or not we should support prereleases.
- if prereleases is None:
- prereleases = self.prereleases
-
- # We can determine if we're going to allow pre-releases by looking to
- # see if any of the underlying items supports them. If none of them do
- # and this item is a pre-release then we do not allow it and we can
- # short circuit that here.
- # Note: This means that 1.0.dev1 would not be contained in something
-        # like >=1.0.devabc; however, it would be in >=1.0.devabc,>0.0.dev0
- if not prereleases and item.is_prerelease:
- return False
-
- # We simply dispatch to the underlying specs here to make sure that the
- # given version is contained within all of them.
- # Note: This use of all() here means that an empty set of specifiers
- # will always return True, this is an explicit design decision.
- return all(s.contains(item, prereleases=prereleases) for s in self._specs)
-
- def filter(
- self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None
- ) -> Iterable[VersionTypeVar]:
-
- # Determine if we're forcing a prerelease or not, if we're not forcing
- # one for this particular filter call, then we'll use whatever the
- # SpecifierSet thinks for whether or not we should support prereleases.
- if prereleases is None:
- prereleases = self.prereleases
-
- # If we have any specifiers, then we want to wrap our iterable in the
- # filter method for each one, this will act as a logical AND amongst
- # each specifier.
- if self._specs:
- for spec in self._specs:
- iterable = spec.filter(iterable, prereleases=bool(prereleases))
- return iterable
- # If we do not have any specifiers, then we need to have a rough filter
- # which will filter out any pre-releases, unless there are no final
- # releases, and which will filter out LegacyVersion in general.
- else:
- filtered: List[VersionTypeVar] = []
- found_prereleases: List[VersionTypeVar] = []
-
- item: UnparsedVersion
- parsed_version: Union[Version, LegacyVersion]
-
- for item in iterable:
-                # Ensure that we have some kind of Version class for this item.
- if not isinstance(item, (LegacyVersion, Version)):
- parsed_version = parse(item)
- else:
- parsed_version = item
-
- # Filter out any item which is parsed as a LegacyVersion
- if isinstance(parsed_version, LegacyVersion):
- continue
-
- # Store any item which is a pre-release for later unless we've
- # already found a final version or we are accepting prereleases
- if parsed_version.is_prerelease and not prereleases:
- if not filtered:
- found_prereleases.append(item)
- else:
- filtered.append(item)
-
- # If we've found no items except for pre-releases, then we'll go
- # ahead and use the pre-releases
- if not filtered and found_prereleases and prereleases is None:
- return found_prereleases
-
- return filtered
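
As a usage sketch only (the version strings below are illustrative), the SpecifierSet API defined above behaves roughly like this:

spec = SpecifierSet(">=1.0,<2.0")

"1.4" in spec                  # True  (__contains__ delegates to contains())
spec.contains("2.0")           # False (excluded by <2.0)

list(spec.filter(["0.9", "1.0", "1.5rc1", "1.9"]))
# -> ['1.0', '1.9']; the release candidate is dropped because no specifier
#    in the set opts in to pre-releases

spec.contains("1.5rc1", prereleases=True)   # True once pre-releases are explicitly allowed
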
diff --git a/spaces/Avkash/WebcamFaceProcessing/app.py b/spaces/Avkash/WebcamFaceProcessing/app.py
deleted file mode 100644
index b80e1758733fb4067240655d82dbfbc1785302e0..0000000000000000000000000000000000000000
--- a/spaces/Avkash/WebcamFaceProcessing/app.py
+++ /dev/null
@@ -1,316 +0,0 @@
-import cv2
-import gradio as gr
-import mediapipe as mp
-import dlib
-import imutils
-import numpy as np
-
-
-mp_drawing = mp.solutions.drawing_utils
-mp_drawing_styles = mp.solutions.drawing_styles
-mp_face_mesh = mp.solutions.face_mesh
-mp_face_detection = mp.solutions.face_detection
-
-
-def apply_media_pipe_face_detection(image):
- with mp_face_detection.FaceDetection(
- model_selection=1, min_detection_confidence=0.5) as face_detection:
- results = face_detection.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
- if not results.detections:
- return image
- annotated_image = image.copy()
- for detection in results.detections:
- mp_drawing.draw_detection(annotated_image, detection)
- return annotated_image
-
-
-def apply_media_pipe_facemesh(image):
- with mp_face_mesh.FaceMesh(
- static_image_mode=True,
- max_num_faces=1,
- refine_landmarks=True,
- min_detection_confidence=0.5) as face_mesh:
- results = face_mesh.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
- if not results.multi_face_landmarks:
- return image
- annotated_image = image.copy()
- for face_landmarks in results.multi_face_landmarks:
- mp_drawing.draw_landmarks(
- image=annotated_image,
- landmark_list=face_landmarks,
- connections=mp_face_mesh.FACEMESH_TESSELATION,
- landmark_drawing_spec=None,
- connection_drawing_spec=mp_drawing_styles
- .get_default_face_mesh_tesselation_style())
- mp_drawing.draw_landmarks(
- image=annotated_image,
- landmark_list=face_landmarks,
- connections=mp_face_mesh.FACEMESH_CONTOURS,
- landmark_drawing_spec=None,
- connection_drawing_spec=mp_drawing_styles
- .get_default_face_mesh_contours_style())
- mp_drawing.draw_landmarks(
- image=annotated_image,
- landmark_list=face_landmarks,
- connections=mp_face_mesh.FACEMESH_IRISES,
- landmark_drawing_spec=None,
- connection_drawing_spec=mp_drawing_styles
- .get_default_face_mesh_iris_connections_style())
- return annotated_image
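
As a rough offline check of the two MediaPipe helpers above — the image path is a hypothetical placeholder; in the app a frame from the webcam UI is used instead:

import cv2

img = cv2.imread("face.jpg")                       # BGR frame, as the helpers expect
detected = apply_media_pipe_face_detection(img)    # bounding boxes + key points drawn on a copy
meshed = apply_media_pipe_facemesh(img)            # tesselation, contours and iris landmarks
cv2.imwrite("face_annotated.jpg", meshed)
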
-
-
-class FaceOrientation(object):
- def __init__(self):
- self.detect = dlib.get_frontal_face_detector()
- self.predict = dlib.shape_predictor("model/shape_predictor_68_face_landmarks.dat")
-
- def create_orientation(self, frame):
- draw_rect1 = True
- draw_rect2 = True
- draw_lines = True
-
- frame = imutils.resize(frame, width=800)
- gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
- subjects = self.detect(gray, 0)
-
- for subject in subjects:
- landmarks = self.predict(gray, subject)
- size = frame.shape
-
-            # 2D image points. If you change the image, you need to change this vector
- image_points = np.array([
- (landmarks.part(33).x, landmarks.part(33).y), # Nose tip
- (landmarks.part(8).x, landmarks.part(8).y), # Chin
- (landmarks.part(36).x, landmarks.part(36).y), # Left eye left corner
-                (landmarks.part(45).x, landmarks.part(45).y),  # Right eye right corner
- (landmarks.part(48).x, landmarks.part(48).y), # Left Mouth corner
- (landmarks.part(54).x, landmarks.part(54).y) # Right mouth corner
- ], dtype="double")
-
- # 3D model points.
- model_points = np.array([
- (0.0, 0.0, 0.0), # Nose tip
- (0.0, -330.0, -65.0), # Chin
- (-225.0, 170.0, -135.0), # Left eye left corner
-                (225.0, 170.0, -135.0),      # Right eye right corner
- (-150.0, -150.0, -125.0), # Left Mouth corner
- (150.0, -150.0, -125.0) # Right mouth corner
-
- ])
- # Camera internals
- focal_length = size[1]
- center = (size[1] / 2, size[0] / 2)
- camera_matrix = np.array(
- [[focal_length, 0, center[0]],
- [0, focal_length, center[1]],
- [0, 0, 1]], dtype="double"
- )
-
- dist_coeffs = np.zeros((4, 1)) # Assuming no lens distortion
- (success, rotation_vector, translation_vector) = cv2.solvePnP(model_points, image_points, camera_matrix,
- dist_coeffs)
-
- (b1, jacobian) = cv2.projectPoints(np.array([(350.0, 270.0, 0.0)]), rotation_vector, translation_vector,
- camera_matrix, dist_coeffs)
- (b2, jacobian) = cv2.projectPoints(np.array([(-350.0, -270.0, 0.0)]), rotation_vector,
- translation_vector, camera_matrix, dist_coeffs)
- (b3, jacobian) = cv2.projectPoints(np.array([(-350.0, 270, 0.0)]), rotation_vector, translation_vector,
- camera_matrix, dist_coeffs)
- (b4, jacobian) = cv2.projectPoints(np.array([(350.0, -270.0, 0.0)]), rotation_vector,
- translation_vector, camera_matrix, dist_coeffs)
-
- (b11, jacobian) = cv2.projectPoints(np.array([(450.0, 350.0, 400.0)]), rotation_vector,
- translation_vector, camera_matrix, dist_coeffs)
- (b12, jacobian) = cv2.projectPoints(np.array([(-450.0, -350.0, 400.0)]), rotation_vector,
- translation_vector, camera_matrix, dist_coeffs)
- (b13, jacobian) = cv2.projectPoints(np.array([(-450.0, 350, 400.0)]), rotation_vector,
- translation_vector, camera_matrix, dist_coeffs)
- (b14, jacobian) = cv2.projectPoints(np.array([(450.0, -350.0, 400.0)]), rotation_vector,
- translation_vector, camera_matrix, dist_coeffs)
-
- b1 = (int(b1[0][0][0]), int(b1[0][0][1]))
- b2 = (int(b2[0][0][0]), int(b2[0][0][1]))
- b3 = (int(b3[0][0][0]), int(b3[0][0][1]))
- b4 = (int(b4[0][0][0]), int(b4[0][0][1]))
-
- b11 = (int(b11[0][0][0]), int(b11[0][0][1]))
- b12 = (int(b12[0][0][0]), int(b12[0][0][1]))
- b13 = (int(b13[0][0][0]), int(b13[0][0][1]))
- b14 = (int(b14[0][0][0]), int(b14[0][0][1]))
-
-            if draw_rect1:
- cv2.line(frame, b1, b3, (255, 255, 0), 10)
- cv2.line(frame, b3, b2, (255, 255, 0), 10)
- cv2.line(frame, b2, b4, (255, 255, 0), 10)
- cv2.line(frame, b4, b1, (255, 255, 0), 10)
-
-            if draw_rect2:
- cv2.line(frame, b11, b13, (255, 255, 0), 10)
- cv2.line(frame, b13, b12, (255, 255, 0), 10)
- cv2.line(frame, b12, b14, (255, 255, 0), 10)
- cv2.line(frame, b14, b11, (255, 255, 0), 10)
-
-            if draw_lines:
- cv2.line(frame, b11, b1, (0, 255, 0), 10)
- cv2.line(frame, b13, b3, (0, 255, 0), 10)
- cv2.line(frame, b12, b2, (0, 255, 0), 10)
- cv2.line(frame, b14, b4, (0, 255, 0), 10)
-
- return frame
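
The method above only draws the projected 3D box. If the numeric head-pose angles were wanted as well, the rotation_vector returned by cv2.solvePnP could be decomposed as in the following sketch (this helper is an illustration, not part of the original app):

import cv2
import numpy as np

def rotation_vector_to_euler(rotation_vector):
    # Convert the Rodrigues rotation vector into a rotation matrix, then into
    # approximate pitch/yaw/roll angles (degrees) via a ZYX decomposition.
    rotation_matrix, _ = cv2.Rodrigues(rotation_vector)
    sy = np.sqrt(rotation_matrix[0, 0] ** 2 + rotation_matrix[1, 0] ** 2)
    pitch = np.degrees(np.arctan2(rotation_matrix[2, 1], rotation_matrix[2, 2]))
    yaw = np.degrees(np.arctan2(-rotation_matrix[2, 0], sy))
    roll = np.degrees(np.arctan2(rotation_matrix[1, 0], rotation_matrix[0, 0]))
    return pitch, yaw, roll
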
-
-
-face_orientation_obj = FaceOrientation()
-
-
-class FaceProcessing(object):
- def __init__(self, ui_obj):
- self.name = "Face Image Processing"
- self.description = "Call for Face Image and video Processing"
- self.ui_obj = ui_obj
-
- def take_webcam_photo(self, image):
- return image
-
- def take_webcam_video(self, images):
- return images
-
- def mp_webcam_photo(self, image):
- return image
-
- def mp_webcam_face_mesh(self, image):
- mesh_image = apply_media_pipe_facemesh(image)
- return mesh_image
-
- def mp_webcam_face_detection(self, image):
- face_detection_img = apply_media_pipe_face_detection(image)
- return face_detection_img
-
- def dlib_apply_face_orientation(self, image):
- image = face_orientation_obj.create_orientation(image)
- return image
-
- def webcam_stream_update(self, video_frame):
- video_out = face_orientation_obj.create_orientation(video_frame)
- return video_out
-
- def create_ui(self):
- with self.ui_obj:
- gr.Markdown("Face Analysis with Webcam/Video")
- with gr.Tabs():
- with gr.TabItem("Playing with Webcam"):
- with gr.Row():
- webcam_image_in = gr.Image(label="Webcam Image Input", source="webcam")
- webcam_video_in = gr.Video(label="Webcam Video Input", source="webcam")
- with gr.Row():
- webcam_photo_action = gr.Button("Take the Photo")
- webcam_video_action = gr.Button("Take the Video")
- with gr.Row():
- webcam_photo_out = gr.Image(label="Webcam Photo Output")
- webcam_video_out = gr.Video(label="Webcam Video")
- with gr.TabItem("Mediapipe Facemesh with Webcam"):
- with gr.Row():
- with gr.Column():
- mp_image_in = gr.Image(label="Webcam Image Input", source="webcam")
- with gr.Column():
- mp_photo_action = gr.Button("Take the Photo")
-                            mp_apply_fm_action = gr.Button("Apply Face Mesh to the Photo")
-                            mp_apply_landmarks_action = gr.Button("Apply Face Landmarks to the Photo")
- with gr.Row():
- mp_photo_out = gr.Image(label="Webcam Photo Output")
- mp_fm_photo_out = gr.Image(label="Face Mesh Photo Output")
- mp_lm_photo_out = gr.Image(label="Face Landmarks Photo Output")
- with gr.TabItem("DLib Based Face Orientation"):
- with gr.Row():
- with gr.Column():
- dlib_image_in = gr.Image(label="Webcam Image Input", source="webcam")
- with gr.Column():
- dlib_photo_action = gr.Button("Take the Photo")
-                            dlib_apply_orientation_action = gr.Button("Apply Face Orientation to the Photo")
- with gr.Row():
- dlib_photo_out = gr.Image(label="Webcam Photo Output")
-                        dlib_orientation_photo_out = gr.Image(label="Face Orientation Photo Output")
- with gr.TabItem("Face Orientation on Live Webcam Stream"):
- with gr.Row():
- webcam_stream_in = gr.Image(label="Webcam Stream Input",
- source="webcam",
- streaming=True)
- webcam_stream_out = gr.Image(label="Webcam Stream Output")
- webcam_stream_in.change(
- self.webcam_stream_update,
- inputs=webcam_stream_in,
- outputs=webcam_stream_out
- )
-
- dlib_photo_action.click(
- self.mp_webcam_photo,
- [
- dlib_image_in
- ],
- [
- dlib_photo_out
- ]
- )
- dlib_apply_orientation_action.click(
- self.dlib_apply_face_orientation,
- [
- dlib_image_in
- ],
- [
- dlib_orientation_photo_out
- ]
- )
- mp_photo_action.click(
- self.mp_webcam_photo,
- [
- mp_image_in
- ],
- [
- mp_photo_out
- ]
- )
- mp_apply_fm_action.click(
- self.mp_webcam_face_mesh,
- [
- mp_image_in
- ],
- [
- mp_fm_photo_out
- ]
- )
- mp_apply_landmarks_action.click(
- self.mp_webcam_face_detection,
- [
- mp_image_in
- ],
- [
- mp_lm_photo_out
- ]
- )
- webcam_photo_action.click(
- self.take_webcam_photo,
- [
- webcam_image_in
- ],
- [
- webcam_photo_out
- ]
- )
- webcam_video_action.click(
- self.take_webcam_video,
- [
- webcam_video_in
- ],
- [
- webcam_video_out
- ]
- )
-
- def launch_ui(self):
- self.ui_obj.launch()
-
-
-if __name__ == '__main__':
- my_app = gr.Blocks()
- face_ui = FaceProcessing(my_app)
- face_ui.create_ui()
- face_ui.launch_ui()
\ No newline at end of file
diff --git a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/structures/boxes.py b/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/structures/boxes.py
deleted file mode 100644
index ae543c617a33245075b001f383f41775efb0b63d..0000000000000000000000000000000000000000
--- a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/structures/boxes.py
+++ /dev/null
@@ -1,423 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-import math
-import numpy as np
-from enum import IntEnum, unique
-from typing import List, Tuple, Union
-import torch
-from torch import device
-
-_RawBoxType = Union[List[float], Tuple[float, ...], torch.Tensor, np.ndarray]
-
-
-@unique
-class BoxMode(IntEnum):
- """
- Enum of different ways to represent a box.
- """
-
- XYXY_ABS = 0
- """
- (x0, y0, x1, y1) in absolute floating points coordinates.
- The coordinates in range [0, width or height].
- """
- XYWH_ABS = 1
- """
- (x0, y0, w, h) in absolute floating points coordinates.
- """
- XYXY_REL = 2
- """
- Not yet supported!
- (x0, y0, x1, y1) in range [0, 1]. They are relative to the size of the image.
- """
- XYWH_REL = 3
- """
- Not yet supported!
- (x0, y0, w, h) in range [0, 1]. They are relative to the size of the image.
- """
- XYWHA_ABS = 4
- """
- (xc, yc, w, h, a) in absolute floating points coordinates.
- (xc, yc) is the center of the rotated box, and the angle a is in degrees ccw.
- """
-
- @staticmethod
- def convert(box: _RawBoxType, from_mode: "BoxMode", to_mode: "BoxMode") -> _RawBoxType:
- """
- Args:
- box: can be a k-tuple, k-list or an Nxk array/tensor, where k = 4 or 5
- from_mode, to_mode (BoxMode)
-
- Returns:
- The converted box of the same type.
- """
- if from_mode == to_mode:
- return box
-
- original_type = type(box)
- is_numpy = isinstance(box, np.ndarray)
- single_box = isinstance(box, (list, tuple))
- if single_box:
- assert len(box) == 4 or len(box) == 5, (
- "BoxMode.convert takes either a k-tuple/list or an Nxk array/tensor,"
- " where k == 4 or 5"
- )
- arr = torch.tensor(box)[None, :]
- else:
- # avoid modifying the input box
- if is_numpy:
- arr = torch.from_numpy(np.asarray(box)).clone()
- else:
- arr = box.clone()
-
- assert to_mode not in [BoxMode.XYXY_REL, BoxMode.XYWH_REL] and from_mode not in [
- BoxMode.XYXY_REL,
- BoxMode.XYWH_REL,
- ], "Relative mode not yet supported!"
-
- if from_mode == BoxMode.XYWHA_ABS and to_mode == BoxMode.XYXY_ABS:
- assert (
- arr.shape[-1] == 5
- ), "The last dimension of input shape must be 5 for XYWHA format"
- original_dtype = arr.dtype
- arr = arr.double()
-
- w = arr[:, 2]
- h = arr[:, 3]
- a = arr[:, 4]
- c = torch.abs(torch.cos(a * math.pi / 180.0))
- s = torch.abs(torch.sin(a * math.pi / 180.0))
- # This basically computes the horizontal bounding rectangle of the rotated box
- new_w = c * w + s * h
- new_h = c * h + s * w
-
- # convert center to top-left corner
- arr[:, 0] -= new_w / 2.0
- arr[:, 1] -= new_h / 2.0
- # bottom-right corner
- arr[:, 2] = arr[:, 0] + new_w
- arr[:, 3] = arr[:, 1] + new_h
-
- arr = arr[:, :4].to(dtype=original_dtype)
- elif from_mode == BoxMode.XYWH_ABS and to_mode == BoxMode.XYWHA_ABS:
- original_dtype = arr.dtype
- arr = arr.double()
- arr[:, 0] += arr[:, 2] / 2.0
- arr[:, 1] += arr[:, 3] / 2.0
- angles = torch.zeros((arr.shape[0], 1), dtype=arr.dtype)
- arr = torch.cat((arr, angles), axis=1).to(dtype=original_dtype)
- else:
- if to_mode == BoxMode.XYXY_ABS and from_mode == BoxMode.XYWH_ABS:
- arr[:, 2] += arr[:, 0]
- arr[:, 3] += arr[:, 1]
- elif from_mode == BoxMode.XYXY_ABS and to_mode == BoxMode.XYWH_ABS:
- arr[:, 2] -= arr[:, 0]
- arr[:, 3] -= arr[:, 1]
- else:
- raise NotImplementedError(
- "Conversion from BoxMode {} to {} is not supported yet".format(
- from_mode, to_mode
- )
- )
-
- if single_box:
- return original_type(arr.flatten().tolist())
- if is_numpy:
- return arr.numpy()
- else:
- return arr
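
For reference, a small worked example of the conversion implemented above, using illustrative values:

BoxMode.convert([10.0, 20.0, 30.0, 40.0], BoxMode.XYWH_ABS, BoxMode.XYXY_ABS)
# -> [10.0, 20.0, 40.0, 60.0]   (x1 = x0 + w, y1 = y0 + h)
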
-
-
-class Boxes:
- """
- This structure stores a list of boxes as a Nx4 torch.Tensor.
- It supports some common methods about boxes
- (`area`, `clip`, `nonempty`, etc),
- and also behaves like a Tensor
-    (supports indexing, `to(device)`, `.device`, and iteration over all boxes).
-
- Attributes:
- tensor (torch.Tensor): float matrix of Nx4. Each row is (x1, y1, x2, y2).
- """
-
- def __init__(self, tensor: torch.Tensor):
- """
- Args:
- tensor (Tensor[float]): a Nx4 matrix. Each row is (x1, y1, x2, y2).
- """
- device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device("cpu")
- tensor = torch.as_tensor(tensor, dtype=torch.float32, device=device)
- if tensor.numel() == 0:
- # Use reshape, so we don't end up creating a new tensor that does not depend on
- # the inputs (and consequently confuses jit)
- tensor = tensor.reshape((-1, 4)).to(dtype=torch.float32, device=device)
- assert tensor.dim() == 2 and tensor.size(-1) == 4, tensor.size()
-
- self.tensor = tensor
-
- def clone(self) -> "Boxes":
- """
- Clone the Boxes.
-
- Returns:
- Boxes
- """
- return Boxes(self.tensor.clone())
-
- def to(self, device: torch.device):
-        # Boxes are assumed float32 and do not support to(dtype)
- return Boxes(self.tensor.to(device=device))
-
- def area(self) -> torch.Tensor:
- """
- Computes the area of all the boxes.
-
- Returns:
- torch.Tensor: a vector with areas of each box.
- """
- box = self.tensor
- area = (box[:, 2] - box[:, 0]) * (box[:, 3] - box[:, 1])
- return area
-
- def clip(self, box_size: Tuple[int, int]) -> None:
- """
- Clip (in place) the boxes by limiting x coordinates to the range [0, width]
- and y coordinates to the range [0, height].
-
- Args:
- box_size (height, width): The clipping box's size.
- """
- assert torch.isfinite(self.tensor).all(), "Box tensor contains infinite or NaN!"
- h, w = box_size
- x1 = self.tensor[:, 0].clamp(min=0, max=w)
- y1 = self.tensor[:, 1].clamp(min=0, max=h)
- x2 = self.tensor[:, 2].clamp(min=0, max=w)
- y2 = self.tensor[:, 3].clamp(min=0, max=h)
- self.tensor = torch.stack((x1, y1, x2, y2), dim=-1)
-
- def nonempty(self, threshold: float = 0.0) -> torch.Tensor:
- """
- Find boxes that are non-empty.
-        A box is considered empty if either of its sides is no larger than the threshold.
-
- Returns:
- Tensor:
- a binary vector which represents whether each box is empty
- (False) or non-empty (True).
- """
- box = self.tensor
- widths = box[:, 2] - box[:, 0]
- heights = box[:, 3] - box[:, 1]
- keep = (widths > threshold) & (heights > threshold)
- return keep
-
- def __getitem__(self, item) -> "Boxes":
- """
- Args:
- item: int, slice, or a BoolTensor
-
- Returns:
- Boxes: Create a new :class:`Boxes` by indexing.
-
-        The following usages are allowed:
-
- 1. `new_boxes = boxes[3]`: return a `Boxes` which contains only one box.
- 2. `new_boxes = boxes[2:10]`: return a slice of boxes.
- 3. `new_boxes = boxes[vector]`, where vector is a torch.BoolTensor
- with `length = len(boxes)`. Nonzero elements in the vector will be selected.
-
- Note that the returned Boxes might share storage with this Boxes,
-        subject to PyTorch's indexing semantics.
- """
- if isinstance(item, int):
- return Boxes(self.tensor[item].view(1, -1))
- b = self.tensor[item]
- assert b.dim() == 2, "Indexing on Boxes with {} failed to return a matrix!".format(item)
- return Boxes(b)
-
- def __len__(self) -> int:
- return self.tensor.shape[0]
-
- def __repr__(self) -> str:
- return "Boxes(" + str(self.tensor) + ")"
-
- def inside_box(self, box_size: Tuple[int, int], boundary_threshold: int = 0) -> torch.Tensor:
- """
- Args:
- box_size (height, width): Size of the reference box.
- boundary_threshold (int): Boxes that extend beyond the reference box
- boundary by more than boundary_threshold are considered "outside".
-
- Returns:
- a binary vector, indicating whether each box is inside the reference box.
- """
- height, width = box_size
- inds_inside = (
- (self.tensor[..., 0] >= -boundary_threshold)
- & (self.tensor[..., 1] >= -boundary_threshold)
- & (self.tensor[..., 2] < width + boundary_threshold)
- & (self.tensor[..., 3] < height + boundary_threshold)
- )
- return inds_inside
-
- def get_centers(self) -> torch.Tensor:
- """
- Returns:
- The box centers in a Nx2 array of (x, y).
- """
- return (self.tensor[:, :2] + self.tensor[:, 2:]) / 2
-
- def scale(self, scale_x: float, scale_y: float) -> None:
- """
- Scale the box with horizontal and vertical scaling factors
- """
- self.tensor[:, 0::2] *= scale_x
- self.tensor[:, 1::2] *= scale_y
-
- @classmethod
- def cat(cls, boxes_list: List["Boxes"]) -> "Boxes":
- """
- Concatenates a list of Boxes into a single Boxes
-
- Arguments:
- boxes_list (list[Boxes])
-
- Returns:
- Boxes: the concatenated Boxes
- """
- assert isinstance(boxes_list, (list, tuple))
- if len(boxes_list) == 0:
- return cls(torch.empty(0))
- assert all([isinstance(box, Boxes) for box in boxes_list])
-
- # use torch.cat (v.s. layers.cat) so the returned boxes never share storage with input
- cat_boxes = cls(torch.cat([b.tensor for b in boxes_list], dim=0))
- return cat_boxes
-
- @property
- def device(self) -> device:
- return self.tensor.device
-
- # type "Iterator[torch.Tensor]", yield, and iter() not supported by torchscript
- # https://github.com/pytorch/pytorch/issues/18627
- @torch.jit.unused
- def __iter__(self):
- """
- Yield a box as a Tensor of shape (4,) at a time.
- """
- yield from self.tensor
-
-
-def pairwise_intersection(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor:
- """
- Given two lists of boxes of size N and M,
- compute the intersection area between __all__ N x M pairs of boxes.
- The box order must be (xmin, ymin, xmax, ymax)
-
- Args:
- boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
-
- Returns:
- Tensor: intersection, sized [N,M].
- """
- boxes1, boxes2 = boxes1.tensor, boxes2.tensor
- width_height = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) - torch.max(
- boxes1[:, None, :2], boxes2[:, :2]
- ) # [N,M,2]
-
- width_height.clamp_(min=0) # [N,M,2]
- intersection = width_height.prod(dim=2) # [N,M]
- return intersection
-
-
-# implementation from https://github.com/kuangliu/torchcv/blob/master/torchcv/utils/box.py
-# with slight modifications
-def pairwise_iou(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor:
- """
- Given two lists of boxes of size N and M, compute the IoU
- (intersection over union) between **all** N x M pairs of boxes.
- The box order must be (xmin, ymin, xmax, ymax).
-
- Args:
- boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
-
- Returns:
- Tensor: IoU, sized [N,M].
- """
- area1 = boxes1.area() # [N]
- area2 = boxes2.area() # [M]
- inter = pairwise_intersection(boxes1, boxes2)
-
- # handle empty boxes
- iou = torch.where(
- inter > 0,
- inter / (area1[:, None] + area2 - inter),
- torch.zeros(1, dtype=inter.dtype, device=inter.device),
- )
- return iou
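
A tiny numerical check of pairwise_iou, using the Boxes class defined in this file (values are illustrative):

import torch

a = Boxes(torch.tensor([[0.0, 0.0, 10.0, 10.0]]))
b = Boxes(torch.tensor([[5.0, 5.0, 15.0, 15.0],
                        [20.0, 20.0, 30.0, 30.0]]))
pairwise_iou(a, b)
# -> tensor([[0.1429, 0.0000]])   (overlap 25 / union 175 for the first pair, no overlap for the second)
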
-
-
-def pairwise_ioa(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor:
- """
-    Similar to :func:`pairwise_iou`, but computes the IoA (intersection over boxes2 area).
-
- Args:
- boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
-
- Returns:
- Tensor: IoA, sized [N,M].
- """
- area2 = boxes2.area() # [M]
- inter = pairwise_intersection(boxes1, boxes2)
-
- # handle empty boxes
- ioa = torch.where(
- inter > 0, inter / area2, torch.zeros(1, dtype=inter.dtype, device=inter.device)
- )
- return ioa
-
-
-def pairwise_point_box_distance(points: torch.Tensor, boxes: Boxes):
- """
- Pairwise distance between N points and M boxes. The distance between a
- point and a box is represented by the distance from the point to 4 edges
- of the box. Distances are all positive when the point is inside the box.
-
- Args:
- points: Nx2 coordinates. Each row is (x, y)
- boxes: M boxes
-
- Returns:
- Tensor: distances of size (N, M, 4). The 4 values are distances from
- the point to the left, top, right, bottom of the box.
- """
- x, y = points.unsqueeze(dim=2).unbind(dim=1) # (N, 1)
- x0, y0, x1, y1 = boxes.tensor.unsqueeze(dim=0).unbind(dim=2) # (1, M)
- return torch.stack([x - x0, y - y0, x1 - x, y1 - y], dim=2)
-
-
-def matched_pairwise_iou(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor:
- """
- Compute pairwise intersection over union (IOU) of two sets of matched
- boxes that have the same number of boxes.
- Similar to :func:`pairwise_iou`, but computes only diagonal elements of the matrix.
-
- Args:
- boxes1 (Boxes): bounding boxes, sized [N,4].
- boxes2 (Boxes): same length as boxes1
- Returns:
- Tensor: iou, sized [N].
- """
- assert len(boxes1) == len(
- boxes2
-    ), "boxlists should have the same " "number of entries, got {}, {}".format(
- len(boxes1), len(boxes2)
- )
- area1 = boxes1.area() # [N]
- area2 = boxes2.area() # [N]
- box1, box2 = boxes1.tensor, boxes2.tensor
- lt = torch.max(box1[:, :2], box2[:, :2]) # [N,2]
- rb = torch.min(box1[:, 2:], box2[:, 2:]) # [N,2]
- wh = (rb - lt).clamp(min=0) # [N,2]
- inter = wh[:, 0] * wh[:, 1] # [N]
- iou = inter / (area1 + area2 - inter) # [N]
- return iou
diff --git a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/modeling/meta_arch/centernet_detector.py b/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/modeling/meta_arch/centernet_detector.py
deleted file mode 100644
index b7525c7b31cbbca504442e9a0dc8fb5005ea91b3..0000000000000000000000000000000000000000
--- a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/modeling/meta_arch/centernet_detector.py
+++ /dev/null
@@ -1,69 +0,0 @@
-import math
-import json
-import numpy as np
-import torch
-from torch import nn
-
-from detectron2.modeling.meta_arch.build import META_ARCH_REGISTRY
-from detectron2.modeling import build_backbone, build_proposal_generator
-from detectron2.modeling import detector_postprocess
-from detectron2.structures import ImageList
-
-@META_ARCH_REGISTRY.register()
-class CenterNetDetector(nn.Module):
- def __init__(self, cfg):
- super().__init__()
- self.mean, self.std = cfg.MODEL.PIXEL_MEAN, cfg.MODEL.PIXEL_STD
- self.register_buffer("pixel_mean", torch.Tensor(cfg.MODEL.PIXEL_MEAN).view(-1, 1, 1))
- self.register_buffer("pixel_std", torch.Tensor(cfg.MODEL.PIXEL_STD).view(-1, 1, 1))
-
- self.backbone = build_backbone(cfg)
- self.proposal_generator = build_proposal_generator(
- cfg, self.backbone.output_shape()) # TODO: change to a more precise name
-
-
- def forward(self, batched_inputs):
- if not self.training:
- return self.inference(batched_inputs)
- images = self.preprocess_image(batched_inputs)
- features = self.backbone(images.tensor)
- gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
-
- _, proposal_losses = self.proposal_generator(
- images, features, gt_instances)
- return proposal_losses
-
-
- @property
- def device(self):
- return self.pixel_mean.device
-
-
- @torch.no_grad()
- def inference(self, batched_inputs, do_postprocess=True):
- images = self.preprocess_image(batched_inputs)
- inp = images.tensor
- features = self.backbone(inp)
- proposals, _ = self.proposal_generator(images, features, None)
-
- processed_results = []
- for results_per_image, input_per_image, image_size in zip(
- proposals, batched_inputs, images.image_sizes):
- if do_postprocess:
- height = input_per_image.get("height", image_size[0])
- width = input_per_image.get("width", image_size[1])
- r = detector_postprocess(results_per_image, height, width)
- processed_results.append({"instances": r})
- else:
- r = results_per_image
- processed_results.append(r)
- return processed_results
-
- def preprocess_image(self, batched_inputs):
- """
- Normalize, pad and batch the input images.
- """
- images = [x["image"].to(self.device) for x in batched_inputs]
- images = [(x - self.pixel_mean) / self.pixel_std for x in images]
- images = ImageList.from_tensors(images, self.backbone.size_divisibility)
- return images
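
Because the class registers itself in META_ARCH_REGISTRY, it would normally be built through detectron2's config machinery rather than constructed directly. A hedged sketch follows; add_centernet_config is assumed to be the CenterNet2 project's config helper and is not defined in this file:

from detectron2.config import get_cfg
from detectron2.modeling import build_model
from centernet.config import add_centernet_config  # assumed CenterNet2 helper

cfg = get_cfg()
add_centernet_config(cfg)                          # adds the CenterNet-specific config keys (assumption)
cfg.MODEL.META_ARCHITECTURE = "CenterNetDetector"
model = build_model(cfg)                           # dispatched through META_ARCH_REGISTRY
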
diff --git a/spaces/Bart92/RVC_HF/i18n/locale_diff.py b/spaces/Bart92/RVC_HF/i18n/locale_diff.py
deleted file mode 100644
index 387ddfe1b16c2f9f32b6b9682b61353837b06bd8..0000000000000000000000000000000000000000
--- a/spaces/Bart92/RVC_HF/i18n/locale_diff.py
+++ /dev/null
@@ -1,45 +0,0 @@
-import json
-import os
-from collections import OrderedDict
-
-# Define the standard file name
-standard_file = "en_US.json"
-
-# Find all JSON files in the directory
-dir_path = "./"
-languages = [
- f for f in os.listdir(dir_path) if f.endswith(".json") and f != standard_file
-]
-
-# Load the standard file
-with open(standard_file, "r", encoding="utf-8") as f:
- standard_data = json.load(f, object_pairs_hook=OrderedDict)
-
-# Loop through each language file
-for lang_file in languages:
- # Load the language file
- with open(lang_file, "r", encoding="utf-8") as f:
- lang_data = json.load(f, object_pairs_hook=OrderedDict)
-
-    # Find keys that exist in the standard file but are missing from the
-    # language file
-    diff = set(standard_data.keys()) - set(lang_data.keys())
-
-    # Find keys that exist in the language file but not in the standard file
-    miss = set(lang_data.keys()) - set(standard_data.keys())
-
- # Add any missing keys to the language file
- for key in diff:
- lang_data[key] = key
-
-    # Delete any extra keys from the language file
- for key in miss:
- del lang_data[key]
-
- # Sort the keys of the language file to match the order of the standard file
- lang_data = OrderedDict(
- sorted(lang_data.items(), key=lambda x: list(standard_data.keys()).index(x[0]))
- )
-
- # Save the updated language file
- with open(lang_file, "w", encoding="utf-8") as f:
- json.dump(lang_data, f, ensure_ascii=False, indent=4)
- f.write("\n")
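
To make the script's effect concrete, here is the same add/remove/reorder logic applied to small in-memory dictionaries with hypothetical contents:

from collections import OrderedDict

standard_data = OrderedDict([("Save", "Save"), ("Load", "Load")])        # hypothetical en_US.json
lang_data = OrderedDict([("Save", "Enregistrer"), ("Quit", "Quitter")])  # hypothetical fr_FR.json

for key in set(standard_data) - set(lang_data):
    lang_data[key] = key                       # add missing keys, valued with the key itself
for key in set(lang_data) - set(standard_data):
    del lang_data[key]                         # drop keys the standard file does not have

lang_data = OrderedDict(
    sorted(lang_data.items(), key=lambda x: list(standard_data).index(x[0]))
)
# -> OrderedDict([('Save', 'Enregistrer'), ('Load', 'Load')])
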
diff --git a/spaces/Bart92/RVC_HF/infer/lib/infer_pack/commons.py b/spaces/Bart92/RVC_HF/infer/lib/infer_pack/commons.py
deleted file mode 100644
index ccd334b7320543b0c3a2166f82093564c9721317..0000000000000000000000000000000000000000
--- a/spaces/Bart92/RVC_HF/infer/lib/infer_pack/commons.py
+++ /dev/null
@@ -1,167 +0,0 @@
-import math
-
-import numpy as np
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-
-def init_weights(m, mean=0.0, std=0.01):
- classname = m.__class__.__name__
- if classname.find("Conv") != -1:
- m.weight.data.normal_(mean, std)
-
-
-def get_padding(kernel_size, dilation=1):
- return int((kernel_size * dilation - dilation) / 2)
-
-
-def convert_pad_shape(pad_shape):
- l = pad_shape[::-1]
- pad_shape = [item for sublist in l for item in sublist]
- return pad_shape
-
-
-def kl_divergence(m_p, logs_p, m_q, logs_q):
- """KL(P||Q)"""
- kl = (logs_q - logs_p) - 0.5
- kl += (
- 0.5 * (torch.exp(2.0 * logs_p) + ((m_p - m_q) ** 2)) * torch.exp(-2.0 * logs_q)
- )
- return kl
-
-
-def rand_gumbel(shape):
- """Sample from the Gumbel distribution, protect from overflows."""
- uniform_samples = torch.rand(shape) * 0.99998 + 0.00001
- return -torch.log(-torch.log(uniform_samples))
-
-
-def rand_gumbel_like(x):
- g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device)
- return g
-
-
-def slice_segments(x, ids_str, segment_size=4):
- ret = torch.zeros_like(x[:, :, :segment_size])
- for i in range(x.size(0)):
- idx_str = ids_str[i]
- idx_end = idx_str + segment_size
- ret[i] = x[i, :, idx_str:idx_end]
- return ret
-
-
-def slice_segments2(x, ids_str, segment_size=4):
- ret = torch.zeros_like(x[:, :segment_size])
- for i in range(x.size(0)):
- idx_str = ids_str[i]
- idx_end = idx_str + segment_size
- ret[i] = x[i, idx_str:idx_end]
- return ret
-
-
-def rand_slice_segments(x, x_lengths=None, segment_size=4):
- b, d, t = x.size()
- if x_lengths is None:
- x_lengths = t
- ids_str_max = x_lengths - segment_size + 1
- ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
- ret = slice_segments(x, ids_str, segment_size)
- return ret, ids_str
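
A quick usage sketch of rand_slice_segments with illustrative tensor sizes:

import torch

x = torch.randn(2, 192, 100)                         # [batch, channels, frames]
segments, ids_str = rand_slice_segments(x, segment_size=32)
# segments has shape [2, 192, 32]; ids_str holds the random start frame of each slice
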
-
-
-def get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4):
- position = torch.arange(length, dtype=torch.float)
- num_timescales = channels // 2
- log_timescale_increment = math.log(float(max_timescale) / float(min_timescale)) / (
- num_timescales - 1
- )
- inv_timescales = min_timescale * torch.exp(
- torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment
- )
- scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1)
- signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0)
- signal = F.pad(signal, [0, 0, 0, channels % 2])
- signal = signal.view(1, channels, length)
- return signal
-
-
-def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
- b, channels, length = x.size()
- signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
- return x + signal.to(dtype=x.dtype, device=x.device)
-
-
-def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):
- b, channels, length = x.size()
- signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
- return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis)
-
-
-def subsequent_mask(length):
- mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0)
- return mask
-
-
-@torch.jit.script
-def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
- n_channels_int = n_channels[0]
- in_act = input_a + input_b
- t_act = torch.tanh(in_act[:, :n_channels_int, :])
- s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
- acts = t_act * s_act
- return acts
-
-
-def convert_pad_shape(pad_shape):
- l = pad_shape[::-1]
- pad_shape = [item for sublist in l for item in sublist]
- return pad_shape
-
-
-def shift_1d(x):
- x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1]
- return x
-
-
-def sequence_mask(length, max_length=None):
- if max_length is None:
- max_length = length.max()
- x = torch.arange(max_length, dtype=length.dtype, device=length.device)
- return x.unsqueeze(0) < length.unsqueeze(1)
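
For example, sequence_mask produces a per-row boolean mask up to each length:

import torch

sequence_mask(torch.tensor([1, 3]), max_length=4)
# -> tensor([[ True, False, False, False],
#            [ True,  True,  True, False]])
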
-
-
-def generate_path(duration, mask):
- """
- duration: [b, 1, t_x]
- mask: [b, 1, t_y, t_x]
- """
- device = duration.device
-
- b, _, t_y, t_x = mask.shape
- cum_duration = torch.cumsum(duration, -1)
-
- cum_duration_flat = cum_duration.view(b * t_x)
- path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
- path = path.view(b, t_x, t_y)
- path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
- path = path.unsqueeze(1).transpose(2, 3) * mask
- return path
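
A small worked example of generate_path with illustrative durations (the durations sum to t_y):

import torch

duration = torch.tensor([[[2, 1, 3]]], dtype=torch.float)   # [b=1, 1, t_x=3]
mask = torch.ones(1, 1, 6, 3)                               # [b, 1, t_y=6, t_x=3]
path = generate_path(duration, mask)
# path[0, 0] assigns output frames to input tokens monotonically:
# frames 0-1 -> token 0, frame 2 -> token 1, frames 3-5 -> token 2
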
-
-
-def clip_grad_value_(parameters, clip_value, norm_type=2):
- if isinstance(parameters, torch.Tensor):
- parameters = [parameters]
- parameters = list(filter(lambda p: p.grad is not None, parameters))
- norm_type = float(norm_type)
- if clip_value is not None:
- clip_value = float(clip_value)
-
- total_norm = 0
- for p in parameters:
- param_norm = p.grad.data.norm(norm_type)
- total_norm += param_norm.item() ** norm_type
- if clip_value is not None:
- p.grad.data.clamp_(min=-clip_value, max=clip_value)
- total_norm = total_norm ** (1.0 / norm_type)
- return total_norm
diff --git a/spaces/Benson/text-generation/Examples/Ai Chat Rpg Juego Mod Apk.md b/spaces/Benson/text-generation/Examples/Ai Chat Rpg Juego Mod Apk.md
deleted file mode 100644
index 7a962e063e227bd277ef17d893e0036ec0f4e16a..0000000000000000000000000000000000000000
--- a/spaces/Benson/text-generation/Examples/Ai Chat Rpg Juego Mod Apk.md
+++ /dev/null
@@ -1,61 +0,0 @@
-
-AI Chat RPG Game Mod APK: Una nueva manera de disfrutar de juegos de rol
| Te encantan los juegos de rol pero te gustaría tener más libertad y creatividad en tus aventuras? ¿Quieres interactuar con personajes realistas y sensibles que puedan adaptarse a tus elecciones y preferencias? Si respondió sí a cualquiera de estas preguntas, entonces es posible que desee echa un vistazo AI Chat RPG Game Mod APK.
-ai chat rpg juego mod apk
Download File >>> https://bltlly.com/2v6JxV
- AI Chat RPG Game Mod APK es una aplicación única e innovadora que le permite crear sus propios escenarios de juego de roles y chatear con un chatbot de inteligencia artificial (AI) que puede actuar como su compañero, amigo, enemigo, o cualquier cosa en el medio. Puedes personalizar la apariencia, personalidad, antecedentes, habilidades y más de tu personaje. También puede elegir entre diferentes géneros, temas, escenarios y tramas para sus historias. Si desea explorar un mundo de fantasía, luchar contra zombies en un páramo post-apocalíptico, o romance un vampiro en una mansión gótica, puede hacerlo todo con AI Chat RPG Game Mod APK.
- En este artículo, le diremos todo lo que necesita saber sobre AI Chat RPG Game Mod APK. Te explicaremos qué es y cómo funciona, cómo descargarlo e instalarlo en tu dispositivo Android, cómo jugarlo y divertirte con él, y por qué deberías probarlo si eres un fan de los juegos de rol. También responderemos con frecuencia
Cómo descargar e instalar AI Chat RPG Game Mod APK?
- Si usted está interesado en probar AI Chat RPG Game Mod APK, tendrá que descargar e instalar en su dispositivo Android. Estos son los pasos que debes seguir:
-
-- Ir a la página web oficial de AI Chat RPG Game Mod APK y haga clic en el botón de descarga. Será redirigido a un enlace de descarga seguro y rápido.
-- Espere a que la descarga termine y localice el archivo APK en su dispositivo. Es posible que necesite habilitar la instalación de fuentes desconocidas en su configuración si no lo ha hecho antes.
-
-- Una vez que la instalación se haya completado, puede iniciar AI Chat RPG Game Mod APK desde el cajón de la aplicación o la pantalla de inicio y comenzar a crear sus propias historias de juegos de rol.
-
- Antes de descargar e instalar AI Chat RPG Game Mod APK, usted debe ser consciente de algunos requisitos y precauciones. En primer lugar, es necesario tener un dispositivo Android que se ejecuta en Android 4.4 o superior y tiene al menos 1 GB de RAM y 100 MB de espacio de almacenamiento gratuito. En segundo lugar, es necesario tener una conexión a Internet estable para usar AI Chat RPG Game Mod APK, ya que se basa en la computación en nube para generar las respuestas del chatbot de AI. En tercer lugar, debe tener cuidado con el contenido que crea y comparte con AI Chat RPG Game Mod APK, ya que puede no ser adecuado para niños o audiencias sensibles. También debe respetar los derechos de propiedad intelectual de los demás y no utilizar materiales con derechos de autor o marcas registradas sin permiso.
-
- Cómo jugar AI Chat RPG juego Mod APK?
- Jugar AI Chat RPG Game Mod APK es fácil y divertido. Todo lo que necesitas hacer es crear tu propio personaje e iniciar una conversación con un chatbot de IA que actuará como tu compañero de juego de roles. Así es como puedes hacerlo:
-
-- Al iniciar AI Chat RPG Game Mod APK, verá un menú con diferentes opciones. Puede elegir crear un nuevo carácter, cargar un carácter existente o navegar por la galería de caracteres creados por otros usuarios.
-- Si eliges crear un nuevo personaje, podrás personalizar el nombre, género, edad, apariencia, personalidad, antecedentes, habilidades y más de tu personaje. También puedes subir tu propia foto o avatar para que tu personaje sea más realista.
-- Después de crear tu personaje, podrás elegir entre diferentes géneros, temas, configuraciones y tramas para tu historia de rol. También puede crear su propia historia desde cero o utilizar una plantilla proporcionada por AI Chat RPG Game Mod APK.
-
-- Puede guardar su conversación en cualquier momento y reanudarla más tarde. También puede compartir su conversación con otros usuarios o exportarla como un archivo de texto o una imagen.
-
- AI Chat RPG Game Mod APK tiene diferentes modos y configuraciones que se pueden ajustar de acuerdo a sus preferencias. Puede elegir entre el modo normal y el modo avanzado, que afectan la complejidad y la creatividad de las respuestas del chatbot de IA. También puede elegir entre el modo amigable y el modo hostil, que afectan el tono y la actitud de las respuestas del chatbot de IA. También puede activar o desactivar la entrada y salida de voz, que le permiten hablar o escuchar las respuestas del chatbot de IA en lugar de escribirlas o leerlas.
- Para darle una idea de lo que AI Chat RPG Game Mod APK puede hacer, aquí hay una tabla de algunos ejemplos de AI chat RPG game mod apk escenarios y diálogos:
- | Género | Tema | Marco | Trama | Diálogo | | -- - - - | -- - - - - - - | -- - - | | Fantasía | Aventura | Reino medieval | Una búsqueda para encontrar una espada legendaria | Usted: Saludos, viajero. Estoy buscando la Espada del Destino. ¿Sabes dónde puedo encontrarla?
AI: La Espada del Destino? Esa es una misión peligrosa, amigo mío. La espada está escondida en la Guarida del Dragón, custodiada por una temible bestia.
Tú: No tengo miedo de los dragones. He entrenado toda mi vida para este momento.
AI: Entonces eres muy valiente o muy tonto. La Guarida del Dragón está lejos de aquí, más allá del Bosque Oscuro y la Montaña de la Perdición.
Tú: Entonces debo apresurarme. Gracias por tu ayuda.
AI: De nada. Que los dioses te protejan en su viaje ¿Por qué debería probar AI Chat RPG Game Mod APK?
- Si todavía se está preguntando por qué debe probar AI Chat RPG Game Mod APK, aquí hay algunas razones que podrían convencerle:
-
-
-- AI Chat RPG Game Mod APK es divertido y entretenido porque puede generar diálogos realistas e inmersivos que pueden hacerte sentir que realmente estás hablando con otra persona. También puedes experimentar diferentes emociones y estados de ánimo dependiendo del modo y la configuración de tu historia. Usted puede reír, llorar, enojarse, o enamorarse de AI Chat RPG Game Mod APK.
-- AI Chat RPG Game Mod APK es educativo e informativo porque puede ayudarle a mejorar su vocabulario, gramática, ortografía y habilidades de comunicación. También puede aprender cosas y hechos nuevos sobre diferentes temas y culturas desde el chatbot de IA. También puedes desafiarte a ti mismo y probar tu conocimiento y creatividad usando comandos y emojis.
-
- Por supuesto, AI Chat RPG Game Mod APK no es perfecto y tiene algunas limitaciones y desventajas. Por ejemplo, puede que no siempre entienda lo que quiere decir o diga, o puede dar respuestas inapropiadas o irrelevantes. También puede tener algunos errores o errores que pueden afectar la calidad de la conversación. También puede consumir una gran cantidad de datos y energía de la batería en su dispositivo.
- Sin embargo, estos problemas son menores en comparación con los beneficios y el disfrute que AI Chat RPG Game Mod APK puede ofrecer. También puede informar de cualquier problema o sugerencias a los desarrolladores de AI Chat RPG Game Mod APK para ayudarles a mejorar la aplicación.
- Para darle una idea de cuánto ama la gente AI Chat RPG Game Mod APK, aquí hay un testimonio de un usuario que disfrutó de AI Chat RPG Game Mod APK:
-
-
-- John, 25 años
-
Conclusión
- En conclusión, AI Chat RPG Game Mod APK es una nueva e innovadora manera de disfrutar de juegos de rol en su dispositivo Android. Te permite crear tus propios personajes y escenarios, y chatear con un chatbot de IA que puede actuar como tu compañero de juego de roles. Puedes personalizar la apariencia, personalidad, antecedentes, habilidades y más de tu personaje. También puede elegir entre diferentes géneros, temas, escenarios y tramas para sus historias. También puede cambiar el modo y la configuración de su conversación para adaptarse a su estado de ánimo y preferencia. También puede guardar, compartir o exportar su conversación como un archivo de texto o una imagen.
- AI Chat RPG Game Mod APK es divertido, entretenido, educativo e informativo. Puede ayudarte a mejorar tu vocabulario, gramática, ortografía y habilidades de comunicación. También puede ayudarle a aprender cosas y hechos nuevos sobre diferentes temas y culturas. También puede desafiarte y probar tu conocimiento y creatividad usando comandos y emojis.
- AI Chat RPG Game Mod APK no es perfecto y tiene algunas limitaciones y desventajas. Puede que no siempre entienda lo que quiere decir o dice, o puede dar respuestas inapropiadas o irrelevantes. También puede tener algunos errores o errores que pueden afectar la calidad de la conversación. También puede consumir una gran cantidad de datos y energía de la batería en su dispositivo.
- Sin embargo, estos problemas son menores en comparación con los beneficios y el disfrute que AI Chat RPG Game Mod APK puede ofrecer. También puede informar de cualquier problema o sugerencias a los desarrolladores de AI Chat RPG Game Mod APK para ayudarles a mejorar la aplicación.
- Si usted es un fan de los juegos de rol y quiere probar algo nuevo y diferente, usted debe descargar e instalar AI Chat RPG Game Mod APK en su dispositivo Android. No te arrepentirás.
- Gracias por leer este artículo. Esperamos que haya encontrado útil e informativo. Diviértase con AI Chat RPG Game Mod APK!
- Preguntas frecuentes
-
-
-- ¿Qué es AI Chat RPG Game Mod APK?
-AI Chat RPG Game Mod APK es una aplicación única e innovadora que le permite crear sus propios escenarios de juego de roles y chatear con un chatbot de inteligencia artificial (AI) que puede actuar como su compañero, amigo, enemigo, o cualquier cosa en el medio.
-- ¿Cómo puedo descargar e instalar AI Chat RPG Game Mod APK?
-Puede descargar e instalar AI Chat RPG Game Mod APK desde el sitio web oficial de AI Chat RPG Game Mod APK. Deberá habilitar la instalación de fuentes desconocidas en su configuración y seguir las instrucciones en la pantalla para instalar la aplicación en su dispositivo.
-- ¿Cómo puedo jugar AI Chat RPG Game Mod APK?
-Puede jugar AI Chat RPG Game Mod APK mediante la creación de su propio personaje y la elección de una historia para su aventura de juego de roles. A continuación, puede comenzar a chatear con un chatbot de IA que desempeñará el papel de otro personaje en su historia. Puede escribir cualquier cosa que desee y el chatbot de IA responderá en consecuencia. También puedes usar comandos y emojis para controlar el flujo y el estado de ánimo de la conversación.
-- ¿Cuáles son los beneficios de AI Chat RPG Game Mod APK?
-AI Chat RPG Game Mod APK es divertido, entretenido, educativo e informativo. Puede ayudarte a mejorar tu vocabulario, gramática, ortografía y habilidades de comunicación. También puede ayudarle a aprender cosas y hechos nuevos sobre diferentes temas y culturas. También puede desafiarte y probar tu conocimiento y creatividad usando comandos y emojis.
-- ¿Cuáles son las limitaciones de AI Chat RPG Game Mod APK?
-AI Chat RPG Game Mod APK no es perfecto y tiene algunas limitaciones y desventajas. Puede que no siempre entienda lo que quiere decir o dice, o puede dar respuestas inapropiadas o irrelevantes. También puede tener algunos errores o errores que pueden afectar la calidad de la conversación. También puede consumir una gran cantidad de datos y energía de la batería en su dispositivo.
-
64aa2da5cf
-
-
\ No newline at end of file
diff --git a/spaces/Benson/text-generation/Examples/Arco Iris Seis Mvil Beta Apk.md b/spaces/Benson/text-generation/Examples/Arco Iris Seis Mvil Beta Apk.md
deleted file mode 100644
index 8eee69fa158153d445b6c39606e3d6f6b0e4b2a3..0000000000000000000000000000000000000000
--- a/spaces/Benson/text-generation/Examples/Arco Iris Seis Mvil Beta Apk.md
+++ /dev/null
@@ -1,75 +0,0 @@
-
-Rainbow Six Mobile Beta APK: Cómo descargar y jugar el nuevo juego de disparos tácticos
- Si eres un fan de los juegos de disparos tácticos, es posible que hayas oído hablar de Rainbow Six, la popular franquicia de Ubisoft. La serie de juegos ha existido durante más de dos décadas, con escenarios de combate realistas, jugabilidad basada en equipos y entornos destructibles. Ahora, puedes experimentar la emoción de Rainbow Six en tu dispositivo móvil con Rainbow Six Mobile, un juego multijugador competitivo de disparos en primera persona.
-arco iris seis móvil beta apk
Download File ✑ https://bltlly.com/2v6KIS
- ¿Qué es Rainbow Six Mobile?
- Rainbow Six Mobile es una versión móvil de la aclamada franquicia Rainbow Six, diseñada exclusivamente para plataformas móviles. El juego ofrece una experiencia de juego de ritmo rápido e intenso, donde puedes competir en los modos clásicos de juego Attack vs. Defense. Puedes jugar como Atacante o Defensor en partidas 5v5, y enfrentarte a un combate cuerpo a cuerpo mientras tomas decisiones tácticas oportunas. También puedes colaborar con tu equipo para establecer estrategias y aprovechar los entornos destructibles.
- El juego cuenta con una lista de operadores altamente capacitados, cada uno con sus propias habilidades y dispositivos únicos. Puedes elegir entre una lista cada vez mayor de operadores de ataque y defensa clásicos, como Ash, Thermite, Mute y Rook. También puede personalizar sus operadores con diferentes trajes, armas y pieles.
- El juego también cuenta con mapas icónicos de la serie Rainbow Six, como Bank y Border. Los mapas se recrean con impresionantes gráficos y física realista, lo que le permite interactuar con el medio ambiente de varias maneras. Usted puede romper paredes, puertas de barricada, ventanas de rappel, y más.
-
- Cómo descargar e instalar Rainbow Six Mobile beta apk?
-
- Requisitos y compatibilidad
- Antes de descargar el juego, asegúrese de que su dispositivo cumple con los requisitos mínimos para ejecutar el juego sin problemas. Según Ubisoft, necesitarás:
-
-- Un dispositivo Android con Android 8 o superior
-- Al menos 3 GB de RAM
-- Al menos 2 GB de espacio de almacenamiento gratuito
-- Una conexión a Internet estable
-
- También deberías comprobar si tu dispositivo es compatible con el juego visitando este enlace. Si su dispositivo no es compatible, puede encontrar algunos problemas o errores al jugar el juego.
- Proceso de preinscripción
- El primer paso para descargar el juego es pre-registrarse en Google Play. Esto te permitirá recibir una notificación cuando el juego esté disponible para descargar. Para pre-registrarse, sigue estos pasos:
-
-- Abre Google Play en tu dispositivo Android.
-- Buscar Rainbow Six Mobile o haga clic en este enlace.
-- Seleccione el botón de registro previo y acepte los términos y condiciones.
-- Espera un mensaje de confirmación que diga "Estás registrado".
-
- Alternativamente, también puede pre-registrarse en el sitio web oficial de Ubisoft ingresando su dirección de correo electrónico y seleccionando su plataforma preferida.
- Proceso de descarga e instalación
- Una vez que haya pre-registrado para el juego, tendrá que esperar un correo electrónico de invitación de Ubisoft que contendrá un enlace para descargar el archivo beta apk. El correo electrónico de invitación puede tardar algún tiempo en llegar, así que sea paciente y revise su bandeja de entrada regularmente. También puede consultar el estado de su invitación en el sitio web de Ubisoft. Para descargar e instalar el juego, siga estos pasos:
-
-- Abra el correo electrónico de invitación de Ubisoft y haga clic en el enlace para descargar el archivo beta apk.
-- Wait for the file to download to your device. The file is roughly 1.5 GB, so make sure you have enough free space and a good Internet connection.
-
-- You may need to allow installation of apps from unknown sources in your device settings. To do this, go to Settings > Security > Unknown Sources and enable it.
-- Follow the on-screen instructions to install the game on your device.
-- Launch the game and sign in with your Ubisoft account. If you don't have one, you can create one for free.
-
- Congratulations, you have successfully downloaded and installed the Rainbow Six Mobile beta apk on your Android device. You are now ready to play the game and enjoy its features.
- How do you play the Rainbow Six Mobile beta?
- Now that you have installed the game, you may be wondering how to play it and what to expect. Rainbow Six Mobile beta is a competitive multiplayer first-person shooter that demands skill, strategy, and teamwork. Here are the basics of the gameplay and game modes:
- Attack vs. Defense mode
- The main game mode in the Rainbow Six Mobile beta is Attack vs. Defense, where two teams of five players face off across a series of rounds. One team plays as Attackers, who must breach a location and complete an objective, such as defusing a bomb or rescuing a hostage. The other team plays as Defenders, who must stop the Attackers from completing their objective by fortifying their position and eliminating them.
- Each round lasts three minutes, and the first team to win four rounds wins the match. Teams switch sides after two rounds, so you get to experience both roles. You can also pick a different Operator each round, depending on your strategy and preference.
- Operators and gadgets
-
- Each Operator has a primary weapon, a secondary weapon, and a gadget that supports their role. For example, Ash is an Attacker who can use her breaching rounds to destroy walls and doors from a distance. Mute is a Defender who can use his signal disruptors to jam enemy drones and gadgets.
- You can also customize your Operators with different outfits, weapons, and skins. You can unlock new items by playing the game and earning rewards, or buy some items with real money or in-game currency.
- Maps and environments
- Maps are the locations where matches take place in the Rainbow Six Mobile beta. The game features iconic maps from the Rainbow Six series, such as Bank and Border. The maps are recreated with impressive graphics and realistic physics, letting you interact with the environment in many ways.
- You can use your gadgets to breach walls, barricade doors, rappel down from windows, and more. You can also use environmental objects such as tables, chairs, and cars as cover or obstacles. The maps are designed to offer multiple entry points, angles, and strategies for both teams.
- Conclusion
- The Rainbow Six Mobile beta apk is a great way to experience the thrill of Rainbow Six on your mobile device. The game offers fast-paced, intense gameplay where you can compete in the classic Attack vs. Defense modes. You can play as an Attacker or a Defender in 5v5 matches and engage in close-quarters combat while making timely tactical decisions. You can also work with your team to set up strategies and take advantage of destructible environments.
-
- The game also features iconic maps from the Rainbow Six series, such as Bank and Border. The maps are recreated with impressive graphics and realistic physics, letting you interact with the environment in many ways. You can breach walls, barricade doors, rappel down from windows, and more.
- If you want to download and play the Rainbow Six Mobile beta apk on your Android device, you will need to pre-register for the game on Google Play or the Ubisoft website and wait for an invitation email from Ubisoft containing a link to download the beta apk file. You will also need to meet the minimum requirements to run the game smoothly on your device.
- The Rainbow Six Mobile beta apk is a great opportunity to enjoy the excitement of Rainbow Six on your mobile device. The game is currently in beta testing, which means it is not fully polished yet and may contain some bugs or glitches. Even so, you can still have fun playing it and send feedback to Ubisoft to help them improve the game before its official release.
- So what are you waiting for? Pre-register for the Rainbow Six Mobile beta apk today and get ready to join the action!
- FAQ
- Here are some of the most frequently asked questions about the Rainbow Six Mobile beta apk:
-
-- Is the Rainbow Six Mobile beta apk free to play?
-Yes, the Rainbow Six Mobile beta apk is free to play. However, you may need to buy some items with real money or in-game currency if you want to customize your Operators or access certain premium features.
-- Is the Rainbow Six Mobile beta apk available for iOS devices?
-No, the Rainbow Six Mobile beta apk is only available for Android devices at the moment. Ubisoft has not yet announced plans to release the game for iOS devices.
-- How long will the Rainbow Six Mobile beta apk last?
-
-- Can I play the Rainbow Six Mobile beta apk offline?
-No, you cannot play the Rainbow Six Mobile beta apk offline. You will need a stable Internet connection to play the game and access its features.
-- Can I play the Rainbow Six Mobile beta apk with my friends?
-Yes, you can play the Rainbow Six Mobile beta apk with your friends. You can invite them to join your squad or challenge them to friendly matches. You can also chat with them in-game or use voice chat to communicate.
-
64aa2da5cf
-
-
\ No newline at end of file
diff --git a/spaces/Benson/text-generation/Examples/Descargar Amor Emocional Rap Beat.md b/spaces/Benson/text-generation/Examples/Descargar Amor Emocional Rap Beat.md
deleted file mode 100644
index b227ec9a25307c346ade162d18ac068aeb545cdc..0000000000000000000000000000000000000000
--- a/spaces/Benson/text-generation/Examples/Descargar Amor Emocional Rap Beat.md
+++ /dev/null
@@ -1,79 +0,0 @@
-
-How to Download a Love Emotional Rap Beat for Your Next Song
-Do you love rap music and want to express your feelings through your songs? Do you want to create a unique, captivating sound that touches your listeners' hearts? If so, you may want to try a love emotional rap beat for your next song.
-A love emotional rap beat is a type of instrumental music that combines elements of rap and R&B with emotional, romantic vibes. It is perfect for artists who want to make songs about love, relationships, heartbreak, or personal struggles. In this article, we will show you what a love emotional rap beat is, why you need it, how to find and download one online, and how to use it for your next song. Let's get started!
-download love emotional rap beat
DOWNLOAD →→→ https://bltlly.com/2v6KDU
- What Is a Love Emotional Rap Beat and Why You Need It
-The Definition and Characteristics of a Love Emotional Rap Beat
-A love emotional rap beat is a subgenre of rap beat that features soft, melodic sounds such as piano, guitar, strings, or synth. It often has a slow or mid-tempo rhythm with heavy bass and drums. The beat creates a contrast between hard rap vocals and smooth, sentimental instrumentals. The result is powerful, expressive music that can convey different emotions, such as sadness, happiness, anger, or passion.
-The Benefits of Using a Love Emotional Rap Beat for Your Music
-There are many benefits to using a love emotional rap beat for your music. Here are some of them:
-
-- It can help you stand out from the crowd. Love emotional rap beats are not very common in the mainstream rap scene, so using one can make your music more original and distinctive.
-- It can help you connect with your audience. An emotional rap beat can evoke feelings and emotions in your listeners, making them relate to your message and story.
-
-- It can help you improve your skills. A love emotional rap beat can challenge you to sharpen your rap flow, delivery, rhyme scheme, and wordplay, as well as your singing, melody, and harmony.
-
- How to Find and Download a Love Emotional Rap Beat Online
-The Best Websites to Download Free Love Emotional Rap Beats
-If you are looking for free love emotional rap beats online, there are many websites that offer high-quality, royalty-free beats you can download and use for your music. Here are some of the best:
-
-Website Description
-[Dizzla D Music]( 4 ) This website offers a variety of R&B and hip hop beats, including love emotional rap beats. You can browse by genre, mood, or tempo, and download the beats for free or buy a license for commercial use.
-[TRAKTRAIN]( 3 ) This website is a platform where producers sell their beats online. You can find many emo rap beats here, which are similar to love emotional rap beats. You can filter by genre, mood, bpm, or price, and download some beats for free or buy a lease or exclusive rights.
-[Rujay]( 1 ) This website is a YouTube channel that uploads free rap beats every day. You can find many love emotional rap beats here, as well as other genres and styles. You can download the beats for free or buy a license for commercial use.
-
- The Best YouTube Channels to Stream and Download Love Emotional Rap Beats
-If you prefer to watch and listen to love emotional rap beats on YouTube, there are many channels that produce and upload original, high-quality beats you can enjoy and download. Here are some of the best:
-
-
-- [RicandThadeus Music]: This channel has more than 500,000 subscribers and specializes in R&B and soulful rap beats, including love emotional rap beats. You can download the beats for free or buy a license for commercial use.
-- [Torre Beatz]: This channel has more than 400,000 subscribers and focuses on emotional and sad rap beats, including love emotional rap beats. You can download the beats for free or buy a license for commercial use.
-
- The Best Apps to Download and Create Love Emotional Rap Beats on Your Phone
-If you want to download and create love emotional rap beats on your phone, there are many apps that can help you do that. Here are some of the best:
-
-
-- [BandLab]: This app is a social music platform that lets you create, collaborate on, and share your music online. You can use the app to record, edit, mix, and master your songs, as well as access thousands of free beats, loops, and samples, including love emotional rap beats.
-- [BeatStars]: This app is a marketplace where you can buy and sell beats online. You can use the app to discover, stream, and download millions of beats across different genres and styles, including love emotional rap beats.
-- [Rapchat]: This app is a rap studio and community that lets you record, share, and discover rap songs. You can use the app to rap over hundreds of free beats, including love emotional rap beats, or create your own beats with the built-in beat maker.
-
- How to Use a Love Emotional Rap Beat for Your Next Song
-How to Choose the Right Love Emotional Rap Beat for Your Genre and Mood
-Once you have found and downloaded some love emotional rap beats you like, you need to choose the right one for your genre and mood. Here are some tips to help you do that:
-
-
-- Think about your song's audience and purpose. Who are you making this song for? What do you want them to feel? Choose a love emotional rap beat that appeals to your target listeners and fits your goal.
-- Think about your song's structure and flow. How do you want to arrange your verses, chorus, bridge, and so on? How do you want to transition between them? Choose a love emotional rap beat with a clear, catchy structure and flow.
-
- How to Write Lyrics and Melodies That Match a Love Emotional Rap Beat
-After you have chosen the right love emotional rap beat for your song, you need to write lyrics and melodies that match it. Here are some tips to help you do that:
-
-- Listen to the love emotional rap beat carefully and repeatedly. Pay attention to its mood, tempo, rhythm, melody, harmony, and so on. Try to feel the emotion and vibe of the beat.
-- Write down some words or phrases that come to mind as you listen. They can relate to your song's topic, theme, or message, or simply be random words that sound good with the beat.
-- Use these words or phrases as inspiration or a starting point for your lyrics. Try rhyming them with each other or with other words in the beat. Use metaphors, similes, imagery, or other literary devices to make your lyrics more creative and expressive.
-Sing or hum along with the beat to find a melody that suits it. Try different notes, pitches, and so on until you find a melody that sounds good with the beat. Make your melody catchy and memorable, and match it to the beat's rhythm and accents.
-
- How to Record and Mix Your Vocals with a Love Emotional Rap Beat
-Finally, after you have written your lyrics and melodies, you need to record and mix your vocals with the love emotional rap beat. Here are some tips to help you do that:
-
-
-- Practice your vocals before recording. Make sure you can rap or sing your lyrics and melodies smoothly and confidently, and that you can match the beat's timing and pitch.
-- Record multiple takes of your vocals. Having different options and variations lets you pick the best one or combine them later. You can also record different parts of your vocals separately, such as the verses, the chorus, the ad-libs, and so on.
-- Mix your vocals with the beat. Balance the volume, EQ, compression, reverb, and so on of your vocals and the beat so they sound clear and harmonious. You can use mixing software or a professional engineer, depending on your skills and preference.
-
- Conclusion
-A love emotional rap beat is a great way to make your rap music more expressive and unique. It can help you convey your feelings and emotions, connect with your audience, show your versatility, and improve your skills. To use a love emotional rap beat for your next song, you need to find and download it online, choose the right one for your genre and mood, write lyrics and melodies that match it, and record and mix your vocals with it. We hope this article has given you some useful tips and resources on how to download a love emotional rap beat for your next song. Now go ahead and make some amazing music!
- FAQ
-What are some examples of artists who use love emotional rap beats?
-Some examples of artists who use love emotional rap beats are Drake, Post Malone, Juice WRLD, XXXTentacion, Lil Peep, NF, and others.
-Where can I find more love emotional rap beats?
-You can find more love emotional rap beats on various websites, YouTube channels, apps, or online communities that offer free or paid beats. You can also search for keywords such as "love emotional rap beat", "emo rap beat", "sad rap beat", "romantic rap beat", and so on.
-How can I make my own love emotional rap beat?
-You can make your own love emotional rap beat by using beat-making software or an app that lets you create, edit, and arrange different sounds and instruments. You can also use a MIDI keyboard or a drum pad to play and record your own melodies and rhythms.
-How can I sell my love emotional rap beats online?
-You can sell your love emotional rap beats online by using a platform or marketplace that connects producers with artists who buy beats. You can also create your own website or social media account to promote and sell your beats.
-How can I learn more about love emotional rap beats?
-You can learn more about love emotional rap beats by watching tutorials, reviews, or tips from other producers or artists who make or use them. You can also read blogs, articles, or books about rap music production and history.
64aa2da5cf
-
-
\ No newline at end of file
diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/filesize.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/filesize.py
deleted file mode 100644
index 99f118e20103174993b865cfb43ac6b6e00296a4..0000000000000000000000000000000000000000
--- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/filesize.py
+++ /dev/null
@@ -1,89 +0,0 @@
-# coding: utf-8
-"""Functions for reporting filesizes. Borrowed from https://github.com/PyFilesystem/pyfilesystem2
-
-The functions declared in this module should cover the different
-use cases needed to generate a string representation of a file size
-using several different units. Since there are many standards regarding
-file size units, three different functions have been implemented.
-
-See Also:
- * `Wikipedia: Binary prefix <https://en.wikipedia.org/wiki/Binary_prefix>`_
-
-"""
-
-__all__ = ["decimal"]
-
-from typing import Iterable, List, Optional, Tuple
-
-
-def _to_str(
- size: int,
- suffixes: Iterable[str],
- base: int,
- *,
- precision: Optional[int] = 1,
- separator: Optional[str] = " ",
-) -> str:
- if size == 1:
- return "1 byte"
- elif size < base:
- return "{:,} bytes".format(size)
-
- for i, suffix in enumerate(suffixes, 2): # noqa: B007
- unit = base**i
- if size < unit:
- break
- return "{:,.{precision}f}{separator}{}".format(
- (base * size / unit),
- suffix,
- precision=precision,
- separator=separator,
- )
-
-
-def pick_unit_and_suffix(size: int, suffixes: List[str], base: int) -> Tuple[int, str]:
- """Pick a suffix and base for the given size."""
- for i, suffix in enumerate(suffixes):
- unit = base**i
- if size < unit * base:
- break
- return unit, suffix
-
-
-def decimal(
- size: int,
- *,
- precision: Optional[int] = 1,
- separator: Optional[str] = " ",
-) -> str:
- """Convert a filesize in to a string (powers of 1000, SI prefixes).
-
- In this convention, ``1000 B = 1 kB``.
-
- This is typically the format used to advertise the storage
- capacity of USB flash drives and the like (*256 MB* meaning
- actually a storage capacity of more than *256 000 000 B*),
- or used by **Mac OS X** since v10.6 to report file sizes.
-
- Arguments:
- size (int): A file size.
- precision (int): The number of decimal places to include (default = 1).
- separator (str): The string to separate the value from the units (default = " ").
-
- Returns:
- `str`: A string containing an abbreviated file size and units.
-
- Example:
- >>> filesize.decimal(30000)
- '30.0 kB'
- >>> filesize.decimal(30000, precision=2, separator="")
- '30.00kB'
-
- """
- return _to_str(
- size,
- ("kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"),
- 1000,
- precision=precision,
- separator=separator,
- )
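For context, a minimal usage sketch of the decimal() helper removed above. The expected strings come from its own docstring, and the example assumes the public rich package is installed, which exposes this same function as rich.filesize.decimal:

from rich.filesize import decimal

print(decimal(1))                                 # "1 byte"  -- singular special case in _to_str()
print(decimal(999))                               # "999 bytes" -- below the 1000 base, no suffix
print(decimal(30000))                             # "30.0 kB"
print(decimal(30000, precision=2, separator=""))  # "30.00kB"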
diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/terminal_theme.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/terminal_theme.py
deleted file mode 100644
index 565e9d960f8604c487e063ad9ed3f6f63027f3b4..0000000000000000000000000000000000000000
--- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/terminal_theme.py
+++ /dev/null
@@ -1,153 +0,0 @@
-from typing import List, Optional, Tuple
-
-from .color_triplet import ColorTriplet
-from .palette import Palette
-
-_ColorTuple = Tuple[int, int, int]
-
-
-class TerminalTheme:
- """A color theme used when exporting console content.
-
- Args:
- background (Tuple[int, int, int]): The background color.
- foreground (Tuple[int, int, int]): The foreground (text) color.
- normal (List[Tuple[int, int, int]]): A list of 8 normal intensity colors.
- bright (List[Tuple[int, int, int]], optional): A list of 8 bright colors, or None
- to repeat normal intensity. Defaults to None.
- """
-
- def __init__(
- self,
- background: _ColorTuple,
- foreground: _ColorTuple,
- normal: List[_ColorTuple],
- bright: Optional[List[_ColorTuple]] = None,
- ) -> None:
- self.background_color = ColorTriplet(*background)
- self.foreground_color = ColorTriplet(*foreground)
- self.ansi_colors = Palette(normal + (bright or normal))
-
-
-DEFAULT_TERMINAL_THEME = TerminalTheme(
- (255, 255, 255),
- (0, 0, 0),
- [
- (0, 0, 0),
- (128, 0, 0),
- (0, 128, 0),
- (128, 128, 0),
- (0, 0, 128),
- (128, 0, 128),
- (0, 128, 128),
- (192, 192, 192),
- ],
- [
- (128, 128, 128),
- (255, 0, 0),
- (0, 255, 0),
- (255, 255, 0),
- (0, 0, 255),
- (255, 0, 255),
- (0, 255, 255),
- (255, 255, 255),
- ],
-)
-
-MONOKAI = TerminalTheme(
- (12, 12, 12),
- (217, 217, 217),
- [
- (26, 26, 26),
- (244, 0, 95),
- (152, 224, 36),
- (253, 151, 31),
- (157, 101, 255),
- (244, 0, 95),
- (88, 209, 235),
- (196, 197, 181),
- (98, 94, 76),
- ],
- [
- (244, 0, 95),
- (152, 224, 36),
- (224, 213, 97),
- (157, 101, 255),
- (244, 0, 95),
- (88, 209, 235),
- (246, 246, 239),
- ],
-)
-DIMMED_MONOKAI = TerminalTheme(
- (25, 25, 25),
- (185, 188, 186),
- [
- (58, 61, 67),
- (190, 63, 72),
- (135, 154, 59),
- (197, 166, 53),
- (79, 118, 161),
- (133, 92, 141),
- (87, 143, 164),
- (185, 188, 186),
- (136, 137, 135),
- ],
- [
- (251, 0, 31),
- (15, 114, 47),
- (196, 112, 51),
- (24, 109, 227),
- (251, 0, 103),
- (46, 112, 109),
- (253, 255, 185),
- ],
-)
-NIGHT_OWLISH = TerminalTheme(
- (255, 255, 255),
- (64, 63, 83),
- [
- (1, 22, 39),
- (211, 66, 62),
- (42, 162, 152),
- (218, 170, 1),
- (72, 118, 214),
- (64, 63, 83),
- (8, 145, 106),
- (122, 129, 129),
- (122, 129, 129),
- ],
- [
- (247, 110, 110),
- (73, 208, 197),
- (218, 194, 107),
- (92, 167, 228),
- (105, 112, 152),
- (0, 201, 144),
- (152, 159, 177),
- ],
-)
-
-SVG_EXPORT_THEME = TerminalTheme(
- (41, 41, 41),
- (197, 200, 198),
- [
- (75, 78, 85),
- (204, 85, 90),
- (152, 168, 75),
- (208, 179, 68),
- (96, 138, 177),
- (152, 114, 159),
- (104, 160, 179),
- (197, 200, 198),
- (154, 155, 153),
- ],
- [
- (255, 38, 39),
- (0, 130, 61),
- (208, 132, 66),
- (25, 132, 233),
- (255, 44, 122),
- (57, 130, 128),
- (253, 253, 197),
- ],
-)
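As a rough illustration of the class removed above, here is a sketch of defining a custom theme. The color values and the SOLARIZED_ISH name are arbitrary, and the example assumes the public rich package, whose rich.terminal_theme module matches this vendored copy:

from rich.terminal_theme import TerminalTheme

# Eight "normal" ANSI colors; "bright" is omitted, so the palette repeats the
# normal set, exactly as Palette(normal + (bright or normal)) does in __init__.
SOLARIZED_ISH = TerminalTheme(
    (0, 43, 54),      # background
    (131, 148, 150),  # foreground
    [
        (7, 54, 66), (220, 50, 47), (133, 153, 0), (181, 137, 0),
        (38, 139, 210), (211, 54, 130), (42, 161, 152), (238, 232, 213),
    ],
)

print(SOLARIZED_ISH.background_color)  # the ColorTriplet built from (0, 43, 54)
print(SOLARIZED_ISH.foreground_color)  # the ColorTriplet built from (131, 148, 150)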
diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/typing_extensions.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/typing_extensions.py
deleted file mode 100644
index 9f1c7aa31e20a7d0ef2e6877ea325c068d50e406..0000000000000000000000000000000000000000
--- a/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/typing_extensions.py
+++ /dev/null
@@ -1,2296 +0,0 @@
-import abc
-import collections
-import collections.abc
-import operator
-import sys
-import typing
-
-# After PEP 560, internal typing API was substantially reworked.
-# This is especially important for Protocol class which uses internal APIs
-# quite extensively.
-PEP_560 = sys.version_info[:3] >= (3, 7, 0)
-
-if PEP_560:
- GenericMeta = type
-else:
- # 3.6
- from typing import GenericMeta, _type_vars # noqa
-
-# The two functions below are copies of typing internal helpers.
-# They are needed by _ProtocolMeta
-
-
-def _no_slots_copy(dct):
- dict_copy = dict(dct)
- if '__slots__' in dict_copy:
- for slot in dict_copy['__slots__']:
- dict_copy.pop(slot, None)
- return dict_copy
-
-
-def _check_generic(cls, parameters):
- if not cls.__parameters__:
- raise TypeError(f"{cls} is not a generic class")
- alen = len(parameters)
- elen = len(cls.__parameters__)
- if alen != elen:
- raise TypeError(f"Too {'many' if alen > elen else 'few'} arguments for {cls};"
- f" actual {alen}, expected {elen}")
-
-
-# Please keep __all__ alphabetized within each category.
-__all__ = [
- # Super-special typing primitives.
- 'ClassVar',
- 'Concatenate',
- 'Final',
- 'ParamSpec',
- 'Self',
- 'Type',
-
- # ABCs (from collections.abc).
- 'Awaitable',
- 'AsyncIterator',
- 'AsyncIterable',
- 'Coroutine',
- 'AsyncGenerator',
- 'AsyncContextManager',
- 'ChainMap',
-
- # Concrete collection types.
- 'ContextManager',
- 'Counter',
- 'Deque',
- 'DefaultDict',
- 'OrderedDict',
- 'TypedDict',
-
- # Structural checks, a.k.a. protocols.
- 'SupportsIndex',
-
- # One-off things.
- 'Annotated',
- 'final',
- 'IntVar',
- 'Literal',
- 'NewType',
- 'overload',
- 'Protocol',
- 'runtime',
- 'runtime_checkable',
- 'Text',
- 'TypeAlias',
- 'TypeGuard',
- 'TYPE_CHECKING',
-]
-
-if PEP_560:
- __all__.extend(["get_args", "get_origin", "get_type_hints"])
-
-# 3.6.2+
-if hasattr(typing, 'NoReturn'):
- NoReturn = typing.NoReturn
-# 3.6.0-3.6.1
-else:
- class _NoReturn(typing._FinalTypingBase, _root=True):
- """Special type indicating functions that never return.
- Example::
-
- from typing import NoReturn
-
- def stop() -> NoReturn:
- raise Exception('no way')
-
- This type is invalid in other positions, e.g., ``List[NoReturn]``
- will fail in static type checkers.
- """
- __slots__ = ()
-
- def __instancecheck__(self, obj):
- raise TypeError("NoReturn cannot be used with isinstance().")
-
- def __subclasscheck__(self, cls):
- raise TypeError("NoReturn cannot be used with issubclass().")
-
- NoReturn = _NoReturn(_root=True)
-
-# Some unconstrained type variables. These are used by the container types.
-# (These are not for export.)
-T = typing.TypeVar('T') # Any type.
-KT = typing.TypeVar('KT') # Key type.
-VT = typing.TypeVar('VT') # Value type.
-T_co = typing.TypeVar('T_co', covariant=True) # Any type covariant containers.
-T_contra = typing.TypeVar('T_contra', contravariant=True) # Ditto contravariant.
-
-ClassVar = typing.ClassVar
-
-# On older versions of typing there is an internal class named "Final".
-# 3.8+
-if hasattr(typing, 'Final') and sys.version_info[:2] >= (3, 7):
- Final = typing.Final
-# 3.7
-elif sys.version_info[:2] >= (3, 7):
- class _FinalForm(typing._SpecialForm, _root=True):
-
- def __repr__(self):
- return 'typing_extensions.' + self._name
-
- def __getitem__(self, parameters):
- item = typing._type_check(parameters,
- f'{self._name} accepts only single type')
- return typing._GenericAlias(self, (item,))
-
- Final = _FinalForm('Final',
- doc="""A special typing construct to indicate that a name
- cannot be re-assigned or overridden in a subclass.
- For example:
-
- MAX_SIZE: Final = 9000
- MAX_SIZE += 1 # Error reported by type checker
-
- class Connection:
- TIMEOUT: Final[int] = 10
- class FastConnector(Connection):
- TIMEOUT = 1 # Error reported by type checker
-
- There is no runtime checking of these properties.""")
-# 3.6
-else:
- class _Final(typing._FinalTypingBase, _root=True):
- """A special typing construct to indicate that a name
- cannot be re-assigned or overridden in a subclass.
- For example:
-
- MAX_SIZE: Final = 9000
- MAX_SIZE += 1 # Error reported by type checker
-
- class Connection:
- TIMEOUT: Final[int] = 10
- class FastConnector(Connection):
- TIMEOUT = 1 # Error reported by type checker
-
- There is no runtime checking of these properties.
- """
-
- __slots__ = ('__type__',)
-
- def __init__(self, tp=None, **kwds):
- self.__type__ = tp
-
- def __getitem__(self, item):
- cls = type(self)
- if self.__type__ is None:
- return cls(typing._type_check(item,
- f'{cls.__name__[1:]} accepts only single type.'),
- _root=True)
- raise TypeError(f'{cls.__name__[1:]} cannot be further subscripted')
-
- def _eval_type(self, globalns, localns):
- new_tp = typing._eval_type(self.__type__, globalns, localns)
- if new_tp == self.__type__:
- return self
- return type(self)(new_tp, _root=True)
-
- def __repr__(self):
- r = super().__repr__()
- if self.__type__ is not None:
- r += f'[{typing._type_repr(self.__type__)}]'
- return r
-
- def __hash__(self):
- return hash((type(self).__name__, self.__type__))
-
- def __eq__(self, other):
- if not isinstance(other, _Final):
- return NotImplemented
- if self.__type__ is not None:
- return self.__type__ == other.__type__
- return self is other
-
- Final = _Final(_root=True)
-
-
-# 3.8+
-if hasattr(typing, 'final'):
- final = typing.final
-# 3.6-3.7
-else:
- def final(f):
- """This decorator can be used to indicate to type checkers that
- the decorated method cannot be overridden, and decorated class
- cannot be subclassed. For example:
-
- class Base:
- @final
- def done(self) -> None:
- ...
- class Sub(Base):
- def done(self) -> None: # Error reported by type checker
- ...
- @final
- class Leaf:
- ...
- class Other(Leaf): # Error reported by type checker
- ...
-
- There is no runtime checking of these properties.
- """
- return f
-
-
-def IntVar(name):
- return typing.TypeVar(name)
-
-
-# 3.8+:
-if hasattr(typing, 'Literal'):
- Literal = typing.Literal
-# 3.7:
-elif sys.version_info[:2] >= (3, 7):
- class _LiteralForm(typing._SpecialForm, _root=True):
-
- def __repr__(self):
- return 'typing_extensions.' + self._name
-
- def __getitem__(self, parameters):
- return typing._GenericAlias(self, parameters)
-
- Literal = _LiteralForm('Literal',
- doc="""A type that can be used to indicate to type checkers
- that the corresponding value has a value literally equivalent
- to the provided parameter. For example:
-
- var: Literal[4] = 4
-
- The type checker understands that 'var' is literally equal to
- the value 4 and no other value.
-
- Literal[...] cannot be subclassed. There is no runtime
- checking verifying that the parameter is actually a value
- instead of a type.""")
-# 3.6:
-else:
- class _Literal(typing._FinalTypingBase, _root=True):
- """A type that can be used to indicate to type checkers that the
- corresponding value has a value literally equivalent to the
- provided parameter. For example:
-
- var: Literal[4] = 4
-
- The type checker understands that 'var' is literally equal to the
- value 4 and no other value.
-
- Literal[...] cannot be subclassed. There is no runtime checking
- verifying that the parameter is actually a value instead of a type.
- """
-
- __slots__ = ('__values__',)
-
- def __init__(self, values=None, **kwds):
- self.__values__ = values
-
- def __getitem__(self, values):
- cls = type(self)
- if self.__values__ is None:
- if not isinstance(values, tuple):
- values = (values,)
- return cls(values, _root=True)
- raise TypeError(f'{cls.__name__[1:]} cannot be further subscripted')
-
- def _eval_type(self, globalns, localns):
- return self
-
- def __repr__(self):
- r = super().__repr__()
- if self.__values__ is not None:
- r += f'[{", ".join(map(typing._type_repr, self.__values__))}]'
- return r
-
- def __hash__(self):
- return hash((type(self).__name__, self.__values__))
-
- def __eq__(self, other):
- if not isinstance(other, _Literal):
- return NotImplemented
- if self.__values__ is not None:
- return self.__values__ == other.__values__
- return self is other
-
- Literal = _Literal(_root=True)
-
-
-_overload_dummy = typing._overload_dummy # noqa
-overload = typing.overload
-
-
-# This is not a real generic class. Don't use outside annotations.
-Type = typing.Type
-
-# Various ABCs mimicking those in collections.abc.
-# A few are simply re-exported for completeness.
-
-
-class _ExtensionsGenericMeta(GenericMeta):
- def __subclasscheck__(self, subclass):
- """This mimics a more modern GenericMeta.__subclasscheck__() logic
- (that does not have problems with recursion) to work around interactions
- between collections, typing, and typing_extensions on older
- versions of Python, see https://github.com/python/typing/issues/501.
- """
- if self.__origin__ is not None:
- if sys._getframe(1).f_globals['__name__'] not in ['abc', 'functools']:
- raise TypeError("Parameterized generics cannot be used with class "
- "or instance checks")
- return False
- if not self.__extra__:
- return super().__subclasscheck__(subclass)
- res = self.__extra__.__subclasshook__(subclass)
- if res is not NotImplemented:
- return res
- if self.__extra__ in subclass.__mro__:
- return True
- for scls in self.__extra__.__subclasses__():
- if isinstance(scls, GenericMeta):
- continue
- if issubclass(subclass, scls):
- return True
- return False
-
-
-Awaitable = typing.Awaitable
-Coroutine = typing.Coroutine
-AsyncIterable = typing.AsyncIterable
-AsyncIterator = typing.AsyncIterator
-
-# 3.6.1+
-if hasattr(typing, 'Deque'):
- Deque = typing.Deque
-# 3.6.0
-else:
- class Deque(collections.deque, typing.MutableSequence[T],
- metaclass=_ExtensionsGenericMeta,
- extra=collections.deque):
- __slots__ = ()
-
- def __new__(cls, *args, **kwds):
- if cls._gorg is Deque:
- return collections.deque(*args, **kwds)
- return typing._generic_new(collections.deque, cls, *args, **kwds)
-
-ContextManager = typing.ContextManager
-# 3.6.2+
-if hasattr(typing, 'AsyncContextManager'):
- AsyncContextManager = typing.AsyncContextManager
-# 3.6.0-3.6.1
-else:
- from _collections_abc import _check_methods as _check_methods_in_mro # noqa
-
- class AsyncContextManager(typing.Generic[T_co]):
- __slots__ = ()
-
- async def __aenter__(self):
- return self
-
- @abc.abstractmethod
- async def __aexit__(self, exc_type, exc_value, traceback):
- return None
-
- @classmethod
- def __subclasshook__(cls, C):
- if cls is AsyncContextManager:
- return _check_methods_in_mro(C, "__aenter__", "__aexit__")
- return NotImplemented
-
-DefaultDict = typing.DefaultDict
-
-# 3.7.2+
-if hasattr(typing, 'OrderedDict'):
- OrderedDict = typing.OrderedDict
-# 3.7.0-3.7.2
-elif (3, 7, 0) <= sys.version_info[:3] < (3, 7, 2):
- OrderedDict = typing._alias(collections.OrderedDict, (KT, VT))
-# 3.6
-else:
- class OrderedDict(collections.OrderedDict, typing.MutableMapping[KT, VT],
- metaclass=_ExtensionsGenericMeta,
- extra=collections.OrderedDict):
-
- __slots__ = ()
-
- def __new__(cls, *args, **kwds):
- if cls._gorg is OrderedDict:
- return collections.OrderedDict(*args, **kwds)
- return typing._generic_new(collections.OrderedDict, cls, *args, **kwds)
-
-# 3.6.2+
-if hasattr(typing, 'Counter'):
- Counter = typing.Counter
-# 3.6.0-3.6.1
-else:
- class Counter(collections.Counter,
- typing.Dict[T, int],
- metaclass=_ExtensionsGenericMeta, extra=collections.Counter):
-
- __slots__ = ()
-
- def __new__(cls, *args, **kwds):
- if cls._gorg is Counter:
- return collections.Counter(*args, **kwds)
- return typing._generic_new(collections.Counter, cls, *args, **kwds)
-
-# 3.6.1+
-if hasattr(typing, 'ChainMap'):
- ChainMap = typing.ChainMap
-elif hasattr(collections, 'ChainMap'):
- class ChainMap(collections.ChainMap, typing.MutableMapping[KT, VT],
- metaclass=_ExtensionsGenericMeta,
- extra=collections.ChainMap):
-
- __slots__ = ()
-
- def __new__(cls, *args, **kwds):
- if cls._gorg is ChainMap:
- return collections.ChainMap(*args, **kwds)
- return typing._generic_new(collections.ChainMap, cls, *args, **kwds)
-
-# 3.6.1+
-if hasattr(typing, 'AsyncGenerator'):
- AsyncGenerator = typing.AsyncGenerator
-# 3.6.0
-else:
- class AsyncGenerator(AsyncIterator[T_co], typing.Generic[T_co, T_contra],
- metaclass=_ExtensionsGenericMeta,
- extra=collections.abc.AsyncGenerator):
- __slots__ = ()
-
-NewType = typing.NewType
-Text = typing.Text
-TYPE_CHECKING = typing.TYPE_CHECKING
-
-
-def _gorg(cls):
- """This function exists for compatibility with old typing versions."""
- assert isinstance(cls, GenericMeta)
- if hasattr(cls, '_gorg'):
- return cls._gorg
- while cls.__origin__ is not None:
- cls = cls.__origin__
- return cls
-
-
-_PROTO_WHITELIST = ['Callable', 'Awaitable',
- 'Iterable', 'Iterator', 'AsyncIterable', 'AsyncIterator',
- 'Hashable', 'Sized', 'Container', 'Collection', 'Reversible',
- 'ContextManager', 'AsyncContextManager']
-
-
-def _get_protocol_attrs(cls):
- attrs = set()
- for base in cls.__mro__[:-1]: # without object
- if base.__name__ in ('Protocol', 'Generic'):
- continue
- annotations = getattr(base, '__annotations__', {})
- for attr in list(base.__dict__.keys()) + list(annotations.keys()):
- if (not attr.startswith('_abc_') and attr not in (
- '__abstractmethods__', '__annotations__', '__weakref__',
- '_is_protocol', '_is_runtime_protocol', '__dict__',
- '__args__', '__slots__',
- '__next_in_mro__', '__parameters__', '__origin__',
- '__orig_bases__', '__extra__', '__tree_hash__',
- '__doc__', '__subclasshook__', '__init__', '__new__',
- '__module__', '_MutableMapping__marker', '_gorg')):
- attrs.add(attr)
- return attrs
-
-
-def _is_callable_members_only(cls):
- return all(callable(getattr(cls, attr, None)) for attr in _get_protocol_attrs(cls))
-
-
-# 3.8+
-if hasattr(typing, 'Protocol'):
- Protocol = typing.Protocol
-# 3.7
-elif PEP_560:
- from typing import _collect_type_vars # noqa
-
- def _no_init(self, *args, **kwargs):
- if type(self)._is_protocol:
- raise TypeError('Protocols cannot be instantiated')
-
- class _ProtocolMeta(abc.ABCMeta):
- # This metaclass is a bit unfortunate and exists only because of the lack
- # of __instancehook__.
- def __instancecheck__(cls, instance):
- # We need this method for situations where attributes are
- # assigned in __init__.
- if ((not getattr(cls, '_is_protocol', False) or
- _is_callable_members_only(cls)) and
- issubclass(instance.__class__, cls)):
- return True
- if cls._is_protocol:
- if all(hasattr(instance, attr) and
- (not callable(getattr(cls, attr, None)) or
- getattr(instance, attr) is not None)
- for attr in _get_protocol_attrs(cls)):
- return True
- return super().__instancecheck__(instance)
-
- class Protocol(metaclass=_ProtocolMeta):
- # There is quite a lot of overlapping code with typing.Generic.
- # Unfortunately it is hard to avoid this while these live in two different
- # modules. The duplicated code will be removed when Protocol is moved to typing.
- """Base class for protocol classes. Protocol classes are defined as::
-
- class Proto(Protocol):
- def meth(self) -> int:
- ...
-
- Such classes are primarily used with static type checkers that recognize
- structural subtyping (static duck-typing), for example::
-
- class C:
- def meth(self) -> int:
- return 0
-
- def func(x: Proto) -> int:
- return x.meth()
-
- func(C()) # Passes static type check
-
- See PEP 544 for details. Protocol classes decorated with
- @typing_extensions.runtime act as simple-minded runtime protocol that checks
- only the presence of given attributes, ignoring their type signatures.
-
- Protocol classes can be generic, they are defined as::
-
- class GenProto(Protocol[T]):
- def meth(self) -> T:
- ...
- """
- __slots__ = ()
- _is_protocol = True
-
- def __new__(cls, *args, **kwds):
- if cls is Protocol:
- raise TypeError("Type Protocol cannot be instantiated; "
- "it can only be used as a base class")
- return super().__new__(cls)
-
- @typing._tp_cache
- def __class_getitem__(cls, params):
- if not isinstance(params, tuple):
- params = (params,)
- if not params and cls is not typing.Tuple:
- raise TypeError(
- f"Parameter list to {cls.__qualname__}[...] cannot be empty")
- msg = "Parameters to generic types must be types."
- params = tuple(typing._type_check(p, msg) for p in params) # noqa
- if cls is Protocol:
- # Generic can only be subscripted with unique type variables.
- if not all(isinstance(p, typing.TypeVar) for p in params):
- i = 0
- while isinstance(params[i], typing.TypeVar):
- i += 1
- raise TypeError(
- "Parameters to Protocol[...] must all be type variables."
- f" Parameter {i + 1} is {params[i]}")
- if len(set(params)) != len(params):
- raise TypeError(
- "Parameters to Protocol[...] must all be unique")
- else:
- # Subscripting a regular Generic subclass.
- _check_generic(cls, params)
- return typing._GenericAlias(cls, params)
-
- def __init_subclass__(cls, *args, **kwargs):
- tvars = []
- if '__orig_bases__' in cls.__dict__:
- error = typing.Generic in cls.__orig_bases__
- else:
- error = typing.Generic in cls.__bases__
- if error:
- raise TypeError("Cannot inherit from plain Generic")
- if '__orig_bases__' in cls.__dict__:
- tvars = _collect_type_vars(cls.__orig_bases__)
- # Look for Generic[T1, ..., Tn] or Protocol[T1, ..., Tn].
- # If found, tvars must be a subset of it.
- # If not found, tvars is it.
- # Also check for and reject plain Generic,
- # and reject multiple Generic[...] and/or Protocol[...].
- gvars = None
- for base in cls.__orig_bases__:
- if (isinstance(base, typing._GenericAlias) and
- base.__origin__ in (typing.Generic, Protocol)):
- # for error messages
- the_base = base.__origin__.__name__
- if gvars is not None:
- raise TypeError(
- "Cannot inherit from Generic[...]"
- " and/or Protocol[...] multiple types.")
- gvars = base.__parameters__
- if gvars is None:
- gvars = tvars
- else:
- tvarset = set(tvars)
- gvarset = set(gvars)
- if not tvarset <= gvarset:
- s_vars = ', '.join(str(t) for t in tvars if t not in gvarset)
- s_args = ', '.join(str(g) for g in gvars)
- raise TypeError(f"Some type variables ({s_vars}) are"
- f" not listed in {the_base}[{s_args}]")
- tvars = gvars
- cls.__parameters__ = tuple(tvars)
-
- # Determine if this is a protocol or a concrete subclass.
- if not cls.__dict__.get('_is_protocol', None):
- cls._is_protocol = any(b is Protocol for b in cls.__bases__)
-
- # Set (or override) the protocol subclass hook.
- def _proto_hook(other):
- if not cls.__dict__.get('_is_protocol', None):
- return NotImplemented
- if not getattr(cls, '_is_runtime_protocol', False):
- if sys._getframe(2).f_globals['__name__'] in ['abc', 'functools']:
- return NotImplemented
- raise TypeError("Instance and class checks can only be used with"
- " @runtime protocols")
- if not _is_callable_members_only(cls):
- if sys._getframe(2).f_globals['__name__'] in ['abc', 'functools']:
- return NotImplemented
- raise TypeError("Protocols with non-method members"
- " don't support issubclass()")
- if not isinstance(other, type):
- # Same error as for issubclass(1, int)
- raise TypeError('issubclass() arg 1 must be a class')
- for attr in _get_protocol_attrs(cls):
- for base in other.__mro__:
- if attr in base.__dict__:
- if base.__dict__[attr] is None:
- return NotImplemented
- break
- annotations = getattr(base, '__annotations__', {})
- if (isinstance(annotations, typing.Mapping) and
- attr in annotations and
- isinstance(other, _ProtocolMeta) and
- other._is_protocol):
- break
- else:
- return NotImplemented
- return True
- if '__subclasshook__' not in cls.__dict__:
- cls.__subclasshook__ = _proto_hook
-
- # We have nothing more to do for non-protocols.
- if not cls._is_protocol:
- return
-
- # Check consistency of bases.
- for base in cls.__bases__:
- if not (base in (object, typing.Generic) or
- base.__module__ == 'collections.abc' and
- base.__name__ in _PROTO_WHITELIST or
- isinstance(base, _ProtocolMeta) and base._is_protocol):
- raise TypeError('Protocols can only inherit from other'
- f' protocols, got {repr(base)}')
- cls.__init__ = _no_init
-# 3.6
-else:
- from typing import _next_in_mro, _type_check # noqa
-
- def _no_init(self, *args, **kwargs):
- if type(self)._is_protocol:
- raise TypeError('Protocols cannot be instantiated')
-
- class _ProtocolMeta(GenericMeta):
- """Internal metaclass for Protocol.
-
- This exists so Protocol classes can be generic without deriving
- from Generic.
- """
- def __new__(cls, name, bases, namespace,
- tvars=None, args=None, origin=None, extra=None, orig_bases=None):
- # This is just a version copied from GenericMeta.__new__ that
- # includes "Protocol" special treatment. (Comments removed for brevity.)
- assert extra is None # Protocols should not have extra
- if tvars is not None:
- assert origin is not None
- assert all(isinstance(t, typing.TypeVar) for t in tvars), tvars
- else:
- tvars = _type_vars(bases)
- gvars = None
- for base in bases:
- if base is typing.Generic:
- raise TypeError("Cannot inherit from plain Generic")
- if (isinstance(base, GenericMeta) and
- base.__origin__ in (typing.Generic, Protocol)):
- if gvars is not None:
- raise TypeError(
- "Cannot inherit from Generic[...] or"
- " Protocol[...] multiple times.")
- gvars = base.__parameters__
- if gvars is None:
- gvars = tvars
- else:
- tvarset = set(tvars)
- gvarset = set(gvars)
- if not tvarset <= gvarset:
- s_vars = ", ".join(str(t) for t in tvars if t not in gvarset)
- s_args = ", ".join(str(g) for g in gvars)
- cls_name = "Generic" if any(b.__origin__ is typing.Generic
- for b in bases) else "Protocol"
- raise TypeError(f"Some type variables ({s_vars}) are"
- f" not listed in {cls_name}[{s_args}]")
- tvars = gvars
-
- initial_bases = bases
- if (extra is not None and type(extra) is abc.ABCMeta and
- extra not in bases):
- bases = (extra,) + bases
- bases = tuple(_gorg(b) if isinstance(b, GenericMeta) else b
- for b in bases)
- if any(isinstance(b, GenericMeta) and b is not typing.Generic for b in bases):
- bases = tuple(b for b in bases if b is not typing.Generic)
- namespace.update({'__origin__': origin, '__extra__': extra})
- self = super(GenericMeta, cls).__new__(cls, name, bases, namespace,
- _root=True)
- super(GenericMeta, self).__setattr__('_gorg',
- self if not origin else
- _gorg(origin))
- self.__parameters__ = tvars
- self.__args__ = tuple(... if a is typing._TypingEllipsis else
- () if a is typing._TypingEmpty else
- a for a in args) if args else None
- self.__next_in_mro__ = _next_in_mro(self)
- if orig_bases is None:
- self.__orig_bases__ = initial_bases
- elif origin is not None:
- self._abc_registry = origin._abc_registry
- self._abc_cache = origin._abc_cache
- if hasattr(self, '_subs_tree'):
- self.__tree_hash__ = (hash(self._subs_tree()) if origin else
- super(GenericMeta, self).__hash__())
- return self
-
- def __init__(cls, *args, **kwargs):
- super().__init__(*args, **kwargs)
- if not cls.__dict__.get('_is_protocol', None):
- cls._is_protocol = any(b is Protocol or
- isinstance(b, _ProtocolMeta) and
- b.__origin__ is Protocol
- for b in cls.__bases__)
- if cls._is_protocol:
- for base in cls.__mro__[1:]:
- if not (base in (object, typing.Generic) or
- base.__module__ == 'collections.abc' and
- base.__name__ in _PROTO_WHITELIST or
- isinstance(base, typing.TypingMeta) and base._is_protocol or
- isinstance(base, GenericMeta) and
- base.__origin__ is typing.Generic):
- raise TypeError(f'Protocols can only inherit from other'
- f' protocols, got {repr(base)}')
-
- cls.__init__ = _no_init
-
- def _proto_hook(other):
- if not cls.__dict__.get('_is_protocol', None):
- return NotImplemented
- if not isinstance(other, type):
- # Same error as for issubclass(1, int)
- raise TypeError('issubclass() arg 1 must be a class')
- for attr in _get_protocol_attrs(cls):
- for base in other.__mro__:
- if attr in base.__dict__:
- if base.__dict__[attr] is None:
- return NotImplemented
- break
- annotations = getattr(base, '__annotations__', {})
- if (isinstance(annotations, typing.Mapping) and
- attr in annotations and
- isinstance(other, _ProtocolMeta) and
- other._is_protocol):
- break
- else:
- return NotImplemented
- return True
- if '__subclasshook__' not in cls.__dict__:
- cls.__subclasshook__ = _proto_hook
-
- def __instancecheck__(self, instance):
- # We need this method for situations where attributes are
- # assigned in __init__.
- if ((not getattr(self, '_is_protocol', False) or
- _is_callable_members_only(self)) and
- issubclass(instance.__class__, self)):
- return True
- if self._is_protocol:
- if all(hasattr(instance, attr) and
- (not callable(getattr(self, attr, None)) or
- getattr(instance, attr) is not None)
- for attr in _get_protocol_attrs(self)):
- return True
- return super(GenericMeta, self).__instancecheck__(instance)
-
- def __subclasscheck__(self, cls):
- if self.__origin__ is not None:
- if sys._getframe(1).f_globals['__name__'] not in ['abc', 'functools']:
- raise TypeError("Parameterized generics cannot be used with class "
- "or instance checks")
- return False
- if (self.__dict__.get('_is_protocol', None) and
- not self.__dict__.get('_is_runtime_protocol', None)):
- if sys._getframe(1).f_globals['__name__'] in ['abc',
- 'functools',
- 'typing']:
- return False
- raise TypeError("Instance and class checks can only be used with"
- " @runtime protocols")
- if (self.__dict__.get('_is_runtime_protocol', None) and
- not _is_callable_members_only(self)):
- if sys._getframe(1).f_globals['__name__'] in ['abc',
- 'functools',
- 'typing']:
- return super(GenericMeta, self).__subclasscheck__(cls)
- raise TypeError("Protocols with non-method members"
- " don't support issubclass()")
- return super(GenericMeta, self).__subclasscheck__(cls)
-
- @typing._tp_cache
- def __getitem__(self, params):
- # We also need to copy this from GenericMeta.__getitem__ to get
- # special treatment of "Protocol". (Comments removed for brevity.)
- if not isinstance(params, tuple):
- params = (params,)
- if not params and _gorg(self) is not typing.Tuple:
- raise TypeError(
- f"Parameter list to {self.__qualname__}[...] cannot be empty")
- msg = "Parameters to generic types must be types."
- params = tuple(_type_check(p, msg) for p in params)
- if self in (typing.Generic, Protocol):
- if not all(isinstance(p, typing.TypeVar) for p in params):
- raise TypeError(
- f"Parameters to {repr(self)}[...] must all be type variables")
- if len(set(params)) != len(params):
- raise TypeError(
- f"Parameters to {repr(self)}[...] must all be unique")
- tvars = params
- args = params
- elif self in (typing.Tuple, typing.Callable):
- tvars = _type_vars(params)
- args = params
- elif self.__origin__ in (typing.Generic, Protocol):
- raise TypeError(f"Cannot subscript already-subscripted {repr(self)}")
- else:
- _check_generic(self, params)
- tvars = _type_vars(params)
- args = params
-
- prepend = (self,) if self.__origin__ is None else ()
- return self.__class__(self.__name__,
- prepend + self.__bases__,
- _no_slots_copy(self.__dict__),
- tvars=tvars,
- args=args,
- origin=self,
- extra=self.__extra__,
- orig_bases=self.__orig_bases__)
-
- class Protocol(metaclass=_ProtocolMeta):
- """Base class for protocol classes. Protocol classes are defined as::
-
- class Proto(Protocol):
- def meth(self) -> int:
- ...
-
- Such classes are primarily used with static type checkers that recognize
- structural subtyping (static duck-typing), for example::
-
- class C:
- def meth(self) -> int:
- return 0
-
- def func(x: Proto) -> int:
- return x.meth()
-
- func(C()) # Passes static type check
-
- See PEP 544 for details. Protocol classes decorated with
- @typing_extensions.runtime act as simple-minded runtime protocol that checks
- only the presence of given attributes, ignoring their type signatures.
-
- Protocol classes can be generic, they are defined as::
-
- class GenProto(Protocol[T]):
- def meth(self) -> T:
- ...
- """
- __slots__ = ()
- _is_protocol = True
-
- def __new__(cls, *args, **kwds):
- if _gorg(cls) is Protocol:
- raise TypeError("Type Protocol cannot be instantiated; "
- "it can be used only as a base class")
- return typing._generic_new(cls.__next_in_mro__, cls, *args, **kwds)
-
-
-# 3.8+
-if hasattr(typing, 'runtime_checkable'):
- runtime_checkable = typing.runtime_checkable
-# 3.6-3.7
-else:
- def runtime_checkable(cls):
- """Mark a protocol class as a runtime protocol, so that it
- can be used with isinstance() and issubclass(). Raise TypeError
- if applied to a non-protocol class.
-
- This allows a simple-minded structural check very similar to the
- one-offs in collections.abc such as Hashable.
- """
- if not isinstance(cls, _ProtocolMeta) or not cls._is_protocol:
- raise TypeError('@runtime_checkable can be only applied to protocol classes,'
- f' got {cls!r}')
- cls._is_runtime_protocol = True
- return cls
-
-
-# Exists for backwards compatibility.
-runtime = runtime_checkable
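A short, self-contained sketch of the Protocol / runtime_checkable behaviour described in the docstrings above. Closeable and FileLike are made-up names for illustration, and the example assumes the installed typing_extensions package (which on modern Pythons simply re-exports the typing equivalents):

from typing_extensions import Protocol, runtime_checkable

@runtime_checkable
class Closeable(Protocol):
    def close(self) -> None: ...

class FileLike:
    def close(self) -> None:
        print("closed")

# The structural isinstance() check only verifies that a callable close()
# attribute exists; signatures are not checked, as the docstring warns.
print(isinstance(FileLike(), Closeable))  # True
print(isinstance(object(), Closeable))    # False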
-
-
-# 3.8+
-if hasattr(typing, 'SupportsIndex'):
- SupportsIndex = typing.SupportsIndex
-# 3.6-3.7
-else:
- @runtime_checkable
- class SupportsIndex(Protocol):
- __slots__ = ()
-
- @abc.abstractmethod
- def __index__(self) -> int:
- pass
-
-
-if sys.version_info >= (3, 9, 2):
- # The standard library TypedDict in Python 3.8 does not store runtime information
- # about which (if any) keys are optional. See https://bugs.python.org/issue38834
- # The standard library TypedDict in Python 3.9.0/1 does not honour the "total"
- # keyword with old-style TypedDict(). See https://bugs.python.org/issue42059
- TypedDict = typing.TypedDict
-else:
- def _check_fails(cls, other):
- try:
- if sys._getframe(1).f_globals['__name__'] not in ['abc',
- 'functools',
- 'typing']:
- # Typed dicts are only for static structural subtyping.
- raise TypeError('TypedDict does not support instance and class checks')
- except (AttributeError, ValueError):
- pass
- return False
-
- def _dict_new(*args, **kwargs):
- if not args:
- raise TypeError('TypedDict.__new__(): not enough arguments')
- _, args = args[0], args[1:] # allow the "cls" keyword be passed
- return dict(*args, **kwargs)
-
- _dict_new.__text_signature__ = '($cls, _typename, _fields=None, /, **kwargs)'
-
- def _typeddict_new(*args, total=True, **kwargs):
- if not args:
- raise TypeError('TypedDict.__new__(): not enough arguments')
- _, args = args[0], args[1:] # allow the "cls" keyword be passed
- if args:
- typename, args = args[0], args[1:] # allow the "_typename" keyword be passed
- elif '_typename' in kwargs:
- typename = kwargs.pop('_typename')
- import warnings
- warnings.warn("Passing '_typename' as keyword argument is deprecated",
- DeprecationWarning, stacklevel=2)
- else:
- raise TypeError("TypedDict.__new__() missing 1 required positional "
- "argument: '_typename'")
- if args:
- try:
- fields, = args # allow the "_fields" keyword be passed
- except ValueError:
- raise TypeError('TypedDict.__new__() takes from 2 to 3 '
- f'positional arguments but {len(args) + 2} '
- 'were given')
- elif '_fields' in kwargs and len(kwargs) == 1:
- fields = kwargs.pop('_fields')
- import warnings
- warnings.warn("Passing '_fields' as keyword argument is deprecated",
- DeprecationWarning, stacklevel=2)
- else:
- fields = None
-
- if fields is None:
- fields = kwargs
- elif kwargs:
- raise TypeError("TypedDict takes either a dict or keyword arguments,"
- " but not both")
-
- ns = {'__annotations__': dict(fields)}
- try:
- # Setting correct module is necessary to make typed dict classes pickleable.
- ns['__module__'] = sys._getframe(1).f_globals.get('__name__', '__main__')
- except (AttributeError, ValueError):
- pass
-
- return _TypedDictMeta(typename, (), ns, total=total)
-
- _typeddict_new.__text_signature__ = ('($cls, _typename, _fields=None,'
- ' /, *, total=True, **kwargs)')
-
- class _TypedDictMeta(type):
- def __init__(cls, name, bases, ns, total=True):
- super().__init__(name, bases, ns)
-
- def __new__(cls, name, bases, ns, total=True):
- # Create new typed dict class object.
- # This method is called directly when TypedDict is subclassed,
- # or via _typeddict_new when TypedDict is instantiated. This way
- # TypedDict supports all three syntaxes described in its docstring.
- # Subclasses and instances of TypedDict return actual dictionaries
- # via _dict_new.
- ns['__new__'] = _typeddict_new if name == 'TypedDict' else _dict_new
- tp_dict = super().__new__(cls, name, (dict,), ns)
-
- annotations = {}
- own_annotations = ns.get('__annotations__', {})
- own_annotation_keys = set(own_annotations.keys())
- msg = "TypedDict('Name', {f0: t0, f1: t1, ...}); each t must be a type"
- own_annotations = {
- n: typing._type_check(tp, msg) for n, tp in own_annotations.items()
- }
- required_keys = set()
- optional_keys = set()
-
- for base in bases:
- annotations.update(base.__dict__.get('__annotations__', {}))
- required_keys.update(base.__dict__.get('__required_keys__', ()))
- optional_keys.update(base.__dict__.get('__optional_keys__', ()))
-
- annotations.update(own_annotations)
- if total:
- required_keys.update(own_annotation_keys)
- else:
- optional_keys.update(own_annotation_keys)
-
- tp_dict.__annotations__ = annotations
- tp_dict.__required_keys__ = frozenset(required_keys)
- tp_dict.__optional_keys__ = frozenset(optional_keys)
- if not hasattr(tp_dict, '__total__'):
- tp_dict.__total__ = total
- return tp_dict
-
- __instancecheck__ = __subclasscheck__ = _check_fails
-
- TypedDict = _TypedDictMeta('TypedDict', (dict,), {})
- TypedDict.__module__ = __name__
- TypedDict.__doc__ = \
- """A simple typed name space. At runtime it is equivalent to a plain dict.
-
- TypedDict creates a dictionary type that expects all of its
- instances to have a certain set of keys, with each key
- associated with a value of a consistent type. This expectation
- is not checked at runtime but is only enforced by type checkers.
- Usage::
-
- class Point2D(TypedDict):
- x: int
- y: int
- label: str
-
- a: Point2D = {'x': 1, 'y': 2, 'label': 'good'} # OK
- b: Point2D = {'z': 3, 'label': 'bad'} # Fails type check
-
- assert Point2D(x=1, y=2, label='first') == dict(x=1, y=2, label='first')
-
- The type info can be accessed via the Point2D.__annotations__ dict, and
- the Point2D.__required_keys__ and Point2D.__optional_keys__ frozensets.
- TypedDict supports two additional equivalent forms::
-
- Point2D = TypedDict('Point2D', x=int, y=int, label=str)
- Point2D = TypedDict('Point2D', {'x': int, 'y': int, 'label': str})
-
- The class syntax is only supported in Python 3.6+, while two other
- syntax forms work for Python 2.7 and 3.2+
- """
-
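To make the bookkeeping in _TypedDictMeta above concrete, here is a small sketch. Movie and MovieDraft are illustrative names; with total=False the subclass's own keys land in __optional_keys__ rather than __required_keys__:

from typing_extensions import TypedDict

class Movie(TypedDict):
    title: str
    year: int

class MovieDraft(Movie, total=False):
    rating: float

print(Movie.__required_keys__)       # frozenset({'title', 'year'}) -- order may vary
print(MovieDraft.__optional_keys__)  # frozenset({'rating'})
print(MovieDraft(title="Heat", year=1995))  # a plain dict at runtime: {'title': 'Heat', 'year': 1995}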
-
-# Python 3.9+ has PEP 593 (Annotated and modified get_type_hints)
-if hasattr(typing, 'Annotated'):
- Annotated = typing.Annotated
- get_type_hints = typing.get_type_hints
- # Not exported and not a public API, but needed for get_origin() and get_args()
- # to work.
- _AnnotatedAlias = typing._AnnotatedAlias
-# 3.7-3.8
-elif PEP_560:
- class _AnnotatedAlias(typing._GenericAlias, _root=True):
- """Runtime representation of an annotated type.
-
- At its core 'Annotated[t, dec1, dec2, ...]' is an alias for the type 't'
- with extra annotations. The alias behaves like a normal typing alias,
- instantiating is the same as instantiating the underlying type, binding
- it to types is also the same.
- """
- def __init__(self, origin, metadata):
- if isinstance(origin, _AnnotatedAlias):
- metadata = origin.__metadata__ + metadata
- origin = origin.__origin__
- super().__init__(origin, origin)
- self.__metadata__ = metadata
-
- def copy_with(self, params):
- assert len(params) == 1
- new_type = params[0]
- return _AnnotatedAlias(new_type, self.__metadata__)
-
- def __repr__(self):
- return (f"typing_extensions.Annotated[{typing._type_repr(self.__origin__)}, "
- f"{', '.join(repr(a) for a in self.__metadata__)}]")
-
- def __reduce__(self):
- return operator.getitem, (
- Annotated, (self.__origin__,) + self.__metadata__
- )
-
- def __eq__(self, other):
- if not isinstance(other, _AnnotatedAlias):
- return NotImplemented
- if self.__origin__ != other.__origin__:
- return False
- return self.__metadata__ == other.__metadata__
-
- def __hash__(self):
- return hash((self.__origin__, self.__metadata__))
-
- class Annotated:
- """Add context specific metadata to a type.
-
- Example: Annotated[int, runtime_check.Unsigned] indicates to the
- hypothetical runtime_check module that this type is an unsigned int.
- Every other consumer of this type can ignore this metadata and treat
- this type as int.
-
- The first argument to Annotated must be a valid type (and will be in
- the __origin__ field), the remaining arguments are kept as a tuple in
- the __extra__ field.
-
- Details:
-
- - It's an error to call `Annotated` with less than two arguments.
- - Nested Annotated are flattened::
-
- Annotated[Annotated[T, Ann1, Ann2], Ann3] == Annotated[T, Ann1, Ann2, Ann3]
-
- - Instantiating an annotated type is equivalent to instantiating the
- underlying type::
-
- Annotated[C, Ann1](5) == C(5)
-
- - Annotated can be used as a generic type alias::
-
- Optimized = Annotated[T, runtime.Optimize()]
- Optimized[int] == Annotated[int, runtime.Optimize()]
-
- OptimizedList = Annotated[List[T], runtime.Optimize()]
- OptimizedList[int] == Annotated[List[int], runtime.Optimize()]
- """
-
- __slots__ = ()
-
- def __new__(cls, *args, **kwargs):
- raise TypeError("Type Annotated cannot be instantiated.")
-
- @typing._tp_cache
- def __class_getitem__(cls, params):
- if not isinstance(params, tuple) or len(params) < 2:
- raise TypeError("Annotated[...] should be used "
- "with at least two arguments (a type and an "
- "annotation).")
- msg = "Annotated[t, ...]: t must be a type."
- origin = typing._type_check(params[0], msg)
- metadata = tuple(params[1:])
- return _AnnotatedAlias(origin, metadata)
-
- def __init_subclass__(cls, *args, **kwargs):
- raise TypeError(
- f"Cannot subclass {cls.__module__}.Annotated"
- )
-
- def _strip_annotations(t):
- """Strips the annotations from a given type.
- """
- if isinstance(t, _AnnotatedAlias):
- return _strip_annotations(t.__origin__)
- if isinstance(t, typing._GenericAlias):
- stripped_args = tuple(_strip_annotations(a) for a in t.__args__)
- if stripped_args == t.__args__:
- return t
- res = t.copy_with(stripped_args)
- res._special = t._special
- return res
- return t
-
- def get_type_hints(obj, globalns=None, localns=None, include_extras=False):
- """Return type hints for an object.
-
- This is often the same as obj.__annotations__, but it handles
- forward references encoded as string literals, adds Optional[t] if a
- default value equal to None is set and recursively replaces all
- 'Annotated[T, ...]' with 'T' (unless 'include_extras=True').
-
- The argument may be a module, class, method, or function. The annotations
- are returned as a dictionary. For classes, annotations include also
- inherited members.
-
- TypeError is raised if the argument is not of a type that can contain
- annotations, and an empty dictionary is returned if no annotations are
- present.
-
- BEWARE -- the behavior of globalns and localns is counterintuitive
- (unless you are familiar with how eval() and exec() work). The
- search order is locals first, then globals.
-
- - If no dict arguments are passed, an attempt is made to use the
- globals from obj (or the respective module's globals for classes),
- and these are also used as the locals. If the object does not appear
- to have globals, an empty dictionary is used.
-
- - If one dict argument is passed, it is used for both globals and
- locals.
-
- - If two dict arguments are passed, they specify globals and
- locals, respectively.
- """
- hint = typing.get_type_hints(obj, globalns=globalns, localns=localns)
- if include_extras:
- return hint
- return {k: _strip_annotations(t) for k, t in hint.items()}
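A short example of the `include_extras` behaviour implemented above (a sketch, assuming the module is importable as `typing_extensions`):

    from typing_extensions import Annotated, get_type_hints

    def clamp(x: Annotated[int, "unsigned"]) -> int:
        return max(x, 0)

    # By default the Annotated wrapper is stripped from the hints...
    assert get_type_hints(clamp) == {'x': int, 'return': int}

    # ...while include_extras=True preserves the metadata.
    hints = get_type_hints(clamp, include_extras=True)
    assert hints['x'] == Annotated[int, "unsigned"]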
-# 3.6
-else:
-
- def _is_dunder(name):
- """Returns True if name is a __dunder_variable_name__."""
- return len(name) > 4 and name.startswith('__') and name.endswith('__')
-
- # Prior to Python 3.7 types did not have `copy_with`. A lot of the equality
- # checks, argument expansion, etc. are done on the _subs_tree. As a result we
- # can't provide a get_type_hints function that strips out annotations.
-
- class AnnotatedMeta(typing.GenericMeta):
- """Metaclass for Annotated"""
-
- def __new__(cls, name, bases, namespace, **kwargs):
- if any(b is not object for b in bases):
- raise TypeError("Cannot subclass " + str(Annotated))
- return super().__new__(cls, name, bases, namespace, **kwargs)
-
- @property
- def __metadata__(self):
- return self._subs_tree()[2]
-
- def _tree_repr(self, tree):
- cls, origin, metadata = tree
- if not isinstance(origin, tuple):
- tp_repr = typing._type_repr(origin)
- else:
- tp_repr = origin[0]._tree_repr(origin)
- metadata_reprs = ", ".join(repr(arg) for arg in metadata)
- return f'{cls}[{tp_repr}, {metadata_reprs}]'
-
- def _subs_tree(self, tvars=None, args=None): # noqa
- if self is Annotated:
- return Annotated
- res = super()._subs_tree(tvars=tvars, args=args)
- # Flatten nested Annotated
- if isinstance(res[1], tuple) and res[1][0] is Annotated:
- sub_tp = res[1][1]
- sub_annot = res[1][2]
- return (Annotated, sub_tp, sub_annot + res[2])
- return res
-
- def _get_cons(self):
- """Return the class used to create instance of this type."""
- if self.__origin__ is None:
- raise TypeError("Cannot get the underlying type of a "
- "non-specialized Annotated type.")
- tree = self._subs_tree()
- while isinstance(tree, tuple) and tree[0] is Annotated:
- tree = tree[1]
- if isinstance(tree, tuple):
- return tree[0]
- else:
- return tree
-
- @typing._tp_cache
- def __getitem__(self, params):
- if not isinstance(params, tuple):
- params = (params,)
- if self.__origin__ is not None: # specializing an instantiated type
- return super().__getitem__(params)
- elif not isinstance(params, tuple) or len(params) < 2:
- raise TypeError("Annotated[...] should be instantiated "
- "with at least two arguments (a type and an "
- "annotation).")
- else:
- msg = "Annotated[t, ...]: t must be a type."
- tp = typing._type_check(params[0], msg)
- metadata = tuple(params[1:])
- return self.__class__(
- self.__name__,
- self.__bases__,
- _no_slots_copy(self.__dict__),
- tvars=_type_vars((tp,)),
- # Metadata is a tuple so it won't be touched by _replace_args et al.
- args=(tp, metadata),
- origin=self,
- )
-
- def __call__(self, *args, **kwargs):
- cons = self._get_cons()
- result = cons(*args, **kwargs)
- try:
- result.__orig_class__ = self
- except AttributeError:
- pass
- return result
-
- def __getattr__(self, attr):
- # For simplicity we just don't relay all dunder names
- if self.__origin__ is not None and not _is_dunder(attr):
- return getattr(self._get_cons(), attr)
- raise AttributeError(attr)
-
- def __setattr__(self, attr, value):
- if _is_dunder(attr) or attr.startswith('_abc_'):
- super().__setattr__(attr, value)
- elif self.__origin__ is None:
- raise AttributeError(attr)
- else:
- setattr(self._get_cons(), attr, value)
-
- def __instancecheck__(self, obj):
- raise TypeError("Annotated cannot be used with isinstance().")
-
- def __subclasscheck__(self, cls):
- raise TypeError("Annotated cannot be used with issubclass().")
-
- class Annotated(metaclass=AnnotatedMeta):
- """Add context specific metadata to a type.
-
- Example: Annotated[int, runtime_check.Unsigned] indicates to the
- hypothetical runtime_check module that this type is an unsigned int.
- Every other consumer of this type can ignore this metadata and treat
- this type as int.
-
- The first argument to Annotated must be a valid type, the remaining
- arguments are kept as a tuple in the __metadata__ field.
-
- Details:
-
- - It's an error to call `Annotated` with less than two arguments.
- - Nested Annotated are flattened::
-
- Annotated[Annotated[T, Ann1, Ann2], Ann3] == Annotated[T, Ann1, Ann2, Ann3]
-
- - Instantiating an annotated type is equivalent to instantiating the
- underlying type::
-
- Annotated[C, Ann1](5) == C(5)
-
- - Annotated can be used as a generic type alias::
-
- Optimized = Annotated[T, runtime.Optimize()]
- Optimized[int] == Annotated[int, runtime.Optimize()]
-
- OptimizedList = Annotated[List[T], runtime.Optimize()]
- OptimizedList[int] == Annotated[List[int], runtime.Optimize()]
- """
-
-# Python 3.8 has get_origin() and get_args() but those implementations aren't
-# Annotated-aware, so we can't use those. Python 3.9's versions don't support
-# ParamSpecArgs and ParamSpecKwargs, so only Python 3.10's versions will do.
-if sys.version_info[:2] >= (3, 10):
- get_origin = typing.get_origin
- get_args = typing.get_args
-# 3.7-3.9
-elif PEP_560:
- try:
- # 3.9+
- from typing import _BaseGenericAlias
- except ImportError:
- _BaseGenericAlias = typing._GenericAlias
- try:
- # 3.9+
- from typing import GenericAlias
- except ImportError:
- GenericAlias = typing._GenericAlias
-
- def get_origin(tp):
- """Get the unsubscripted version of a type.
-
- This supports generic types, Callable, Tuple, Union, Literal, Final, ClassVar
- and Annotated. Return None for unsupported types. Examples::
-
- get_origin(Literal[42]) is Literal
- get_origin(int) is None
- get_origin(ClassVar[int]) is ClassVar
- get_origin(Generic) is Generic
- get_origin(Generic[T]) is Generic
- get_origin(Union[T, int]) is Union
- get_origin(List[Tuple[T, T]][int]) == list
- get_origin(P.args) is P
- """
- if isinstance(tp, _AnnotatedAlias):
- return Annotated
- if isinstance(tp, (typing._GenericAlias, GenericAlias, _BaseGenericAlias,
- ParamSpecArgs, ParamSpecKwargs)):
- return tp.__origin__
- if tp is typing.Generic:
- return typing.Generic
- return None
-
- def get_args(tp):
- """Get type arguments with all substitutions performed.
-
- For unions, basic simplifications used by Union constructor are performed.
- Examples::
- get_args(Dict[str, int]) == (str, int)
- get_args(int) == ()
- get_args(Union[int, Union[T, int], str][int]) == (int, str)
- get_args(Union[int, Tuple[T, int]][str]) == (int, Tuple[str, int])
- get_args(Callable[[], T][int]) == ([], int)
- """
- if isinstance(tp, _AnnotatedAlias):
- return (tp.__origin__,) + tp.__metadata__
- if isinstance(tp, (typing._GenericAlias, GenericAlias)):
- if getattr(tp, "_special", False):
- return ()
- res = tp.__args__
- if get_origin(tp) is collections.abc.Callable and res[0] is not Ellipsis:
- res = (list(res[:-1]), res[-1])
- return res
- return ()
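A quick sketch of what these helpers return (illustrative, assuming `typing_extensions` is importable):

    from typing import Callable, List, Union
    from typing_extensions import Annotated, get_args, get_origin

    assert get_origin(List[int]) is list
    assert get_origin(Union[int, str]) is Union
    assert get_args(Union[int, str]) == (int, str)

    # Annotated is unwrapped into (type, *metadata).
    assert get_origin(Annotated[int, "meta"]) is Annotated
    assert get_args(Annotated[int, "meta"]) == (int, "meta")

    # Callable arguments are regrouped into ([parameter types], return type).
    assert get_args(Callable[[int], str]) == ([int], str)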
-
-
-# 3.10+
-if hasattr(typing, 'TypeAlias'):
- TypeAlias = typing.TypeAlias
-# 3.9
-elif sys.version_info[:2] >= (3, 9):
- class _TypeAliasForm(typing._SpecialForm, _root=True):
- def __repr__(self):
- return 'typing_extensions.' + self._name
-
- @_TypeAliasForm
- def TypeAlias(self, parameters):
- """Special marker indicating that an assignment should
- be recognized as a proper type alias definition by type
- checkers.
-
- For example::
-
- Predicate: TypeAlias = Callable[..., bool]
-
- It's invalid when used anywhere except as in the example above.
- """
- raise TypeError(f"{self} is not subscriptable")
-# 3.7-3.8
-elif sys.version_info[:2] >= (3, 7):
- class _TypeAliasForm(typing._SpecialForm, _root=True):
- def __repr__(self):
- return 'typing_extensions.' + self._name
-
- TypeAlias = _TypeAliasForm('TypeAlias',
- doc="""Special marker indicating that an assignment should
- be recognized as a proper type alias definition by type
- checkers.
-
- For example::
-
- Predicate: TypeAlias = Callable[..., bool]
-
- It's invalid when used anywhere except as in the example
- above.""")
-# 3.6
-else:
- class _TypeAliasMeta(typing.TypingMeta):
- """Metaclass for TypeAlias"""
-
- def __repr__(self):
- return 'typing_extensions.TypeAlias'
-
- class _TypeAliasBase(typing._FinalTypingBase, metaclass=_TypeAliasMeta, _root=True):
- """Special marker indicating that an assignment should
- be recognized as a proper type alias definition by type
- checkers.
-
- For example::
-
- Predicate: TypeAlias = Callable[..., bool]
-
- It's invalid when used anywhere except as in the example above.
- """
- __slots__ = ()
-
- def __instancecheck__(self, obj):
- raise TypeError("TypeAlias cannot be used with isinstance().")
-
- def __subclasscheck__(self, cls):
- raise TypeError("TypeAlias cannot be used with issubclass().")
-
- def __repr__(self):
- return 'typing_extensions.TypeAlias'
-
- TypeAlias = _TypeAliasBase(_root=True)
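A small illustration of the marker in use (a sketch, not part of the deleted file). The annotation only matters to type checkers; at runtime the alias is an ordinary assignment:

    from typing import Callable
    from typing_extensions import TypeAlias

    Predicate: TypeAlias = Callable[..., bool]

    def keep_if(pred: Predicate, items: list) -> list:
        return [item for item in items if pred(item)]

    assert keep_if(bool, [0, 1, 2]) == [1, 2]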
-
-
-# Python 3.10+ has PEP 612
-if hasattr(typing, 'ParamSpecArgs'):
- ParamSpecArgs = typing.ParamSpecArgs
- ParamSpecKwargs = typing.ParamSpecKwargs
-# 3.6-3.9
-else:
- class _Immutable:
- """Mixin to indicate that object should not be copied."""
- __slots__ = ()
-
- def __copy__(self):
- return self
-
- def __deepcopy__(self, memo):
- return self
-
- class ParamSpecArgs(_Immutable):
- """The args for a ParamSpec object.
-
- Given a ParamSpec object P, P.args is an instance of ParamSpecArgs.
-
- ParamSpecArgs objects have a reference back to their ParamSpec:
-
- P.args.__origin__ is P
-
- This type is meant for runtime introspection and has no special meaning to
- static type checkers.
- """
- def __init__(self, origin):
- self.__origin__ = origin
-
- def __repr__(self):
- return f"{self.__origin__.__name__}.args"
-
- class ParamSpecKwargs(_Immutable):
- """The kwargs for a ParamSpec object.
-
- Given a ParamSpec object P, P.kwargs is an instance of ParamSpecKwargs.
-
- ParamSpecKwargs objects have a reference back to their ParamSpec:
-
- P.kwargs.__origin__ is P
-
- This type is meant for runtime introspection and has no special meaning to
- static type checkers.
- """
- def __init__(self, origin):
- self.__origin__ = origin
-
- def __repr__(self):
- return f"{self.__origin__.__name__}.kwargs"
-
-# 3.10+
-if hasattr(typing, 'ParamSpec'):
- ParamSpec = typing.ParamSpec
-# 3.6-3.9
-else:
-
- # Inherits from list as a workaround for Callable checks in Python < 3.9.2.
- class ParamSpec(list):
- """Parameter specification variable.
-
- Usage::
-
- P = ParamSpec('P')
-
- Parameter specification variables exist primarily for the benefit of static
- type checkers. They are used to forward the parameter types of one
- callable to another callable, a pattern commonly found in higher order
- functions and decorators. They are only valid when used in ``Concatenate``,
- or as the first argument to ``Callable``. In Python 3.10 and higher,
- they are also supported in user-defined Generics at runtime.
- See class Generic for more information on generic types. An
- example for annotating a decorator::
-
- T = TypeVar('T')
- P = ParamSpec('P')
-
- def add_logging(f: Callable[P, T]) -> Callable[P, T]:
- '''A type-safe decorator to add logging to a function.'''
- def inner(*args: P.args, **kwargs: P.kwargs) -> T:
- logging.info(f'{f.__name__} was called')
- return f(*args, **kwargs)
- return inner
-
- @add_logging
- def add_two(x: float, y: float) -> float:
- '''Add two numbers together.'''
- return x + y
-
- Parameter specification variables defined with covariant=True or
- contravariant=True can be used to declare covariant or contravariant
- generic types. These keyword arguments are valid, but their actual semantics
- are yet to be decided. See PEP 612 for details.
-
- Parameter specification variables can be introspected, e.g.::
-
- P.__name__ == 'P'
- P.__bound__ == None
- P.__covariant__ == False
- P.__contravariant__ == False
-
- Note that only parameter specification variables defined in global scope can
- be pickled.
- """
-
- # Trick Generic __parameters__.
- __class__ = typing.TypeVar
-
- @property
- def args(self):
- return ParamSpecArgs(self)
-
- @property
- def kwargs(self):
- return ParamSpecKwargs(self)
-
- def __init__(self, name, *, bound=None, covariant=False, contravariant=False):
- super().__init__([self])
- self.__name__ = name
- self.__covariant__ = bool(covariant)
- self.__contravariant__ = bool(contravariant)
- if bound:
- self.__bound__ = typing._type_check(bound, 'Bound must be a type.')
- else:
- self.__bound__ = None
-
- # for pickling:
- try:
- def_mod = sys._getframe(1).f_globals.get('__name__', '__main__')
- except (AttributeError, ValueError):
- def_mod = None
- if def_mod != 'typing_extensions':
- self.__module__ = def_mod
-
- def __repr__(self):
- if self.__covariant__:
- prefix = '+'
- elif self.__contravariant__:
- prefix = '-'
- else:
- prefix = '~'
- return prefix + self.__name__
-
- def __hash__(self):
- return object.__hash__(self)
-
- def __eq__(self, other):
- return self is other
-
- def __reduce__(self):
- return self.__name__
-
- # Hack to get typing._type_check to pass.
- def __call__(self, *args, **kwargs):
- pass
-
- if not PEP_560:
- # Only needed in 3.6.
- def _get_type_vars(self, tvars):
- if self not in tvars:
- tvars.append(self)
-
-
-# 3.6-3.9
-if not hasattr(typing, 'Concatenate'):
- # Inherits from list as a workaround for Callable checks in Python < 3.9.2.
- class _ConcatenateGenericAlias(list):
-
- # Trick Generic into looking into this for __parameters__.
- if PEP_560:
- __class__ = typing._GenericAlias
- else:
- __class__ = typing._TypingBase
-
- # Flag in 3.8.
- _special = False
- # Attribute in 3.6 and earlier.
- _gorg = typing.Generic
-
- def __init__(self, origin, args):
- super().__init__(args)
- self.__origin__ = origin
- self.__args__ = args
-
- def __repr__(self):
- _type_repr = typing._type_repr
- return (f'{_type_repr(self.__origin__)}'
- f'[{", ".join(_type_repr(arg) for arg in self.__args__)}]')
-
- def __hash__(self):
- return hash((self.__origin__, self.__args__))
-
- # Hack to get typing._type_check to pass in Generic.
- def __call__(self, *args, **kwargs):
- pass
-
- @property
- def __parameters__(self):
- return tuple(
- tp for tp in self.__args__ if isinstance(tp, (typing.TypeVar, ParamSpec))
- )
-
- if not PEP_560:
- # Only required in 3.6.
- def _get_type_vars(self, tvars):
- if self.__origin__ and self.__parameters__:
- typing._get_type_vars(self.__parameters__, tvars)
-
-
-# 3.6-3.9
-@typing._tp_cache
-def _concatenate_getitem(self, parameters):
- if parameters == ():
- raise TypeError("Cannot take a Concatenate of no types.")
- if not isinstance(parameters, tuple):
- parameters = (parameters,)
- if not isinstance(parameters[-1], ParamSpec):
- raise TypeError("The last parameter to Concatenate should be a "
- "ParamSpec variable.")
- msg = "Concatenate[arg, ...]: each arg must be a type."
- parameters = tuple(typing._type_check(p, msg) for p in parameters)
- return _ConcatenateGenericAlias(self, parameters)
-
-
-# 3.10+
-if hasattr(typing, 'Concatenate'):
- Concatenate = typing.Concatenate
- _ConcatenateGenericAlias = typing._ConcatenateGenericAlias # noqa
-# 3.9
-elif sys.version_info[:2] >= (3, 9):
- @_TypeAliasForm
- def Concatenate(self, parameters):
- """Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a
- higher order function which adds, removes or transforms parameters of a
- callable.
-
- For example::
-
- Callable[Concatenate[int, P], int]
-
- See PEP 612 for detailed information.
- """
- return _concatenate_getitem(self, parameters)
-# 3.7-8
-elif sys.version_info[:2] >= (3, 7):
- class _ConcatenateForm(typing._SpecialForm, _root=True):
- def __repr__(self):
- return 'typing_extensions.' + self._name
-
- def __getitem__(self, parameters):
- return _concatenate_getitem(self, parameters)
-
- Concatenate = _ConcatenateForm(
- 'Concatenate',
- doc="""Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a
- higher order function which adds, removes or transforms parameters of a
- callable.
-
- For example::
-
- Callable[Concatenate[int, P], int]
-
- See PEP 612 for detailed information.
- """)
-# 3.6
-else:
- class _ConcatenateAliasMeta(typing.TypingMeta):
- """Metaclass for Concatenate."""
-
- def __repr__(self):
- return 'typing_extensions.Concatenate'
-
- class _ConcatenateAliasBase(typing._FinalTypingBase,
- metaclass=_ConcatenateAliasMeta,
- _root=True):
- """Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a
- higher order function which adds, removes or transforms parameters of a
- callable.
-
- For example::
-
- Callable[Concatenate[int, P], int]
-
- See PEP 612 for detailed information.
- """
- __slots__ = ()
-
- def __instancecheck__(self, obj):
- raise TypeError("Concatenate cannot be used with isinstance().")
-
- def __subclasscheck__(self, cls):
- raise TypeError("Concatenate cannot be used with issubclass().")
-
- def __repr__(self):
- return 'typing_extensions.Concatenate'
-
- def __getitem__(self, parameters):
- return _concatenate_getitem(self, parameters)
-
- Concatenate = _ConcatenateAliasBase(_root=True)
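As a hedged illustration of the `Callable[Concatenate[...], ...]` pattern (the decorator and names below are made up for the example):

    from typing import Callable, TypeVar
    from typing_extensions import Concatenate, ParamSpec

    P = ParamSpec('P')
    R = TypeVar('R')

    # The wrapper supplies the leading int itself and forwards the rest (P).
    def with_three_retries(f: Callable[Concatenate[int, P], R]) -> Callable[P, R]:
        def inner(*args: P.args, **kwargs: P.kwargs) -> R:
            return f(3, *args, **kwargs)
        return inner

    @with_three_retries
    def fetch(retries: int, url: str) -> str:
        return f"{url} (retries={retries})"

    assert fetch("https://example.com") == "https://example.com (retries=3)"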
-
-# 3.10+
-if hasattr(typing, 'TypeGuard'):
- TypeGuard = typing.TypeGuard
-# 3.9
-elif sys.version_info[:2] >= (3, 9):
- class _TypeGuardForm(typing._SpecialForm, _root=True):
- def __repr__(self):
- return 'typing_extensions.' + self._name
-
- @_TypeGuardForm
- def TypeGuard(self, parameters):
- """Special typing form used to annotate the return type of a user-defined
- type guard function. ``TypeGuard`` only accepts a single type argument.
- At runtime, functions marked this way should return a boolean.
-
- ``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static
- type checkers to determine a more precise type of an expression within a
- program's code flow. Usually type narrowing is done by analyzing
- conditional code flow and applying the narrowing to a block of code. The
- conditional expression here is sometimes referred to as a "type guard".
-
- Sometimes it would be convenient to use a user-defined boolean function
- as a type guard. Such a function should use ``TypeGuard[...]`` as its
- return type to alert static type checkers to this intention.
-
- Using ``-> TypeGuard`` tells the static type checker that for a given
- function:
-
- 1. The return value is a boolean.
- 2. If the return value is ``True``, the type of its argument
- is the type inside ``TypeGuard``.
-
- For example::
-
- def is_str(val: Union[str, float]):
- # "isinstance" type guard
- if isinstance(val, str):
- # Type of ``val`` is narrowed to ``str``
- ...
- else:
- # Else, type of ``val`` is narrowed to ``float``.
- ...
-
- Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower
- form of ``TypeA`` (it can even be a wider form) and this may lead to
- type-unsafe results. The main reason is to allow for things like
- narrowing ``List[object]`` to ``List[str]`` even though the latter is not
- a subtype of the former, since ``List`` is invariant. The responsibility of
- writing type-safe type guards is left to the user.
-
- ``TypeGuard`` also works with type variables. For more information, see
- PEP 647 (User-Defined Type Guards).
- """
- item = typing._type_check(parameters, f'{self} accepts only single type.')
- return typing._GenericAlias(self, (item,))
-# 3.7-3.8
-elif sys.version_info[:2] >= (3, 7):
- class _TypeGuardForm(typing._SpecialForm, _root=True):
-
- def __repr__(self):
- return 'typing_extensions.' + self._name
-
- def __getitem__(self, parameters):
- item = typing._type_check(parameters,
- f'{self._name} accepts only a single type')
- return typing._GenericAlias(self, (item,))
-
- TypeGuard = _TypeGuardForm(
- 'TypeGuard',
- doc="""Special typing form used to annotate the return type of a user-defined
- type guard function. ``TypeGuard`` only accepts a single type argument.
- At runtime, functions marked this way should return a boolean.
-
- ``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static
- type checkers to determine a more precise type of an expression within a
- program's code flow. Usually type narrowing is done by analyzing
- conditional code flow and applying the narrowing to a block of code. The
- conditional expression here is sometimes referred to as a "type guard".
-
- Sometimes it would be convenient to use a user-defined boolean function
- as a type guard. Such a function should use ``TypeGuard[...]`` as its
- return type to alert static type checkers to this intention.
-
- Using ``-> TypeGuard`` tells the static type checker that for a given
- function:
-
- 1. The return value is a boolean.
- 2. If the return value is ``True``, the type of its argument
- is the type inside ``TypeGuard``.
-
- For example::
-
- def is_str(val: Union[str, float]):
- # "isinstance" type guard
- if isinstance(val, str):
- # Type of ``val`` is narrowed to ``str``
- ...
- else:
- # Else, type of ``val`` is narrowed to ``float``.
- ...
-
- Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower
- form of ``TypeA`` (it can even be a wider form) and this may lead to
- type-unsafe results. The main reason is to allow for things like
- narrowing ``List[object]`` to ``List[str]`` even though the latter is not
- a subtype of the former, since ``List`` is invariant. The responsibility of
- writing type-safe type guards is left to the user.
-
- ``TypeGuard`` also works with type variables. For more information, see
- PEP 647 (User-Defined Type Guards).
- """)
-# 3.6
-else:
- class _TypeGuard(typing._FinalTypingBase, _root=True):
- """Special typing form used to annotate the return type of a user-defined
- type guard function. ``TypeGuard`` only accepts a single type argument.
- At runtime, functions marked this way should return a boolean.
-
- ``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static
- type checkers to determine a more precise type of an expression within a
- program's code flow. Usually type narrowing is done by analyzing
- conditional code flow and applying the narrowing to a block of code. The
- conditional expression here is sometimes referred to as a "type guard".
-
- Sometimes it would be convenient to use a user-defined boolean function
- as a type guard. Such a function should use ``TypeGuard[...]`` as its
- return type to alert static type checkers to this intention.
-
- Using ``-> TypeGuard`` tells the static type checker that for a given
- function:
-
- 1. The return value is a boolean.
- 2. If the return value is ``True``, the type of its argument
- is the type inside ``TypeGuard``.
-
- For example::
-
- def is_str(val: Union[str, float]):
- # "isinstance" type guard
- if isinstance(val, str):
- # Type of ``val`` is narrowed to ``str``
- ...
- else:
- # Else, type of ``val`` is narrowed to ``float``.
- ...
-
- Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower
- form of ``TypeA`` (it can even be a wider form) and this may lead to
- type-unsafe results. The main reason is to allow for things like
- narrowing ``List[object]`` to ``List[str]`` even though the latter is not
- a subtype of the former, since ``List`` is invariant. The responsibility of
- writing type-safe type guards is left to the user.
-
- ``TypeGuard`` also works with type variables. For more information, see
- PEP 647 (User-Defined Type Guards).
- """
-
- __slots__ = ('__type__',)
-
- def __init__(self, tp=None, **kwds):
- self.__type__ = tp
-
- def __getitem__(self, item):
- cls = type(self)
- if self.__type__ is None:
- return cls(typing._type_check(item,
- f'{cls.__name__[1:]} accepts only a single type.'),
- _root=True)
- raise TypeError(f'{cls.__name__[1:]} cannot be further subscripted')
-
- def _eval_type(self, globalns, localns):
- new_tp = typing._eval_type(self.__type__, globalns, localns)
- if new_tp == self.__type__:
- return self
- return type(self)(new_tp, _root=True)
-
- def __repr__(self):
- r = super().__repr__()
- if self.__type__ is not None:
- r += f'[{typing._type_repr(self.__type__)}]'
- return r
-
- def __hash__(self):
- return hash((type(self).__name__, self.__type__))
-
- def __eq__(self, other):
- if not isinstance(other, _TypeGuard):
- return NotImplemented
- if self.__type__ is not None:
- return self.__type__ == other.__type__
- return self is other
-
- TypeGuard = _TypeGuard(_root=True)
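The classic PEP 647 example, reproduced here as a sketch of how the form above is meant to be used:

    from typing import List
    from typing_extensions import TypeGuard

    def is_str_list(val: List[object]) -> TypeGuard[List[str]]:
        """Narrows List[object] to List[str] when it returns True."""
        return all(isinstance(x, str) for x in val)

    def upper_all(items: List[object]) -> List[str]:
        if is_str_list(items):
            # A checker treats items as List[str] inside this branch.
            return [s.upper() for s in items]
        return []

    assert upper_all(["a", "b"]) == ["A", "B"]
    assert upper_all([1, 2]) == []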
-
-if hasattr(typing, "Self"):
- Self = typing.Self
-elif sys.version_info[:2] >= (3, 7):
- # Vendored from cpython typing._SpecialFrom
- class _SpecialForm(typing._Final, _root=True):
- __slots__ = ('_name', '__doc__', '_getitem')
-
- def __init__(self, getitem):
- self._getitem = getitem
- self._name = getitem.__name__
- self.__doc__ = getitem.__doc__
-
- def __getattr__(self, item):
- if item in {'__name__', '__qualname__'}:
- return self._name
-
- raise AttributeError(item)
-
- def __mro_entries__(self, bases):
- raise TypeError(f"Cannot subclass {self!r}")
-
- def __repr__(self):
- return f'typing_extensions.{self._name}'
-
- def __reduce__(self):
- return self._name
-
- def __call__(self, *args, **kwds):
- raise TypeError(f"Cannot instantiate {self!r}")
-
- def __or__(self, other):
- return typing.Union[self, other]
-
- def __ror__(self, other):
- return typing.Union[other, self]
-
- def __instancecheck__(self, obj):
- raise TypeError(f"{self} cannot be used with isinstance()")
-
- def __subclasscheck__(self, cls):
- raise TypeError(f"{self} cannot be used with issubclass()")
-
- @typing._tp_cache
- def __getitem__(self, parameters):
- return self._getitem(self, parameters)
-
- @_SpecialForm
- def Self(self, params):
- """Used to spell the type of "self" in classes.
-
- Example::
-
- from typing import Self
-
- class ReturnsSelf:
- def parse(self, data: bytes) -> Self:
- ...
- return self
-
- """
-
- raise TypeError(f"{self} is not subscriptable")
-else:
- class _Self(typing._FinalTypingBase, _root=True):
- """Used to spell the type of "self" in classes.
-
- Example::
-
- from typing import Self
-
- class ReturnsSelf:
- def parse(self, data: bytes) -> Self:
- ...
- return self
-
- """
-
- __slots__ = ()
-
- def __instancecheck__(self, obj):
- raise TypeError(f"{self} cannot be used with isinstance().")
-
- def __subclasscheck__(self, cls):
- raise TypeError(f"{self} cannot be used with issubclass().")
-
- Self = _Self(_root=True)
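A small sketch of why `Self` is preferable to hard-coding the class name: the annotation follows subclasses, while at runtime it is ordinary method chaining (class names below are illustrative):

    from typing_extensions import Self

    class Shape:
        def set_scale(self, scale: float) -> Self:
            self.scale = scale
            return self

    class Circle(Shape):
        def set_radius(self, radius: float) -> Self:
            self.radius = radius
            return self

    # With Self, a checker knows set_scale() on a Circle still yields a Circle,
    # so this chain type-checks; had Shape been the return type, it would not.
    c = Circle().set_scale(0.5).set_radius(2.0)
    assert isinstance(c, Circle)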
-
-
-if hasattr(typing, 'Required'):
- Required = typing.Required
- NotRequired = typing.NotRequired
-elif sys.version_info[:2] >= (3, 9):
- class _ExtensionsSpecialForm(typing._SpecialForm, _root=True):
- def __repr__(self):
- return 'typing_extensions.' + self._name
-
- @_ExtensionsSpecialForm
- def Required(self, parameters):
- """A special typing construct to mark a key of a total=False TypedDict
- as required. For example:
-
- class Movie(TypedDict, total=False):
- title: Required[str]
- year: int
-
- m = Movie(
- title='The Matrix', # typechecker error if key is omitted
- year=1999,
- )
-
- There is no runtime checking that a required key is actually provided
- when instantiating a related TypedDict.
- """
- item = typing._type_check(parameters, f'{self._name} accepts only single type')
- return typing._GenericAlias(self, (item,))
-
- @_ExtensionsSpecialForm
- def NotRequired(self, parameters):
- """A special typing construct to mark a key of a TypedDict as
- potentially missing. For example:
-
- class Movie(TypedDict):
- title: str
- year: NotRequired[int]
-
- m = Movie(
- title='The Matrix', # typechecker error if key is omitted
- year=1999,
- )
- """
- item = typing._type_check(parameters, f'{self._name} accepts only single type')
- return typing._GenericAlias(self, (item,))
-
-elif sys.version_info[:2] >= (3, 7):
- class _RequiredForm(typing._SpecialForm, _root=True):
- def __repr__(self):
- return 'typing_extensions.' + self._name
-
- def __getitem__(self, parameters):
- item = typing._type_check(parameters,
- '{} accepts only single type'.format(self._name))
- return typing._GenericAlias(self, (item,))
-
- Required = _RequiredForm(
- 'Required',
- doc="""A special typing construct to mark a key of a total=False TypedDict
- as required. For example:
-
- class Movie(TypedDict, total=False):
- title: Required[str]
- year: int
-
- m = Movie(
- title='The Matrix', # typechecker error if key is omitted
- year=1999,
- )
-
- There is no runtime checking that a required key is actually provided
- when instantiating a related TypedDict.
- """)
- NotRequired = _RequiredForm(
- 'NotRequired',
- doc="""A special typing construct to mark a key of a TypedDict as
- potentially missing. For example:
-
- class Movie(TypedDict):
- title: str
- year: NotRequired[int]
-
- m = Movie(
- title='The Matrix', # typechecker error if key is omitted
- year=1999,
- )
- """)
-else:
- # NOTE: Modeled after _Final's implementation when _FinalTypingBase available
- class _MaybeRequired(typing._FinalTypingBase, _root=True):
- __slots__ = ('__type__',)
-
- def __init__(self, tp=None, **kwds):
- self.__type__ = tp
-
- def __getitem__(self, item):
- cls = type(self)
- if self.__type__ is None:
- return cls(typing._type_check(item,
- '{} accepts only single type.'.format(cls.__name__[1:])),
- _root=True)
- raise TypeError('{} cannot be further subscripted'
- .format(cls.__name__[1:]))
-
- def _eval_type(self, globalns, localns):
- new_tp = typing._eval_type(self.__type__, globalns, localns)
- if new_tp == self.__type__:
- return self
- return type(self)(new_tp, _root=True)
-
- def __repr__(self):
- r = super().__repr__()
- if self.__type__ is not None:
- r += '[{}]'.format(typing._type_repr(self.__type__))
- return r
-
- def __hash__(self):
- return hash((type(self).__name__, self.__type__))
-
- def __eq__(self, other):
- if not isinstance(other, type(self)):
- return NotImplemented
- if self.__type__ is not None:
- return self.__type__ == other.__type__
- return self is other
-
- class _Required(_MaybeRequired, _root=True):
- """A special typing construct to mark a key of a total=False TypedDict
- as required. For example:
-
- class Movie(TypedDict, total=False):
- title: Required[str]
- year: int
-
- m = Movie(
- title='The Matrix', # typechecker error if key is omitted
- year=1999,
- )
-
- There is no runtime checking that a required key is actually provided
- when instantiating a related TypedDict.
- """
-
- class _NotRequired(_MaybeRequired, _root=True):
- """A special typing construct to mark a key of a TypedDict as
- potentially missing. For example:
-
- class Movie(TypedDict):
- title: str
- year: NotRequired[int]
-
- m = Movie(
- title='The Matrix', # typechecker error if key is omitted
- year=1999,
- )
- """
-
- Required = _Required(_root=True)
- NotRequired = _NotRequired(_root=True)
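A hedged sketch combining the two markers (enforcement is purely static; at runtime these remain plain dicts):

    from typing_extensions import NotRequired, Required, TypedDict

    class Movie(TypedDict, total=False):
        title: Required[str]   # must be provided even though total=False
        year: int              # optional because of total=False

    class Album(TypedDict):
        title: str
        year: NotRequired[int]  # may be omitted even though total=True (default)

    m: Movie = {'title': 'The Matrix'}
    a: Album = {'title': 'Blue Train'}
    assert m['title'] == 'The Matrix' and 'year' not in a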
diff --git a/spaces/BreadBytes1/SB-Dashboard/old_app.py b/spaces/BreadBytes1/SB-Dashboard/old_app.py
deleted file mode 100644
index 0dc0999378e781020db83d863734a7f2cdc6fc41..0000000000000000000000000000000000000000
--- a/spaces/BreadBytes1/SB-Dashboard/old_app.py
+++ /dev/null
@@ -1,327 +0,0 @@
-# ---
-# jupyter:
-# jupytext:
-# text_representation:
-# extension: .py
-# format_name: light
-# format_version: '1.5'
-# jupytext_version: 1.14.2
-# kernelspec:
-# display_name: Python [conda env:bbytes] *
-# language: python
-# name: conda-env-bbytes-py
-# ---
-
-# +
-import csv
-import pandas as pd
-from datetime import datetime, timedelta
-import numpy as np
-import datetime as dt
-import matplotlib.pyplot as plt
-from pathlib import Path
-
-import streamlit as st
-import plotly.express as px
-import altair as alt
-import dateutil.parser
-import copy
-
-
-# +
-@st.experimental_memo
-def get_hist_info(df_coin, principal_balance,plheader):
- numtrades = int(len(df_coin))
- numwin = int(sum(df_coin[plheader] > 0))
- numloss = int(sum(df_coin[plheader] < 0))
- winrate = int(np.round(100*numwin/numtrades,2))
-
- grosswin = sum(df_coin[df_coin[plheader] > 0][plheader])
- grossloss = sum(df_coin[df_coin[plheader] < 0][plheader])
- if grossloss !=0:
- pfactor = -1*np.round(grosswin/grossloss,2)
- else:
- pfactor = np.nan
- return numtrades, numwin, numloss, winrate, pfactor
-@st.experimental_memo
-def get_rolling_stats(df, lev, otimeheader, days):
- max_roll = (df[otimeheader].max() - df[otimeheader].min()).days
-
- if max_roll >= days:
- rollend = df[otimeheader].max()-timedelta(days=days)
- rolling_df = df[df[otimeheader] >= rollend]
-
- if len(rolling_df) > 0:
- rolling_perc = rolling_df['Return Per Trade'].dropna().cumprod().values[-1]-1
- else:
- rolling_perc = np.nan
- else:
- rolling_perc = np.nan
- return 100*rolling_perc
-
-@st.experimental_memo
-def filt_df(df, cheader, symbol_selections):
- """
- Inputs: df (pd.DataFrame), cheader (str) and symbol_selections (list[str]).
-
- Returns a filtered pd.DataFrame containing only data that matches symbol_selections (list[str])
- from df[cheader].
- """
-
- df = df.copy()
- df = df[df[cheader].isin(symbol_selections)]
-
- return df
-
-@st.experimental_memo
-def my_style(v, props=''):
- props = 'color:red' if v < 0 else 'color:green'
- return props
-
-@st.cache(ttl=24*3600, allow_output_mutation=True)
-def load_data(filename, otimeheader,fmat):
- df = pd.read_csv(open(filename,'r'), sep='\t') # so as not to mutate cached value
- df.columns = ['Trade','Signal','Entry Date','Buy Price', 'Sell Price','Exit Date', 'P/L per token', 'P/L %']
-
- # strip currency/percent formatting; regex=False so '$' is treated literally, not as a regex anchor
- df['Buy Price'] = df['Buy Price'].str.replace('$', '', regex=False)
- df['Sell Price'] = df['Sell Price'].str.replace('$', '', regex=False)
- df['Buy Price'] = df['Buy Price'].str.replace(',', '', regex=False)
- df['Sell Price'] = df['Sell Price'].str.replace(',', '', regex=False)
- df['P/L per token'] = df['P/L per token'].str.replace('$', '', regex=False)
- df['P/L per token'] = df['P/L per token'].str.replace(',', '', regex=False)
- df['P/L %'] = df['P/L %'].str.replace('%', '', regex=False)
-
- df['Buy Price'] = pd.to_numeric(df['Buy Price'])
- df['Sell Price'] = pd.to_numeric(df['Sell Price'])
- df['P/L per token'] = pd.to_numeric(df['P/L per token'])
- df['P/L %'] = pd.to_numeric(df['P/L %'])
-
- dateheader = 'Date'
- theader = 'Time'
-
- df[dateheader] = [tradetimes.split(" ")[0] for tradetimes in df[otimeheader].values]
- df[theader] = [tradetimes.split(" ")[1] for tradetimes in df[otimeheader].values]
-
- df[otimeheader]= [dateutil.parser.parse(date+' '+time)
- for date,time in zip(df[dateheader],df[theader])]
-
- df[otimeheader] = pd.to_datetime(df[otimeheader])
- df['Exit Date'] = pd.to_datetime(df['Exit Date'])
- df.sort_values(by=otimeheader, inplace=True)
-
- df[dateheader] = [dateutil.parser.parse(date).date() for date in df[dateheader]]
- df[theader] = [dateutil.parser.parse(time).time() for time in df[theader]]
- df['Trade'] = [i+1 for i in range(len(df))] #reindex
-
- return df
-
-def runapp():
- bot_selections = "Short Bread"
- otimeheader = 'Entry Date'
- plheader = 'Calculated Return %'
- fmat = '%Y-%m-%d %H:%M:%S'
- dollar_cap = 100000.00
- fees = .075/100
- st.header(f"{bot_selections} Performance Dashboard :bread: :moneybag:")
- st.write("Welcome to the Trading Bot Dashboard by BreadBytes! You can use this dashboard to track " +
- "the performance of our trading bots.")
- # st.sidebar.header("FAQ")
-
- # with st.sidebar.subheader("FAQ"):
- # st.write(Path("FAQ_README.md").read_text())
- st.subheader("Choose your settings:")
- no_errors = True
-
- data = load_data("SB-Trade-Log.csv",otimeheader,fmat)
- df = data.copy(deep=True)
-
- grouped_df = df.groupby('Exit Date').agg({'Signal':'min','Entry Date': 'min','Exit Date': 'max','Buy Price': 'mean',
- 'Sell Price' : 'max',
- 'P/L per token': 'mean',
- 'P/L %':lambda x: np.round(x.sum()/4,2)})
- grouped_df.index = range(1, len(grouped_df)+1)
- grouped_df.rename(columns={'Buy Price':'Avg. Buy Price',
- 'P/L per token':'Avg. P/L per token'}, inplace=True)
-
- dateheader = 'Date'
- theader = 'Time'
-
- with st.form("user input"):
- if no_errors:
- with st.container():
- col1, col2 = st.columns(2)
- with col1:
- try:
- startdate = st.date_input("Start Date", value=pd.to_datetime(df[otimeheader]).min())
- except Exception:
- st.error("Please select your exchange or upload a supported trade log file.")
- no_errors = False
- with col2:
- try:
- enddate = st.date_input("End Date", value=datetime.today())
- except Exception:
- st.error("Please select your exchange or upload a supported trade log file.")
- no_errors = False
- #st.sidebar.subheader("Customize your Dashboard")
-
- if no_errors and (enddate < startdate):
- st.error("End Date must be later than Start date. Please try again.")
- no_errors = False
- with st.container():
- col1,col2 = st.columns(2)
- with col2:
- lev = st.number_input('Leverage', min_value=1, value=1, max_value= 5, step=1)
- with col1:
- principal_balance = st.number_input('Starting Balance', min_value=0.00, value=1000.00, max_value= dollar_cap, step=.01)
-
- #hack way to get button centered
- c = st.columns(9)
- with c[4]:
- submitted = st.form_submit_button("Get Cookin'!")
-
- signal_map = {'Long': 1, 'Short':-1} # 1 for long #-1 for short
-
- df['Calculated Return %'] = (1-fees)*(df['Signal'].map(signal_map)*(df['Sell Price']-df['Buy Price'])/df['Buy Price'] - fees) #accounts for fees on open and close of trade
-
-
- if submitted and principal_balance * lev > dollar_cap:
- lev = np.floor(dollar_cap/principal_balance)
- st.error(f"WARNING: (Starting Balance)*(Leverage) exceeds the ${dollar_cap} limit. Using maximum available leverage of {lev}")
-
- if submitted and no_errors:
- df = df[(df[dateheader] >= startdate) & (df[dateheader] <= enddate)]
-
- if len(df) == 0:
- st.error("There are no available trades matching your selections. Please try again!")
- no_errors = False
- if no_errors:
- df['Return Per Trade'] = 1+lev*df['Calculated Return %'].values
-
- df['Compounded Return'] = df['Return Per Trade'].cumprod()
- df['New Balance'] = [min(dollar_cap/lev, bal*principal_balance) for bal in df['Compounded Return']]
- df['Balance used in Trade'] = np.concatenate([[principal_balance], df['New Balance'].values[:-1]])
- df['Net P/L Per Trade'] = (df['Return Per Trade']-1)*df['Balance used in Trade']
- df['Cumulative P/L'] = df['Net P/L Per Trade'].cumsum()
-
- cum_pl = df.loc[df.dropna().index[-1],'Cumulative P/L'] + principal_balance
-
- effective_return = 100*((cum_pl - principal_balance)/principal_balance)
-
- st.header(f"{bot_selections} Results")
- if len(bot_selections) > 1:
- st.metric(
- "Total Account Balance",
- f"${cum_pl:.2f}",
- f"{100*(cum_pl-principal_balance)/(principal_balance):.2f} %",
- )
-
- st.line_chart(data=df.dropna(), x='Exit Date', y='Cumulative P/L', use_container_width=True)
-
- df['Per Trade Return Rate'] = df['Return Per Trade']-1
-
- totals = pd.DataFrame([], columns = ['# of Trades', 'Wins', 'Losses', 'Win Rate', 'Profit Factor'])
- data = get_hist_info(df.dropna(), principal_balance,'Calculated Return %')
- totals.loc[len(totals)] = list(i for i in data)
-
- totals['Cum. P/L'] = cum_pl-principal_balance
- totals['Cum. P/L (%)'] = 100*(cum_pl-principal_balance)/principal_balance
- #results_df['Avg. P/L'] = (cum_pl-principal_balance)/results_df['# of Trades'].values[0]
- #results_df['Avg. P/L (%)'] = 100*results_df['Avg. P/L'].values[0]/principal_balance
-
- if df.empty:
- st.error("Oops! None of the data provided matches your selection(s). Please try again.")
- else:
- #st.dataframe(totals.style.format({'# of Trades': '{:.0f}','Wins': '{:.0f}','Losses': '{:.0f}','Win Rate': '{:.2f}%','Profit Factor' : '{:.2f}', 'Avg. P/L (%)': '{:.2f}%', 'Cum. P/L (%)': '{:.2f}%', 'Cum. P/L': '{:.2f}', 'Avg. P/L': '{:.2f}'})
- #.text_gradient(subset=['Win Rate'],cmap="RdYlGn", vmin = 0, vmax = 100)\
- #.text_gradient(subset=['Profit Factor'],cmap="RdYlGn", vmin = 0, vmax = 2), use_container_width=True)
- for row in totals.itertuples():
- col1, col2, col3, col4 = st.columns(4)
- c1, c2, c3, c4 = st.columns(4)
- with col1:
- st.metric(
- "Total Trades",
- f"{row._1:.0f}",
- )
- with c1:
- st.metric(
- "Profit Factor",
- f"{row._5:.2f}",
- )
- with col2:
- st.metric(
- "Wins",
- f"{row.Wins:.0f}",
- )
- with c2:
- st.metric(
- "Cumulative P/L",
- f"${row._6:.2f}",
- f"{row._7:.2f} %",
- )
- with col3:
- st.metric(
- "Losses",
- f"{row.Losses:.0f}",
- )
- with c3:
- st.metric(
- "Rolling 7 Days",
- "",#f"{(1+get_rolling_stats(df,otimeheader, 30))*principal_balance:.2f}",
- f"{get_rolling_stats(df,lev, otimeheader, 7):.2f}%",
- )
- st.metric(
- "Rolling 30 Days",
- "",#f"{(1+get_rolling_stats(df,otimeheader, 30))*principal_balance:.2f}",
- f"{get_rolling_stats(df,lev, otimeheader, 30):.2f}%",
- )
-
- with col4:
- st.metric(
- "Win Rate",
- f"{row._4:.1f}%",
- )
- with c4:
- st.metric(
- "Rolling 90 Days",
- "",#f"{(1+get_rolling_stats(df,otimeheader, 30))*principal_balance:.2f}",
- f"{get_rolling_stats(df,lev, otimeheader, 90):.2f}%",
- )
- st.metric(
- "Rolling 180 Days",
- "",#f"{(1+get_rolling_stats(df,otimeheader, 30))*principal_balance:.2f}",
- f"{get_rolling_stats(df,lev, otimeheader, 180):.2f}%",
- )
- if submitted:
- grouped_df = df.groupby('Exit Date').agg({'Signal':'min','Entry Date': 'min','Exit Date': 'max','Buy Price': 'mean',
- 'Sell Price' : 'max',
- 'Net P/L Per Trade': 'mean',
- 'Calculated Return %' : lambda x: np.round(100*lev*x.sum(),3)})
- grouped_df.index = range(1, len(grouped_df)+1)
- grouped_df.rename(columns={'Buy Price':'Avg. Buy Price',
- 'Net P/L Per Trade':'Net P/L',
- 'Calculated Return %':'P/L %'}, inplace=True)
- else:
- grouped_df = df.groupby('Exit Date').agg({'Signal':'min','Entry Date': 'min','Exit Date': 'max','Buy Price': 'mean',
- 'Sell Price' : 'max',
- 'P/L per token': 'mean',
- 'Calculated Return %' : lambda x: np.round(100*x.sum(),3)})
- grouped_df.index = range(1, len(grouped_df)+1)
- grouped_df.rename(columns={'Buy Price':'Avg. Buy Price',
- 'P/L per token':'Net P/L',
- 'Calculated Return %':'P/L %'}, inplace=True)
- st.subheader("Trade Logs")
- grouped_df['Entry Date'] = pd.to_datetime(grouped_df['Entry Date'])
- grouped_df['Exit Date'] = pd.to_datetime(grouped_df['Exit Date'])
- st.dataframe(grouped_df.style.format({'Entry Date':'{:%m-%d-%Y %H:%M:%S}','Exit Date':'{:%m-%d-%Y %H:%M:%S}','Avg. Buy Price': '${:.2f}', 'Sell Price': '${:.2f}', 'Net P/L':'${:.3f}', 'P/L %':'{:.2f}%'})\
- .applymap(my_style,subset=['Net P/L'])\
- .applymap(my_style,subset=['P/L %']), use_container_width=True)
-
-if __name__ == "__main__":
- st.set_page_config(
- "Trading Bot Dashboard",
- layout="wide",
- )
- runapp()
-# -
-
-
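A worked example (not from the original app) of the fee-adjusted, leveraged compounding that `runapp` applies above via the 'Calculated Return %', 'Return Per Trade' and 'Compounded Return' columns; the dollar-cap clamp on 'New Balance' is omitted for brevity:

    import numpy as np

    fees = 0.075 / 100      # same flat fee assumed in runapp
    lev = 2                 # leverage multiplier
    principal = 1000.00

    # (signal, buy, sell): +1 long, -1 short, matching signal_map in runapp
    trades = [(+1, 100.0, 110.0), (-1, 110.0, 104.5)]

    # Fee-adjusted raw return per trade ('Calculated Return %', as a fraction)
    raw = [(1 - fees) * (sig * (sell - buy) / buy - fees) for sig, buy, sell in trades]

    # Leveraged multiplier per trade and compounded ending balance
    per_trade = [1 + lev * r for r in raw]
    balance = principal * np.prod(per_trade)
    print([round(r, 5) for r in raw], round(balance, 2))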
diff --git a/spaces/CVMX-jaca-tonos/Identificar-lenguas-y-frases/README.md b/spaces/CVMX-jaca-tonos/Identificar-lenguas-y-frases/README.md
deleted file mode 100644
index 66f8d2cea00f6ab3c723ab0c5362bfa02269b227..0000000000000000000000000000000000000000
--- a/spaces/CVMX-jaca-tonos/Identificar-lenguas-y-frases/README.md
+++ /dev/null
@@ -1,16 +0,0 @@
----
-title: CVMX Jaca Tonos
-emoji: ⚡
-colorFrom: green
-colorTo: indigo
-sdk: gradio
-sdk_version: 2.9.4
-app_file: app.py
-pinned: false
-license: mit
----
-
-
-# CVMX-jaca-tonos
-A language identification project for the Common Voice MX hackathon ("jacatón")
-https://huggingface.co/spaces/CVMX-jaca-tonos/Identificar-lenguas-y-frases
\ No newline at end of file
diff --git a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/projects/DensePose/dev/run_instant_tests.sh b/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/projects/DensePose/dev/run_instant_tests.sh
deleted file mode 100644
index a53785180974a70bce7fdb0c9da4024166efd596..0000000000000000000000000000000000000000
--- a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/projects/DensePose/dev/run_instant_tests.sh
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/bin/bash -e
-# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
-
-BIN="python train_net.py"
-OUTPUT="instant_test_output"
-NUM_GPUS=2
-SOLVER_IMS_PER_BATCH=$((NUM_GPUS * 2))
-
-CFG_LIST=( "${@:1}" )
-if [ ${#CFG_LIST[@]} -eq 0 ]; then
- CFG_LIST=( ./configs/quick_schedules/*instant_test.yaml )
-fi
-
-echo "========================================================================"
-echo "Configs to run:"
-echo "${CFG_LIST[@]}"
-echo "========================================================================"
-
-for cfg in "${CFG_LIST[@]}"; do
- echo "========================================================================"
- echo "Running $cfg ..."
- echo "========================================================================"
- $BIN --num-gpus $NUM_GPUS --config-file "$cfg" \
- SOLVER.IMS_PER_BATCH $SOLVER_IMS_PER_BATCH \
- OUTPUT_DIR "$OUTPUT"
- rm -rf "$OUTPUT"
-done
-
diff --git a/spaces/CVPR/Dual-Key_Backdoor_Attacks/openvqa/openvqa/datasets/clevr/eval/result_eval.py b/spaces/CVPR/Dual-Key_Backdoor_Attacks/openvqa/openvqa/datasets/clevr/eval/result_eval.py
deleted file mode 100644
index e813b636ea5ed68c1bcce40637f0f0d80066df1e..0000000000000000000000000000000000000000
--- a/spaces/CVPR/Dual-Key_Backdoor_Attacks/openvqa/openvqa/datasets/clevr/eval/result_eval.py
+++ /dev/null
@@ -1,106 +0,0 @@
-# --------------------------------------------------------
-# OpenVQA
-# Written by Yuhao Cui https://github.com/cuiyuhao1996
-# --------------------------------------------------------
-
-import json, pickle
-import numpy as np
-from collections import defaultdict
-
-
-def eval(__C, dataset, ans_ix_list, pred_list, result_eval_file, ensemble_file, log_file, valid=False):
- result_eval_file = result_eval_file + '.txt'
-
- ans_size = dataset.ans_size
-
- result_eval_file_fs = open(result_eval_file, 'w')
- for qix in range(dataset.data_size):
- result_eval_file_fs.write(dataset.ix_to_ans[ans_ix_list[qix]])
- result_eval_file_fs.write("\n")
- result_eval_file_fs.close()
-
-
- if __C.TEST_SAVE_PRED:
- print('Save the prediction vector to file: {}'.format(ensemble_file))
-
- pred_list = np.array(pred_list).reshape(-1, ans_size)
- result_pred = [{
- 'pred': pred_list[qix],
- 'qid': qix
- } for qix in range(dataset.data_size)]
- pickle.dump(result_pred, open(ensemble_file, 'wb+'), protocol=-1)
-
-
- if valid:
- ques_file_path = __C.RAW_PATH[__C.DATASET][__C.SPLIT['val']]
-
- true_answers = []
- with open(ques_file_path, 'r') as f:
- questions = json.load(f)['questions']
- for ques in questions:
- true_answers.append(ques['answer'])
-
- correct_by_q_type = defaultdict(list)
-
- # Load predicted answers
- predicted_answers = []
- with open(result_eval_file, 'r') as f:
- for line in f:
- predicted_answers.append(line.strip())
-
- num_true, num_pred = len(true_answers), len(predicted_answers)
- assert num_true == num_pred, 'Expected %d answers but got %d' % (
- num_true, num_pred)
-
- for i, (true_answer, predicted_answer) in enumerate(zip(true_answers, predicted_answers)):
- correct = 1 if true_answer == predicted_answer else 0
- correct_by_q_type['Overall'].append(correct)
- q_type = questions[i]['program'][-1]['function']
- correct_by_q_type[q_type].append(correct)
-
- print('Write to log file: {}'.format(log_file))
- logfile = open(log_file, 'a+')
- q_dict = {}
- for q_type, vals in sorted(correct_by_q_type.items()):
- vals = np.asarray(vals)
- q_dict[q_type] = [vals.sum(), vals.shape[0]]
- # print(q_type, '%d / %d = %.2f' % (vals.sum(), vals.shape[0], 100.0 * vals.mean()))
- # logfile.write(q_type + ' : ' + '%d / %d = %.2f\n' % (vals.sum(), vals.shape[0], 100.0 * vals.mean()))
-
- # Score Summary
- score_type = ['Overall', 'Count', 'Exist', 'Compare_Numbers', 'Query_Attribute', 'Compare_Attribute']
- compare_numbers_type = ['greater_than', 'less_than']
- query_attribute_type = ['query_color', 'query_material', 'query_shape', 'query_size']
- compare_attribute_type = ['equal_color', 'equal_integer', 'equal_material', 'equal_shape', 'equal_size']
- score_dict = {}
- score_dict['Overall'] = q_dict['Overall']
- score_dict['Count'] = q_dict['count']
- score_dict['Exist'] = q_dict['exist']
-
- correct_num, total_num = 0, 0
- for q_type in compare_numbers_type:
- correct_num += q_dict[q_type][0]
- total_num += q_dict[q_type][1]
- score_dict['Compare_Numbers'] = [correct_num, total_num]
-
- correct_num, total_num = 0, 0
- for q_type in query_attribute_type:
- correct_num += q_dict[q_type][0]
- total_num += q_dict[q_type][1]
- score_dict['Query_Attribute'] = [correct_num, total_num]
-
- correct_num, total_num = 0, 0
- for q_type in compare_attribute_type:
- correct_num += q_dict[q_type][0]
- total_num += q_dict[q_type][1]
- score_dict['Compare_Attribute'] = [correct_num, total_num]
-
- for q_type in score_type:
- val, tol = score_dict[q_type]
- print(q_type, '%d / %d = %.2f' % (val, tol, 100.0 * val / tol))
- logfile.write(q_type + ' : ' + '%d / %d = %.2f\n' % (val, tol, 100.0 * val / tol))
-
- logfile.write("\n")
- logfile.close()
-
-
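A toy illustration (made-up numbers) of the category roll-up performed at the end of `eval` above, where compound categories sum their member question types before the percentage is computed:

    q_dict = {'greater_than': [40, 50], 'less_than': [45, 50]}

    correct = sum(q_dict[t][0] for t in ('greater_than', 'less_than'))
    total = sum(q_dict[t][1] for t in ('greater_than', 'less_than'))
    print('Compare_Numbers', '%d / %d = %.2f' % (correct, total, 100.0 * correct / total))
    # Compare_Numbers 85 / 100 = 85.00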
diff --git a/spaces/CVPR/LIVE/thrust/thrust/system/cuda/future.h b/spaces/CVPR/LIVE/thrust/thrust/system/cuda/future.h
deleted file mode 100644
index fc2986f8b2238d9aca56db1a99c7dc6b0d7fd259..0000000000000000000000000000000000000000
--- a/spaces/CVPR/LIVE/thrust/thrust/system/cuda/future.h
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright (c) 2018 NVIDIA Corporation
-// Author: Bryce Adelstein Lelbach
-//
-// Distributed under the Boost Software License v1.0 (boost.org/LICENSE_1_0.txt)
-
-#pragma once
-
-#include
-#include
-#include
-
-#if THRUST_CPP_DIALECT >= 2011 && !defined(THRUST_LEGACY_GCC)
-
-#include
-#include
-
-namespace thrust
-{
-
-namespace system { namespace cuda
-{
-
-struct ready_event;
-
-template <typename T>
-struct ready_future;
-
-struct unique_eager_event;
-
-template <typename T>
-struct unique_eager_future;
-
-template <typename... Events>
-__host__
-unique_eager_event when_all(Events&&... evs);
-
-}} // namespace system::cuda
-
-namespace cuda
-{
-
-using thrust::system::cuda::ready_event;
-
-using thrust::system::cuda::ready_future;
-
-using thrust::system::cuda::unique_eager_event;
-using event = unique_eager_event;
-
-using thrust::system::cuda::unique_eager_future;
-template <typename T> using future = unique_eager_future<T>;
-
-using thrust::system::cuda::when_all;
-
-} // namespace cuda
-
-template <typename DerivedPolicy>
-__host__
-thrust::cuda::unique_eager_event
-unique_eager_event_type(
- thrust::cuda::execution_policy<DerivedPolicy> const&
-) noexcept;
-
-template <typename X, typename DerivedPolicy>
-__host__
-thrust::cuda::unique_eager_future<X>
-unique_eager_future_type(
- thrust::cuda::execution_policy<DerivedPolicy> const&
-) noexcept;
-
-} // end namespace thrust
-
-#include
-
-#endif
-
diff --git a/spaces/CVPR/LIVE/thrust/thrust/system/tbb/memory_resource.h b/spaces/CVPR/LIVE/thrust/thrust/system/tbb/memory_resource.h
deleted file mode 100644
index de664eb9374904d92984bb7626dbeb6d3d8e8df8..0000000000000000000000000000000000000000
--- a/spaces/CVPR/LIVE/thrust/thrust/system/tbb/memory_resource.h
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Copyright 2018 NVIDIA Corporation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*! \file tbb/memory_resource.h
- * \brief Memory resources for the TBB system.
- */
-
-#pragma once
-
-#include
-#include
-#include
-
-#include
-
-namespace thrust
-{
-namespace system
-{
-namespace tbb
-{
-
-//! \cond
-namespace detail
-{
- typedef thrust::mr::fancy_pointer_resource<
- thrust::mr::new_delete_resource,
- thrust::tbb::pointer<void>
- > native_resource;
-}
-//! \endcond
-
-/*! \addtogroup memory_resources Memory Resources
- * \ingroup memory_management_classes
- * \{
- */
-
-/*! The memory resource for the TBB system. Uses \p mr::new_delete_resource and tags it with \p tbb::pointer. */
-typedef detail::native_resource memory_resource;
-/*! An alias for \p tbb::memory_resource. */
-typedef detail::native_resource universal_memory_resource;
-/*! An alias for \p tbb::memory_resource. */
-typedef detail::native_resource universal_host_pinned_memory_resource;
-
-/*! \}
- */
-
-}
-}
-}
diff --git a/spaces/Caoyunkang/Segment-Any-Anomaly/GroundingDINO/groundingdino/models/GroundingDINO/csrc/MsDeformAttn/ms_deform_attn_cpu.h b/spaces/Caoyunkang/Segment-Any-Anomaly/GroundingDINO/groundingdino/models/GroundingDINO/csrc/MsDeformAttn/ms_deform_attn_cpu.h
deleted file mode 100644
index b2b88e8c46f19b6db0933163e57ccdb51180f517..0000000000000000000000000000000000000000
--- a/spaces/Caoyunkang/Segment-Any-Anomaly/GroundingDINO/groundingdino/models/GroundingDINO/csrc/MsDeformAttn/ms_deform_attn_cpu.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*!
-**************************************************************************************************
-* Deformable DETR
-* Copyright (c) 2020 SenseTime. All Rights Reserved.
-* Licensed under the Apache License, Version 2.0 [see LICENSE for details]
-**************************************************************************************************
-* Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
-**************************************************************************************************
-*/
-
-#pragma once
-#include
-
-namespace groundingdino {
-
-at::Tensor
-ms_deform_attn_cpu_forward(
- const at::Tensor &value,
- const at::Tensor &spatial_shapes,
- const at::Tensor &level_start_index,
- const at::Tensor &sampling_loc,
- const at::Tensor &attn_weight,
- const int im2col_step);
-
-std::vector<at::Tensor>
-ms_deform_attn_cpu_backward(
- const at::Tensor &value,
- const at::Tensor &spatial_shapes,
- const at::Tensor &level_start_index,
- const at::Tensor &sampling_loc,
- const at::Tensor &attn_weight,
- const at::Tensor &grad_output,
- const int im2col_step);
-
-} // namespace groundingdino
diff --git a/spaces/Caoyunkang/Segment-Any-Anomaly/GroundingDINO/groundingdino/models/GroundingDINO/utils.py b/spaces/Caoyunkang/Segment-Any-Anomaly/GroundingDINO/groundingdino/models/GroundingDINO/utils.py
deleted file mode 100644
index 5bd18f70225e12b2e27fdb4eabcde91d959f8e31..0000000000000000000000000000000000000000
--- a/spaces/Caoyunkang/Segment-Any-Anomaly/GroundingDINO/groundingdino/models/GroundingDINO/utils.py
+++ /dev/null
@@ -1,268 +0,0 @@
-# ------------------------------------------------------------------------
-# Grounding DINO
-# url: https://github.com/IDEA-Research/GroundingDINO
-# Copyright (c) 2023 IDEA. All Rights Reserved.
-# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
-# ------------------------------------------------------------------------
-
-import copy
-import math
-
-import torch
-import torch.nn.functional as F
-from torch import Tensor, nn
-
-
-def _get_clones(module, N, layer_share=False):
- # import ipdb; ipdb.set_trace()
- if layer_share:
- return nn.ModuleList([module for i in range(N)])
- else:
- return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
-
-
-def get_sine_pos_embed(
- pos_tensor: torch.Tensor,
- num_pos_feats: int = 128,
- temperature: int = 10000,
- exchange_xy: bool = True,
-):
- """generate sine position embedding from a position tensor
- Args:
- pos_tensor (torch.Tensor): shape: [..., n].
- num_pos_feats (int): projected shape for each float in the tensor.
- temperature (int): temperature in the sine/cosine function.
- exchange_xy (bool, optional): exchange pos x and pos y. \
- For example, if the input tensor is [x, y], the result will be [pos(y), pos(x)]. Defaults to True.
- Returns:
- pos_embed (torch.Tensor): shape: [..., n*num_pos_feats].
- """
- scale = 2 * math.pi
- dim_t = torch.arange(num_pos_feats, dtype=torch.float32, device=pos_tensor.device)
- dim_t = temperature ** (2 * torch.div(dim_t, 2, rounding_mode="floor") / num_pos_feats)
-
- def sine_func(x: torch.Tensor):
- sin_x = x * scale / dim_t
- sin_x = torch.stack((sin_x[..., 0::2].sin(), sin_x[..., 1::2].cos()), dim=3).flatten(2)
- return sin_x
-
- pos_res = [sine_func(x) for x in pos_tensor.split([1] * pos_tensor.shape[-1], dim=-1)]
- if exchange_xy:
- pos_res[0], pos_res[1] = pos_res[1], pos_res[0]
- pos_res = torch.cat(pos_res, dim=-1)
- return pos_res
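A minimal shape check for the helper above, assuming get_sine_pos_embed is in scope; the tensor sizes are illustrative, not taken from the original code:

import torch

pos = torch.rand(2, 5, 4)                          # [..., n] with n = 4 coordinates
emb = get_sine_pos_embed(pos, num_pos_feats=128)   # defined above
print(emb.shape)                                   # torch.Size([2, 5, 512]) == [..., n * num_pos_feats]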
-
-
-def gen_encoder_output_proposals(
- memory: Tensor, memory_padding_mask: Tensor, spatial_shapes: Tensor, learnedwh=None
-):
- """
- Input:
- - memory: bs, \sum{hw}, d_model
- - memory_padding_mask: bs, \sum{hw}
- - spatial_shapes: nlevel, 2
- - learnedwh: 2
- Output:
- - output_memory: bs, \sum{hw}, d_model
- - output_proposals: bs, \sum{hw}, 4
- """
- N_, S_, C_ = memory.shape
- proposals = []
- _cur = 0
- for lvl, (H_, W_) in enumerate(spatial_shapes):
- mask_flatten_ = memory_padding_mask[:, _cur : (_cur + H_ * W_)].view(N_, H_, W_, 1)
- valid_H = torch.sum(~mask_flatten_[:, :, 0, 0], 1)
- valid_W = torch.sum(~mask_flatten_[:, 0, :, 0], 1)
-
- # import ipdb; ipdb.set_trace()
-
- grid_y, grid_x = torch.meshgrid(
- torch.linspace(0, H_ - 1, H_, dtype=torch.float32, device=memory.device),
- torch.linspace(0, W_ - 1, W_, dtype=torch.float32, device=memory.device),
- )
- grid = torch.cat([grid_x.unsqueeze(-1), grid_y.unsqueeze(-1)], -1) # H_, W_, 2
-
- scale = torch.cat([valid_W.unsqueeze(-1), valid_H.unsqueeze(-1)], 1).view(N_, 1, 1, 2)
- grid = (grid.unsqueeze(0).expand(N_, -1, -1, -1) + 0.5) / scale
-
- if learnedwh is not None:
- # import ipdb; ipdb.set_trace()
- wh = torch.ones_like(grid) * learnedwh.sigmoid() * (2.0**lvl)
- else:
- wh = torch.ones_like(grid) * 0.05 * (2.0**lvl)
-
- # scale = torch.cat([W_[None].unsqueeze(-1), H_[None].unsqueeze(-1)], 1).view(1, 1, 1, 2).repeat(N_, 1, 1, 1)
- # grid = (grid.unsqueeze(0).expand(N_, -1, -1, -1) + 0.5) / scale
- # wh = torch.ones_like(grid) / scale
- proposal = torch.cat((grid, wh), -1).view(N_, -1, 4)
- proposals.append(proposal)
- _cur += H_ * W_
- # import ipdb; ipdb.set_trace()
- output_proposals = torch.cat(proposals, 1)
- output_proposals_valid = ((output_proposals > 0.01) & (output_proposals < 0.99)).all(
- -1, keepdim=True
- )
- output_proposals = torch.log(output_proposals / (1 - output_proposals)) # unsigmoid
- output_proposals = output_proposals.masked_fill(memory_padding_mask.unsqueeze(-1), float("inf"))
- output_proposals = output_proposals.masked_fill(~output_proposals_valid, float("inf"))
-
- output_memory = memory
- output_memory = output_memory.masked_fill(memory_padding_mask.unsqueeze(-1), float(0))
- output_memory = output_memory.masked_fill(~output_proposals_valid, float(0))
-
- # output_memory = output_memory.masked_fill(memory_padding_mask.unsqueeze(-1), float('inf'))
- # output_memory = output_memory.masked_fill(~output_proposals_valid, float('inf'))
-
- return output_memory, output_proposals
-
-
-class RandomBoxPerturber:
- def __init__(
- self, x_noise_scale=0.2, y_noise_scale=0.2, w_noise_scale=0.2, h_noise_scale=0.2
- ) -> None:
- self.noise_scale = torch.Tensor(
- [x_noise_scale, y_noise_scale, w_noise_scale, h_noise_scale]
- )
-
- def __call__(self, refanchors: Tensor) -> Tensor:
- nq, bs, query_dim = refanchors.shape
- device = refanchors.device
-
- noise_raw = torch.rand_like(refanchors)
- noise_scale = self.noise_scale.to(device)[:query_dim]
-
- new_refanchors = refanchors * (1 + (noise_raw - 0.5) * noise_scale)
- return new_refanchors.clamp_(0, 1)
-
-
-def sigmoid_focal_loss(
- inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2, no_reduction=False
-):
- """
- Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.
- Args:
- inputs: A float tensor of arbitrary shape.
- The predictions for each example.
- targets: A float tensor with the same shape as inputs. Stores the binary
- classification label for each element in inputs
- (0 for the negative class and 1 for the positive class).
- alpha: (optional) Weighting factor in range (0,1) to balance
- positive vs negative examples. Defaults to 0.25; a negative value disables weighting.
- gamma: Exponent of the modulating factor (1 - p_t) to
- balance easy vs hard examples.
- Returns:
- Loss tensor
- """
- prob = inputs.sigmoid()
- ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
- p_t = prob * targets + (1 - prob) * (1 - targets)
- loss = ce_loss * ((1 - p_t) ** gamma)
-
- if alpha >= 0:
- alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
- loss = alpha_t * loss
-
- if no_reduction:
- return loss
-
- return loss.mean(1).sum() / num_boxes
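A small, hedged usage sketch of the focal loss above; the shapes and num_boxes value are made up for illustration:

import torch

logits = torch.randn(2, 900, 256)       # per-query classification logits (illustrative shape)
targets = torch.zeros_like(logits)      # binary labels, mostly background here
loss = sigmoid_focal_loss(logits, targets, num_boxes=10)  # defined above
print(loss)                             # scalar tensor: loss.mean(1).sum() / num_boxes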
-
-
-class MLP(nn.Module):
- """Very simple multi-layer perceptron (also called FFN)"""
-
- def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
- super().__init__()
- self.num_layers = num_layers
- h = [hidden_dim] * (num_layers - 1)
- self.layers = nn.ModuleList(
- nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])
- )
-
- def forward(self, x):
- for i, layer in enumerate(self.layers):
- x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
- return x
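A hedged example of the MLP head above; the dimensions are illustrative (e.g. a 3-layer box head over 900 queries):

import torch

head = MLP(input_dim=256, hidden_dim=256, output_dim=4, num_layers=3)  # defined above
queries = torch.randn(2, 900, 256)
print(head(queries).shape)  # torch.Size([2, 900, 4])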
-
-
-def _get_activation_fn(activation, d_model=256, batch_dim=0):
- """Return an activation function given a string"""
- if activation == "relu":
- return F.relu
- if activation == "gelu":
- return F.gelu
- if activation == "glu":
- return F.glu
- if activation == "prelu":
- return nn.PReLU()
- if activation == "selu":
- return F.selu
-
- raise RuntimeError(f"activation should be relu/gelu, not {activation}.")
-
-
-def gen_sineembed_for_position(pos_tensor):
- # n_query, bs, _ = pos_tensor.size()
- # sineembed_tensor = torch.zeros(n_query, bs, 256)
- scale = 2 * math.pi
- dim_t = torch.arange(128, dtype=torch.float32, device=pos_tensor.device)
- dim_t = 10000 ** (2 * (torch.div(dim_t, 2, rounding_mode='floor')) / 128)
- x_embed = pos_tensor[:, :, 0] * scale
- y_embed = pos_tensor[:, :, 1] * scale
- pos_x = x_embed[:, :, None] / dim_t
- pos_y = y_embed[:, :, None] / dim_t
- pos_x = torch.stack((pos_x[:, :, 0::2].sin(), pos_x[:, :, 1::2].cos()), dim=3).flatten(2)
- pos_y = torch.stack((pos_y[:, :, 0::2].sin(), pos_y[:, :, 1::2].cos()), dim=3).flatten(2)
- if pos_tensor.size(-1) == 2:
- pos = torch.cat((pos_y, pos_x), dim=2)
- elif pos_tensor.size(-1) == 4:
- w_embed = pos_tensor[:, :, 2] * scale
- pos_w = w_embed[:, :, None] / dim_t
- pos_w = torch.stack((pos_w[:, :, 0::2].sin(), pos_w[:, :, 1::2].cos()), dim=3).flatten(2)
-
- h_embed = pos_tensor[:, :, 3] * scale
- pos_h = h_embed[:, :, None] / dim_t
- pos_h = torch.stack((pos_h[:, :, 0::2].sin(), pos_h[:, :, 1::2].cos()), dim=3).flatten(2)
-
- pos = torch.cat((pos_y, pos_x, pos_w, pos_h), dim=2)
- else:
- raise ValueError("Unknown pos_tensor shape(-1):{}".format(pos_tensor.size(-1)))
- return pos
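A quick shape check for the positional embedding above, assuming the function is in scope; the query and batch sizes are illustrative:

import torch

ref_points = torch.rand(900, 2, 4)                   # [n_query, bs, (cx, cy, w, h)]
print(gen_sineembed_for_position(ref_points).shape)  # torch.Size([900, 2, 512])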
-
-
-class ContrastiveEmbed(nn.Module):
- def __init__(self, max_text_len=256):
- """
- Args:
- max_text_len: max length of text.
- """
- super().__init__()
- self.max_text_len = max_text_len
-
- def forward(self, x, text_dict):
- """_summary_
-
- Args:
- x (_type_): _description_
- text_dict (_type_): _description_
- {
- 'encoded_text': encoded_text, # bs, 195, d_model
- 'text_token_mask': text_token_mask, # bs, 195
- # True for used tokens. False for padding tokens
- }
- Returns:
- _type_: _description_
- """
- assert isinstance(text_dict, dict)
-
- y = text_dict["encoded_text"]
- text_token_mask = text_dict["text_token_mask"]
-
- res = x @ y.transpose(-1, -2)
- res.masked_fill_(~text_token_mask[:, None, :], float("-inf"))
-
- # padding to max_text_len
- new_res = torch.full((*res.shape[:-1], self.max_text_len), float("-inf"), device=res.device)
- new_res[..., : res.shape[-1]] = res
-
- return new_res
diff --git a/spaces/ChandraMohanNayal/AutoGPT/autogpt/agent/agent_manager.py b/spaces/ChandraMohanNayal/AutoGPT/autogpt/agent/agent_manager.py
deleted file mode 100644
index 898767a485e50b5e62625a7883edf1b30d5fddf9..0000000000000000000000000000000000000000
--- a/spaces/ChandraMohanNayal/AutoGPT/autogpt/agent/agent_manager.py
+++ /dev/null
@@ -1,103 +0,0 @@
-"""Agent manager for managing GPT agents"""
-from __future__ import annotations
-
-from typing import Union
-
-from autogpt.config.config import Singleton
-from autogpt.llm_utils import create_chat_completion
-
-
-class AgentManager(metaclass=Singleton):
- """Agent manager for managing GPT agents"""
-
- def __init__(self):
- self.next_key = 0
- self.agents = {} # key, (task, full_message_history, model)
-
- # Create new GPT agent
- # TODO: Centralise use of create_chat_completion() to globally enforce token limit
-
- def create_agent(self, task: str, prompt: str, model: str) -> tuple[int, str]:
- """Create a new agent and return its key
-
- Args:
- task: The task to perform
- prompt: The prompt to use
- model: The model to use
-
- Returns:
- The key of the new agent and the agent's first reply
- """
- messages = [
- {"role": "user", "content": prompt},
- ]
-
- # Start GPT instance
- agent_reply = create_chat_completion(
- model=model,
- messages=messages,
- )
-
- # Update full message history
- messages.append({"role": "assistant", "content": agent_reply})
-
- key = self.next_key
- # This is done instead of len(agents) to make keys unique even if agents
- # are deleted
- self.next_key += 1
-
- self.agents[key] = (task, messages, model)
-
- return key, agent_reply
-
- def message_agent(self, key: str | int, message: str) -> str:
- """Send a message to an agent and return its response
-
- Args:
- key: The key of the agent to message
- message: The message to send to the agent
-
- Returns:
- The agent's response
- """
- task, messages, model = self.agents[int(key)]
-
- # Add user message to message history before sending to agent
- messages.append({"role": "user", "content": message})
-
- # Start GPT instance
- agent_reply = create_chat_completion(
- model=model,
- messages=messages,
- )
-
- # Update full message history
- messages.append({"role": "assistant", "content": agent_reply})
-
- return agent_reply
-
- def list_agents(self) -> list[tuple[str | int, str]]:
- """Return a list of all agents
-
- Returns:
- A list of tuples of the form (key, task)
- """
-
- # Return a list of agent keys and their tasks
- return [(key, task) for key, (task, _, _) in self.agents.items()]
-
- def delete_agent(self, key: Union[str, int]) -> bool:
- """Delete an agent from the agent manager
-
- Args:
- key: The key of the agent to delete
-
- Returns:
- True if successful, False otherwise
- """
-
- try:
- del self.agents[int(key)]
- return True
- except KeyError:
- return False
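A hedged usage sketch of the manager above. It assumes the AutoGPT package is installed and an OpenAI key is configured, since create_chat_completion performs a network call; the task, prompt, and model values are placeholders:

from autogpt.agent.agent_manager import AgentManager

manager = AgentManager()  # Singleton metaclass: repeated calls return the same instance
key, first_reply = manager.create_agent(
    task="summarise a file",
    prompt="You are a helpful summariser.",
    model="gpt-3.5-turbo",
)
reply = manager.message_agent(key, "Summarise: hello world")
print(manager.list_agents())   # e.g. [(0, "summarise a file")] for a fresh manager
manager.delete_agent(key)      # True if the key existed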
diff --git a/spaces/ChristopherMarais/Andrew_AI-BB_classification-beta/mysite/andrew_alpha/models.py b/spaces/ChristopherMarais/Andrew_AI-BB_classification-beta/mysite/andrew_alpha/models.py
deleted file mode 100644
index 24e16895d519d493d6e672d153b81507eabe4043..0000000000000000000000000000000000000000
--- a/spaces/ChristopherMarais/Andrew_AI-BB_classification-beta/mysite/andrew_alpha/models.py
+++ /dev/null
@@ -1 +0,0 @@
-# from django.db import models
diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/cffLib/width.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/cffLib/width.py
deleted file mode 100644
index c0a746b6922d4c66d0559078457c9546c77c65d3..0000000000000000000000000000000000000000
--- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/cffLib/width.py
+++ /dev/null
@@ -1,209 +0,0 @@
-# -*- coding: utf-8 -*-
-
-"""T2CharString glyph width optimizer.
-
-CFF glyphs whose width equals the CFF Private dictionary's ``defaultWidthX``
-value do not need to specify their width in their charstring, saving bytes.
-This module determines the optimum ``defaultWidthX`` and ``nominalWidthX``
-values for a font, when provided with a list of glyph widths."""
-
-from fontTools.ttLib import TTFont
-from collections import defaultdict
-from operator import add
-from functools import reduce
-
-
-class missingdict(dict):
- def __init__(self, missing_func):
- self.missing_func = missing_func
-
- def __missing__(self, v):
- return self.missing_func(v)
-
-
-def cumSum(f, op=add, start=0, decreasing=False):
-
- keys = sorted(f.keys())
- minx, maxx = keys[0], keys[-1]
-
- total = reduce(op, f.values(), start)
-
- if decreasing:
- missing = lambda x: start if x > maxx else total
- domain = range(maxx, minx - 1, -1)
- else:
- missing = lambda x: start if x < minx else total
- domain = range(minx, maxx + 1)
-
- out = missingdict(missing)
-
- v = start
- for x in domain:
- v = op(v, f[x])
- out[x] = v
-
- return out
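A tiny worked example of cumSum above. Note that the function indexes every integer in [min, max], so the input should behave like a defaultdict (as the callers below arrange):

from collections import defaultdict

freq = defaultdict(int, {500: 2, 510: 1})   # width -> number of glyphs
cum = cumSum(freq)                          # defined above; cumulative counts over [500, 510]
print(cum[505], cum[499], cum[600])         # 2 0 3  (below the range -> 0, above -> total)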
-
-
-def byteCost(widths, default, nominal):
-
- if not hasattr(widths, "items"):
- d = defaultdict(int)
- for w in widths:
- d[w] += 1
- widths = d
-
- cost = 0
- for w, freq in widths.items():
- if w == default:
- continue
- diff = abs(w - nominal)
- if diff <= 107:
- cost += freq
- elif diff <= 1131:
- cost += freq * 2
- else:
- cost += freq * 5
- return cost
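A worked example of the cost model above (0 bytes when the width equals the default, 1 byte when |w - nominal| <= 107, 2 bytes up to 1131, otherwise 5); the widths are made up:

widths = [500, 500, 600, 1200]
# default=500: the two 500s cost nothing; |600-600|=0 -> 1 byte; |1200-600|=600 -> 2 bytes
print(byteCost(widths, default=500, nominal=600))  # 3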
-
-
-def optimizeWidthsBruteforce(widths):
- """Bruteforce version. Veeeeeeeeeeeeeeeeery slow. Only works for smallests of fonts."""
-
- d = defaultdict(int)
- for w in widths:
- d[w] += 1
-
- # Maximum number of bytes using default can possibly save
- maxDefaultAdvantage = 5 * max(d.values())
-
- minw, maxw = min(widths), max(widths)
- domain = list(range(minw, maxw + 1))
-
- bestCostWithoutDefault = min(byteCost(widths, None, nominal) for nominal in domain)
-
- bestCost = len(widths) * 5 + 1
- for nominal in domain:
- if byteCost(widths, None, nominal) > bestCost + maxDefaultAdvantage:
- continue
- for default in domain:
- cost = byteCost(widths, default, nominal)
- if cost < bestCost:
- bestCost = cost
- bestDefault = default
- bestNominal = nominal
-
- return bestDefault, bestNominal
-
-
-def optimizeWidths(widths):
- """Given a list of glyph widths, or dictionary mapping glyph width to number of
- glyphs having that, returns a tuple of best CFF default and nominal glyph widths.
-
- This algorithm is linear in UPEM+numGlyphs."""
-
- if not hasattr(widths, "items"):
- d = defaultdict(int)
- for w in widths:
- d[w] += 1
- widths = d
-
- keys = sorted(widths.keys())
- minw, maxw = keys[0], keys[-1]
- domain = list(range(minw, maxw + 1))
-
- # Cumulative sum/max forward/backward.
- cumFrqU = cumSum(widths, op=add)
- cumMaxU = cumSum(widths, op=max)
- cumFrqD = cumSum(widths, op=add, decreasing=True)
- cumMaxD = cumSum(widths, op=max, decreasing=True)
-
- # Cost per nominal choice, without default consideration.
- nomnCostU = missingdict(
- lambda x: cumFrqU[x] + cumFrqU[x - 108] + cumFrqU[x - 1132] * 3
- )
- nomnCostD = missingdict(
- lambda x: cumFrqD[x] + cumFrqD[x + 108] + cumFrqD[x + 1132] * 3
- )
- nomnCost = missingdict(lambda x: nomnCostU[x] + nomnCostD[x] - widths[x])
-
- # Cost-saving per nominal choice, by best default choice.
- dfltCostU = missingdict(
- lambda x: max(cumMaxU[x], cumMaxU[x - 108] * 2, cumMaxU[x - 1132] * 5)
- )
- dfltCostD = missingdict(
- lambda x: max(cumMaxD[x], cumMaxD[x + 108] * 2, cumMaxD[x + 1132] * 5)
- )
- dfltCost = missingdict(lambda x: max(dfltCostU[x], dfltCostD[x]))
-
- # Combined cost per nominal choice.
- bestCost = missingdict(lambda x: nomnCost[x] - dfltCost[x])
-
- # Best nominal.
- nominal = min(domain, key=lambda x: bestCost[x])
-
- # Work back the best default.
- bestC = bestCost[nominal]
- dfltC = nomnCost[nominal] - bestCost[nominal]
- ends = []
- if dfltC == dfltCostU[nominal]:
- starts = [nominal, nominal - 108, nominal - 1132]
- for start in starts:
- while cumMaxU[start] and cumMaxU[start] == cumMaxU[start - 1]:
- start -= 1
- ends.append(start)
- else:
- starts = [nominal, nominal + 108, nominal + 1132]
- for start in starts:
- while cumMaxD[start] and cumMaxD[start] == cumMaxD[start + 1]:
- start += 1
- ends.append(start)
- default = min(ends, key=lambda default: byteCost(widths, default, nominal))
-
- return default, nominal
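A hedged end-to-end sketch, assuming optimizeWidths and byteCost above are in scope; the width distribution is synthetic:

widths = [500] * 300 + [520] * 40 + [1000] * 3
default, nominal = optimizeWidths(widths)
print(default, nominal, byteCost(widths, default, nominal))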
-
-
-def main(args=None):
- """Calculate optimum defaultWidthX/nominalWidthX values"""
-
- import argparse
-
- parser = argparse.ArgumentParser(
- "fonttools cffLib.width",
- description=main.__doc__,
- )
- parser.add_argument(
- "inputs", metavar="FILE", type=str, nargs="+", help="Input TTF files"
- )
- parser.add_argument(
- "-b",
- "--brute-force",
- dest="brute",
- action="store_true",
- help="Use brute-force approach (VERY slow)",
- )
-
- args = parser.parse_args(args)
-
- for fontfile in args.inputs:
- font = TTFont(fontfile)
- hmtx = font["hmtx"]
- widths = [m[0] for m in hmtx.metrics.values()]
- if args.brute:
- default, nominal = optimizeWidthsBruteforce(widths)
- else:
- default, nominal = optimizeWidths(widths)
- print(
- "glyphs=%d default=%d nominal=%d byteCost=%d"
- % (len(widths), default, nominal, byteCost(widths, default, nominal))
- )
-
-
-if __name__ == "__main__":
- import sys
-
- if len(sys.argv) == 1:
- import doctest
-
- sys.exit(doctest.testmod().failed)
- main()
diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/components/file.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/components/file.py
deleted file mode 100644
index 79426473d605eef21fa8bd793b2fe9f17d5ba660..0000000000000000000000000000000000000000
--- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/components/file.py
+++ /dev/null
@@ -1,281 +0,0 @@
-"""gr.File() component"""
-
-from __future__ import annotations
-
-import tempfile
-import warnings
-from pathlib import Path
-from typing import Any, Callable, Literal
-
-from gradio_client import utils as client_utils
-from gradio_client.documentation import document, set_documentation_group
-from gradio_client.serializing import FileSerializable
-
-from gradio import utils
-from gradio.components.base import IOComponent, _Keywords
-from gradio.deprecation import warn_deprecation
-from gradio.events import (
- Changeable,
- Clearable,
- EventListenerMethod,
- Selectable,
- Uploadable,
-)
-
-set_documentation_group("component")
-
-
-@document()
-class File(
- Changeable,
- Selectable,
- Clearable,
- Uploadable,
- IOComponent,
- FileSerializable,
-):
- """
- Creates a file component that allows uploading a generic file (when used as an input) and/or displaying generic files (as output).
- Preprocessing: passes the uploaded file as a {tempfile._TemporaryFileWrapper} or {List[tempfile._TemporaryFileWrapper]} depending on `file_count` (or a {bytes}/{List[bytes]} depending on `type`)
- Postprocessing: expects function to return a {str} path to a file, or {List[str]} consisting of paths to files.
- Examples-format: a {str} path to a local file that populates the component.
- Demos: zip_to_json, zip_files
- """
-
- def __init__(
- self,
- value: str | list[str] | Callable | None = None,
- *,
- file_count: Literal["single", "multiple", "directory"] = "single",
- file_types: list[str] | None = None,
- type: Literal["file", "binary"] = "file",
- label: str | None = None,
- every: float | None = None,
- show_label: bool | None = None,
- container: bool = True,
- scale: int | None = None,
- min_width: int = 160,
- interactive: bool | None = None,
- visible: bool = True,
- elem_id: str | None = None,
- elem_classes: list[str] | str | None = None,
- **kwargs,
- ):
- """
- Parameters:
- value: Default file to display, given as str file path. If callable, the function will be called whenever the app loads to set the initial value of the component.
- file_count: if "single", allows the user to upload one file. If "multiple", the user uploads multiple files. If "directory", the user uploads all files in the selected directory. Return type will be a list for each file in the case of "multiple" or "directory".
- file_types: List of file extensions or types of files to be uploaded (e.g. ['image', '.json', '.mp4']). "file" allows any file to be uploaded, "image" allows only image files to be uploaded, "audio" allows only audio files to be uploaded, "video" allows only video files to be uploaded, "text" allows only text files to be uploaded.
- type: Type of value to be returned by component. "file" returns a temporary file object with the same base name as the uploaded file, whose full path can be retrieved by file_obj.name, "binary" returns a bytes object.
- label: component name in interface.
- every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.
- show_label: if True, will display label.
- container: If True, will place the component in a container - providing some extra padding around the border.
- scale: relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.
- min_width: minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.
- interactive: if True, will allow users to upload a file; if False, can only be used to display files. If not provided, this is inferred based on whether the component is used as an input or output.
- visible: If False, component will be hidden.
- elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
- elem_classes: An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.
- """
- self.file_count = file_count
- self.file_types = file_types
- if file_types is not None and not isinstance(file_types, list):
- raise ValueError(
- f"Parameter file_types must be a list. Received {file_types.__class__.__name__}"
- )
- valid_types = [
- "file",
- "binary",
- "bytes",
- ] # "bytes" is included for backwards compatibility
- if type not in valid_types:
- raise ValueError(
- f"Invalid value for parameter `type`: {type}. Please choose from one of: {valid_types}"
- )
- if type == "bytes":
- warn_deprecation(
- "The `bytes` type is deprecated and may not work as expected. Please use `binary` instead."
- )
- if file_count == "directory" and file_types is not None:
- warnings.warn(
- "The `file_types` parameter is ignored when `file_count` is 'directory'."
- )
- self.type = type
- self.select: EventListenerMethod
- """
- Event listener for when the user selects file from list.
- Uses event data gradio.SelectData to carry `value` referring to name of selected file, and `index` to refer to index.
- See EventData documentation on how to use this event data.
- """
- IOComponent.__init__(
- self,
- label=label,
- every=every,
- show_label=show_label,
- container=container,
- scale=scale,
- min_width=min_width,
- interactive=interactive,
- visible=visible,
- elem_id=elem_id,
- elem_classes=elem_classes,
- value=value,
- **kwargs,
- )
-
- def get_config(self):
- return {
- "file_count": self.file_count,
- "file_types": self.file_types,
- "value": self.value,
- "selectable": self.selectable,
- **IOComponent.get_config(self),
- }
-
- @staticmethod
- def update(
- value: Any | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE,
- label: str | None = None,
- show_label: bool | None = None,
- container: bool | None = None,
- scale: int | None = None,
- min_width: int | None = None,
- interactive: bool | None = None,
- visible: bool | None = None,
- ):
- return {
- "label": label,
- "show_label": show_label,
- "container": container,
- "scale": scale,
- "min_width": min_width,
- "interactive": interactive,
- "visible": visible,
- "value": value,
- "__type__": "update",
- }
-
- def preprocess(
- self, x: list[dict[str, Any]] | None
- ) -> (
- bytes
- | tempfile._TemporaryFileWrapper
- | list[bytes | tempfile._TemporaryFileWrapper]
- | None
- ):
- """
- Parameters:
- x: List of JSON objects with filename as 'name' property and base64 data as 'data' property
- Returns:
- File objects in requested format
- """
- if x is None:
- return None
-
- def process_single_file(f) -> bytes | tempfile._TemporaryFileWrapper:
- file_name, data, is_file = (
- f["name"],
- f["data"],
- f.get("is_file", False),
- )
- if self.type == "file":
- if is_file:
- path = self.make_temp_copy_if_needed(file_name)
- else:
- data, _ = client_utils.decode_base64_to_binary(data)
- path = self.file_bytes_to_file(
- data, dir=self.DEFAULT_TEMP_DIR, file_name=file_name
- )
- path = str(utils.abspath(path))
- self.temp_files.add(path)
-
- # Creation of tempfiles here
- file = tempfile.NamedTemporaryFile(
- delete=False, dir=self.DEFAULT_TEMP_DIR
- )
- file.name = path
- file.orig_name = file_name # type: ignore
- return file
- elif (
- self.type == "binary" or self.type == "bytes"
- ): # "bytes" is included for backwards compatibility
- if is_file:
- with open(file_name, "rb") as file_data:
- return file_data.read()
- return client_utils.decode_base64_to_binary(data)[0]
- else:
- raise ValueError(
- "Unknown type: "
- + str(self.type)
- + ". Please choose from: 'file', 'bytes'."
- )
-
- if self.file_count == "single":
- if isinstance(x, list):
- return process_single_file(x[0])
- else:
- return process_single_file(x)
- else:
- if isinstance(x, list):
- return [process_single_file(f) for f in x]
- else:
- return process_single_file(x)
-
- def postprocess(
- self, y: str | list[str] | None
- ) -> dict[str, Any] | list[dict[str, Any]] | None:
- """
- Parameters:
- y: file path
- Returns:
- JSON object with key 'name' for filename, 'data' for base64 url, and 'size' for filesize in bytes
- """
- if y is None:
- return None
- if isinstance(y, list):
- return [
- {
- "orig_name": Path(file).name,
- "name": self.make_temp_copy_if_needed(file),
- "size": Path(file).stat().st_size,
- "data": None,
- "is_file": True,
- }
- for file in y
- ]
- else:
- d = {
- "orig_name": Path(y).name,
- "name": self.make_temp_copy_if_needed(y),
- "size": Path(y).stat().st_size,
- "data": None,
- "is_file": True,
- }
- return d
-
- def as_example(self, input_data: str | list | None) -> str:
- if input_data is None:
- return ""
- elif isinstance(input_data, list):
- return ", ".join([Path(file).name for file in input_data])
- else:
- return Path(input_data).name
-
- def api_info(self) -> dict[str, dict | bool]:
- if self.file_count == "single":
- return self._single_file_api_info()
- else:
- return self._multiple_file_api_info()
-
- def serialized_info(self):
- if self.file_count == "single":
- return self._single_file_serialized_info()
- else:
- return self._multiple_file_serialized_info()
-
- def example_inputs(self) -> dict[str, Any]:
- if self.file_count == "single":
- return self._single_file_example_inputs()
- else:
- return self._multiple_file_example_inputs()
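A hedged usage sketch against the gradio 3.x API vendored above; the word-count function and file-type restriction are illustrative:

import gradio as gr

def word_count(f):
    # with type="file" (the default) the component passes a tempfile wrapper; .name holds the path
    with open(f.name, "r", encoding="utf-8") as fh:
        return len(fh.read().split())

demo = gr.Interface(fn=word_count, inputs=gr.File(file_types=[".txt"]), outputs="number")
# demo.launch()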
diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/components/number.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/components/number.py
deleted file mode 100644
index 1b2029a7042ca9566b288add69a3a8232b31387c..0000000000000000000000000000000000000000
--- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/components/number.py
+++ /dev/null
@@ -1,238 +0,0 @@
-"""gr.Number() component."""
-
-from __future__ import annotations
-
-import math
-from typing import Callable, Literal
-
-import numpy as np
-from gradio_client.documentation import document, set_documentation_group
-from gradio_client.serializing import NumberSerializable
-
-from gradio.components.base import FormComponent, IOComponent, _Keywords
-from gradio.events import (
- Blurrable,
- Changeable,
- Inputable,
- Submittable,
-)
-from gradio.exceptions import Error
-from gradio.interpretation import NeighborInterpretable
-
-set_documentation_group("component")
-
-
-@document()
-class Number(
- FormComponent,
- Changeable,
- Inputable,
- Submittable,
- Blurrable,
- IOComponent,
- NumberSerializable,
- NeighborInterpretable,
-):
- """
- Creates a numeric field for the user to enter numbers as input or to display numeric output.
- Preprocessing: passes field value as a {float} or {int} into the function, depending on `precision`.
- Postprocessing: expects an {int} or {float} returned from the function and sets field value to it.
- Examples-format: a {float} or {int} representing the number's value.
-
- Demos: tax_calculator, titanic_survival, blocks_simple_squares
- """
-
- def __init__(
- self,
- value: float | Callable | None = None,
- *,
- label: str | None = None,
- info: str | None = None,
- every: float | None = None,
- show_label: bool | None = None,
- container: bool = True,
- scale: int | None = None,
- min_width: int = 160,
- interactive: bool | None = None,
- visible: bool = True,
- elem_id: str | None = None,
- elem_classes: list[str] | str | None = None,
- precision: int | None = None,
- minimum: float | None = None,
- maximum: float | None = None,
- **kwargs,
- ):
- """
- Parameters:
- value: default value. If callable, the function will be called whenever the app loads to set the initial value of the component.
- label: component name in interface.
- info: additional component description.
- every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.
- show_label: if True, will display label.
- container: If True, will place the component in a container - providing some extra padding around the border.
- scale: relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.
- min_width: minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.
- interactive: if True, will be editable; if False, editing will be disabled. If not provided, this is inferred based on whether the component is used as an input or output.
- visible: If False, component will be hidden.
- elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
- elem_classes: An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.
- precision: Precision to round input/output to. If set to 0, will round to nearest integer and convert type to int. If None, no rounding happens.
- minimum: Minimum value. Only applied when component is used as an input. If a user provides a smaller value, a gr.Error exception is raised by the backend.
- maximum: Maximum value. Only applied when component is used as an input. If a user provides a larger value, a gr.Error exception is raised by the backend.
- """
- self.precision = precision
- self.minimum = minimum
- self.maximum = maximum
-
- IOComponent.__init__(
- self,
- label=label,
- info=info,
- every=every,
- show_label=show_label,
- container=container,
- scale=scale,
- min_width=min_width,
- interactive=interactive,
- visible=visible,
- elem_id=elem_id,
- elem_classes=elem_classes,
- value=value,
- **kwargs,
- )
- NeighborInterpretable.__init__(self)
-
- @staticmethod
- def _round_to_precision(num: float | int, precision: int | None) -> float | int:
- """
- Round to a given precision.
-
- If precision is None, no rounding happens. If 0, num is converted to int.
-
- Parameters:
- num: Number to round.
- precision: Precision to round to.
- Returns:
- rounded number
- """
- if precision is None:
- return float(num)
- elif precision == 0:
- return int(round(num, precision))
- else:
- return round(num, precision)
-
- def get_config(self):
- return {
- "value": self.value,
- "minimum": self.minimum,
- "maximum": self.maximum,
- "container": self.container,
- **IOComponent.get_config(self),
- }
-
- @staticmethod
- def update(
- value: float | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE,
- minimum: float | None = None,
- maximum: float | None = None,
- label: str | None = None,
- info: str | None = None,
- show_label: bool | None = None,
- container: bool | None = None,
- scale: int | None = None,
- min_width: int | None = None,
- interactive: bool | None = None,
- visible: bool | None = None,
- ):
- return {
- "label": label,
- "info": info,
- "show_label": show_label,
- "container": container,
- "scale": scale,
- "min_width": min_width,
- "visible": visible,
- "value": value,
- "minimum": minimum,
- "maximum": maximum,
- "interactive": interactive,
- "__type__": "update",
- }
-
- def preprocess(self, x: float | None) -> float | None:
- """
- Parameters:
- x: numeric input
- Returns:
- number representing function input
- """
- if x is None:
- return None
- elif self.minimum is not None and x < self.minimum:
- raise Error(f"Value {x} is less than minimum value {self.minimum}.")
- elif self.maximum is not None and x > self.maximum:
- raise Error(f"Value {x} is greater than maximum value {self.maximum}.")
- return self._round_to_precision(x, self.precision)
-
- def postprocess(self, y: float | None) -> float | None:
- """
- Any postprocessing needed to be performed on function output.
-
- Parameters:
- y: numeric output
- Returns:
- number representing function output
- """
- if y is None:
- return None
- return self._round_to_precision(y, self.precision)
-
- def set_interpret_parameters(
- self, steps: int = 3, delta: float = 1, delta_type: str = "percent"
- ):
- """
- Calculates interpretation scores of numeric values close to the input number.
- Parameters:
- steps: Number of nearby values to measure in each direction (above and below the input number).
- delta: Size of step in each direction between nearby values.
- delta_type: "percent" if delta step between nearby values should be a calculated as a percent, or "absolute" if delta should be a constant step change.
- """
- self.interpretation_steps = steps
- self.interpretation_delta = delta
- self.interpretation_delta_type = delta_type
- return self
-
- def get_interpretation_neighbors(self, x: float | int) -> tuple[list[float], dict]:
- x = self._round_to_precision(x, self.precision)
- if self.interpretation_delta_type == "percent":
- delta = 1.0 * self.interpretation_delta * x / 100
- elif self.interpretation_delta_type == "absolute":
- delta = self.interpretation_delta
- else:
- delta = self.interpretation_delta
- if self.precision == 0 and math.floor(delta) != delta:
- raise ValueError(
- f"Delta value {delta} is not an integer and precision=0. Cannot generate valid set of neighbors. "
- "If delta_type='percent', pick a value of delta such that x * delta is an integer. "
- "If delta_type='absolute', pick a value of delta that is an integer."
- )
- # run_interpretation will preprocess the neighbors so no need to convert to int here
- negatives = (
- np.array(x) + np.arange(-self.interpretation_steps, 0) * delta
- ).tolist()
- positives = (
- np.array(x) + np.arange(1, self.interpretation_steps + 1) * delta
- ).tolist()
- return negatives + positives, {}
-
- def get_interpretation_scores(
- self, x: float, neighbors: list[float], scores: list[float | None], **kwargs
- ) -> list[tuple[float, float | None]]:
- """
- Returns:
- Each tuple set represents a numeric value near the input and its corresponding interpretation score.
- """
- interpretation = list(zip(neighbors, scores))
- interpretation.insert(int(len(interpretation) / 2), (x, None))
- return interpretation
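A hedged usage sketch of the component above (gradio 3.x API); the converter function and bounds are illustrative:

import gradio as gr

demo = gr.Interface(
    fn=lambda celsius: celsius * 9 / 5 + 32,
    inputs=gr.Number(label="Celsius", precision=1, minimum=-273.1, maximum=10000),
    outputs=gr.Number(label="Fahrenheit", precision=1),
)
# demo.launch()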
diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/huggingface_hub/utils/_deprecation.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/huggingface_hub/utils/_deprecation.py
deleted file mode 100644
index bd0a90595d478dfd331696aa766f695d7638f1ed..0000000000000000000000000000000000000000
--- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/huggingface_hub/utils/_deprecation.py
+++ /dev/null
@@ -1,229 +0,0 @@
-import warnings
-from functools import wraps
-from inspect import Parameter, signature
-from typing import Generator, Iterable, Optional
-
-
-def _deprecate_positional_args(*, version: str):
- """Decorator for methods that issues warnings for positional arguments.
- Using the keyword-only argument syntax in pep 3102, arguments after the
- * will issue a warning when passed as a positional argument.
-
- Args:
- version (`str`):
- The version when positional arguments will result in error.
- """
-
- def _inner_deprecate_positional_args(f):
- sig = signature(f)
- kwonly_args = []
- all_args = []
- for name, param in sig.parameters.items():
- if param.kind == Parameter.POSITIONAL_OR_KEYWORD:
- all_args.append(name)
- elif param.kind == Parameter.KEYWORD_ONLY:
- kwonly_args.append(name)
-
- @wraps(f)
- def inner_f(*args, **kwargs):
- extra_args = len(args) - len(all_args)
- if extra_args <= 0:
- return f(*args, **kwargs)
- # extra_args > 0
- args_msg = [
- f"{name}='{arg}'" if isinstance(arg, str) else f"{name}={arg}"
- for name, arg in zip(kwonly_args[:extra_args], args[-extra_args:])
- ]
- args_msg = ", ".join(args_msg)
- warnings.warn(
- (
- f"Deprecated positional argument(s) used in '{f.__name__}': pass"
- f" {args_msg} as keyword args. From version {version} passing these"
- " as positional arguments will result in an error,"
- ),
- FutureWarning,
- )
- kwargs.update(zip(sig.parameters, args))
- return f(**kwargs)
-
- return inner_f
-
- return _inner_deprecate_positional_args
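A hedged sketch of how the decorator above behaves, assuming it is in scope; the decorated function and version string are hypothetical:

@_deprecate_positional_args(version="0.99")
def upload(repo_id, *, private=False, token=None):
    return repo_id, private, token

upload("org/model", True)          # FutureWarning: pass private=True as a keyword argument
upload("org/model", private=True)  # no warning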
-
-
-def _deprecate_arguments(
- *,
- version: str,
- deprecated_args: Iterable[str],
- custom_message: Optional[str] = None,
-):
- """Decorator to issue warnings when using deprecated arguments.
-
- TODO: could be useful to be able to set a custom error message.
-
- Args:
- version (`str`):
- The version when deprecated arguments will result in error.
- deprecated_args (`List[str]`):
- List of the arguments to be deprecated.
- custom_message (`str`, *optional*):
- Warning message that is raised. If not passed, a default warning message
- will be created.
- """
-
- def _inner_deprecate_positional_args(f):
- sig = signature(f)
-
- @wraps(f)
- def inner_f(*args, **kwargs):
- # Check for used deprecated arguments
- used_deprecated_args = []
- for _, parameter in zip(args, sig.parameters.values()):
- if parameter.name in deprecated_args:
- used_deprecated_args.append(parameter.name)
- for kwarg_name, kwarg_value in kwargs.items():
- if (
- # If argument is deprecated but still used
- kwarg_name in deprecated_args
- # And then the value is not the default value
- and kwarg_value != sig.parameters[kwarg_name].default
- ):
- used_deprecated_args.append(kwarg_name)
-
- # Warn and proceed
- if len(used_deprecated_args) > 0:
- message = (
- f"Deprecated argument(s) used in '{f.__name__}':"
- f" {', '.join(used_deprecated_args)}. Will not be supported from"
- f" version '{version}'."
- )
- if custom_message is not None:
- message += "\n\n" + custom_message
- warnings.warn(message, FutureWarning)
- return f(*args, **kwargs)
-
- return inner_f
-
- return _inner_deprecate_positional_args
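Similarly, a hedged sketch for _deprecate_arguments; the function and argument names are hypothetical:

@_deprecate_arguments(version="0.99", deprecated_args={"repo_url"})
def create_repo(repo_id=None, repo_url=None):
    return repo_id or repo_url

create_repo(repo_url="https://hf.co/org/model")  # FutureWarning mentioning 'repo_url'
create_repo(repo_id="org/model")                 # no warning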
-
-
-def _deprecate_method(*, version: str, message: Optional[str] = None):
- """Decorator to issue warnings when using a deprecated method.
-
- Args:
- version (`str`):
- The version when deprecated arguments will result in error.
- message (`str`, *optional*):
- Warning message that is raised. If not passed, a default warning message
- will be created.
- """
-
- def _inner_deprecate_method(f):
- @wraps(f)
- def inner_f(*args, **kwargs):
- warning_message = (
- f"'{f.__name__}' (from '{f.__module__}') is deprecated and will be removed from version '{version}'."
- )
- if message is not None:
- warning_message += " " + message
- warnings.warn(warning_message, FutureWarning)
- return f(*args, **kwargs)
-
- return inner_f
-
- return _inner_deprecate_method
-
-
-def _deprecate_list_output(*, version: str):
- """Decorator to deprecate the usage as a list of the output of a method.
-
- To be used when a method currently returns a list of objects but is planned to return
- a generator instead in the future. Output is still a list but tweaked to issue a
- warning message when it is specifically used as a list (e.g. get/set/del item, get
- length,...).
-
- Args:
- version (`str`):
- The version when the output will start to be a generator.
- """
-
- def _inner_deprecate_method(f):
- @wraps(f)
- def inner_f(*args, **kwargs):
- list_value = f(*args, **kwargs)
- return DeprecatedList(
- list_value,
- warning_message=(
- "'{f.__name__}' currently returns a list of objects but is planned"
- " to be a generator starting from version {version} in order to"
- " implement pagination. Please avoid to use"
- " `{f.__name__}(...).{attr_name}` or explicitly convert the output"
- " to a list first with `[item for item in {f.__name__}(...)]`.".format(
- f=f,
- version=version,
- # Dumb but working workaround to render `attr_name` later
- # Taken from https://stackoverflow.com/a/35300723
- attr_name="{attr_name}",
- )
- ),
- )
-
- return inner_f
-
- return _inner_deprecate_method
-
-
-def _empty_gen() -> Generator:
- # Create an empty generator
- # Taken from https://stackoverflow.com/a/13243870
- return
- yield
-
-
-# Build the set of attributes that are specific to a List object (and will be deprecated)
-_LIST_ONLY_ATTRS = frozenset(set(dir([])) - set(dir(_empty_gen())))
-
-
-class DeprecateListMetaclass(type):
- """Metaclass that overwrites all list-only methods, including magic ones."""
-
- def __new__(cls, clsname, bases, attrs):
- # Check consistency
- if "_deprecate" not in attrs:
- raise TypeError("A `_deprecate` method must be implemented to use `DeprecateListMetaclass`.")
- if list not in bases:
- raise TypeError("Class must inherit from `list` to use `DeprecateListMetaclass`.")
-
- # Create decorator to deprecate list-only methods, including magic ones
- def _with_deprecation(f, name):
- @wraps(f)
- def _inner(self, *args, **kwargs):
- self._deprecate(name) # Use the `_deprecate`
- return f(self, *args, **kwargs)
-
- return _inner
-
- # Deprecate list-only methods
- for attr in _LIST_ONLY_ATTRS:
- attrs[attr] = _with_deprecation(getattr(list, attr), attr)
-
- return super().__new__(cls, clsname, bases, attrs)
-
-
-class DeprecatedList(list, metaclass=DeprecateListMetaclass):
- """Custom List class for which all calls to a list-specific method is deprecated.
-
- Methods that are shared with a generator are not deprecated.
- See `_deprecate_list_output` for more details.
- """
-
- def __init__(self, iterable, warning_message: str):
- """Initialize the list with a default warning message.
-
- Warning message will be formatted at runtime with a "{attr_name}" value.
- """
- super().__init__(iterable)
- self._deprecation_msg = warning_message
-
- def _deprecate(self, attr_name: str) -> None:
- warnings.warn(self._deprecation_msg.format(attr_name=attr_name), FutureWarning)
diff --git a/spaces/DaFujaTyping/hf-Chat-ui/src/routes/conversation/+server.ts b/spaces/DaFujaTyping/hf-Chat-ui/src/routes/conversation/+server.ts
deleted file mode 100644
index 8d51bd4dfb1624da65ae09a49e090811564970b6..0000000000000000000000000000000000000000
--- a/spaces/DaFujaTyping/hf-Chat-ui/src/routes/conversation/+server.ts
+++ /dev/null
@@ -1,63 +0,0 @@
-import type { RequestHandler } from "./$types";
-import { collections } from "$lib/server/database";
-import { ObjectId } from "mongodb";
-import { error, redirect } from "@sveltejs/kit";
-import { base } from "$app/paths";
-import { z } from "zod";
-import type { Message } from "$lib/types/Message";
-import { models } from "$lib/server/models";
-import { validateModel } from "$lib/utils/models";
-
-export const POST: RequestHandler = async (input) => {
- const body = await input.request.text();
-
- let title = "";
- let messages: Message[] = [];
-
- const values = z
- .object({
- fromShare: z.string().optional(),
- model: validateModel(models),
- })
- .parse(JSON.parse(body));
-
- if (values.fromShare) {
- const conversation = await collections.sharedConversations.findOne({
- _id: values.fromShare,
- });
-
- if (!conversation) {
- throw error(404, "Conversation not found");
- }
-
- title = conversation.title;
- messages = conversation.messages;
- values.model = conversation.model;
- }
-
- const res = await collections.conversations.insertOne({
- _id: new ObjectId(),
- title:
- title ||
- "Untitled " +
- ((await collections.conversations.countDocuments({ sessionId: input.locals.sessionId })) +
- 1),
- messages,
- model: values.model,
- createdAt: new Date(),
- updatedAt: new Date(),
- sessionId: input.locals.sessionId,
- ...(values.fromShare ? { meta: { fromShareId: values.fromShare } } : {}),
- });
-
- return new Response(
- JSON.stringify({
- conversationId: res.insertedId.toString(),
- }),
- { headers: { "Content-Type": "application/json" } }
- );
-};
-
-export const GET: RequestHandler = async () => {
- throw redirect(302, base || "/");
-};
diff --git a/spaces/DaleChen/AutoGPT/autogpt/logs.py b/spaces/DaleChen/AutoGPT/autogpt/logs.py
deleted file mode 100644
index 35037404a98f7be9b7d577b625cc190ca27f4566..0000000000000000000000000000000000000000
--- a/spaces/DaleChen/AutoGPT/autogpt/logs.py
+++ /dev/null
@@ -1,332 +0,0 @@
-"""Logging module for Auto-GPT."""
-import json
-import logging
-import os
-import random
-import re
-import time
-import traceback
-from logging import LogRecord
-
-from colorama import Fore, Style
-
-from autogpt.config import Config, Singleton
-from autogpt.speech import say_text
-
-CFG = Config()
-
-
-class Logger(metaclass=Singleton):
- """
- Logger that handles titles in different colors.
- Outputs logs to the console, activity.log, and error.log
- For console handler: simulates typing
- """
-
- def __init__(self):
- # create log directory if it doesn't exist
- this_files_dir_path = os.path.dirname(__file__)
- log_dir = os.path.join(this_files_dir_path, "../logs")
- if not os.path.exists(log_dir):
- os.makedirs(log_dir)
-
- log_file = "activity.log"
- error_file = "error.log"
-
- console_formatter = AutoGptFormatter("%(title_color)s %(message)s")
-
- # Create a handler for console which simulate typing
- self.typing_console_handler = TypingConsoleHandler()
- self.typing_console_handler.setLevel(logging.INFO)
- self.typing_console_handler.setFormatter(console_formatter)
-
- # Create a handler for console without typing simulation
- self.console_handler = ConsoleHandler()
- self.console_handler.setLevel(logging.DEBUG)
- self.console_handler.setFormatter(console_formatter)
-
- # Info handler in activity.log
- self.file_handler = logging.FileHandler(
- os.path.join(log_dir, log_file), "a", "utf-8"
- )
- self.file_handler.setLevel(logging.DEBUG)
- info_formatter = AutoGptFormatter(
- "%(asctime)s %(levelname)s %(title)s %(message_no_color)s"
- )
- self.file_handler.setFormatter(info_formatter)
-
- # Error handler error.log
- error_handler = logging.FileHandler(
- os.path.join(log_dir, error_file), "a", "utf-8"
- )
- error_handler.setLevel(logging.ERROR)
- error_formatter = AutoGptFormatter(
- "%(asctime)s %(levelname)s %(module)s:%(funcName)s:%(lineno)d %(title)s"
- " %(message_no_color)s"
- )
- error_handler.setFormatter(error_formatter)
-
- self.typing_logger = logging.getLogger("TYPER")
- self.typing_logger.addHandler(self.typing_console_handler)
- self.typing_logger.addHandler(self.file_handler)
- self.typing_logger.addHandler(error_handler)
- self.typing_logger.setLevel(logging.DEBUG)
-
- self.logger = logging.getLogger("LOGGER")
- self.logger.addHandler(self.console_handler)
- self.logger.addHandler(self.file_handler)
- self.logger.addHandler(error_handler)
- self.logger.setLevel(logging.DEBUG)
-
- def typewriter_log(
- self, title="", title_color="", content="", speak_text=False, level=logging.INFO
- ):
- if speak_text and CFG.speak_mode:
- say_text(f"{title}. {content}")
-
- if content:
- if isinstance(content, list):
- content = " ".join(content)
- else:
- content = ""
-
- self.typing_logger.log(
- level, content, extra={"title": title, "color": title_color}
- )
-
- def debug(
- self,
- message,
- title="",
- title_color="",
- ):
- self._log(title, title_color, message, logging.DEBUG)
-
- def warn(
- self,
- message,
- title="",
- title_color="",
- ):
- self._log(title, title_color, message, logging.WARN)
-
- def error(self, title, message=""):
- self._log(title, Fore.RED, message, logging.ERROR)
-
- def _log(self, title="", title_color="", message="", level=logging.INFO):
- if message:
- if isinstance(message, list):
- message = " ".join(message)
- self.logger.log(level, message, extra={"title": title, "color": title_color})
-
- def set_level(self, level):
- self.logger.setLevel(level)
- self.typing_logger.setLevel(level)
-
- def double_check(self, additionalText=None):
- if not additionalText:
- additionalText = (
- "Please ensure you've setup and configured everything"
- " correctly. Read https://github.com/Torantulino/Auto-GPT#readme to "
- "double check. You can also create a github issue or join the discord"
- " and ask there!"
- )
-
- self.typewriter_log("DOUBLE CHECK CONFIGURATION", Fore.YELLOW, additionalText)
-
-
-"""
-Output stream to console using simulated typing
-"""
-
-
-class TypingConsoleHandler(logging.StreamHandler):
- def emit(self, record):
- min_typing_speed = 0.05
- max_typing_speed = 0.01
-
- msg = self.format(record)
- try:
- words = msg.split()
- for i, word in enumerate(words):
- print(word, end="", flush=True)
- if i < len(words) - 1:
- print(" ", end="", flush=True)
- typing_speed = random.uniform(min_typing_speed, max_typing_speed)
- time.sleep(typing_speed)
- # type faster after each word
- min_typing_speed = min_typing_speed * 0.95
- max_typing_speed = max_typing_speed * 0.95
- print()
- except Exception:
- self.handleError(record)
-
-
-class ConsoleHandler(logging.StreamHandler):
- def emit(self, record) -> None:
- msg = self.format(record)
- try:
- print(msg)
- except Exception:
- self.handleError(record)
-
-
-class AutoGptFormatter(logging.Formatter):
- """
- Allows handling of the custom placeholders 'title_color' and 'message_no_color'.
- To use this formatter, make sure to pass 'color', 'title' as log extras.
- """
-
- def format(self, record: LogRecord) -> str:
- if hasattr(record, "color"):
- record.title_color = (
- getattr(record, "color")
- + getattr(record, "title")
- + " "
- + Style.RESET_ALL
- )
- else:
- record.title_color = getattr(record, "title")
- if hasattr(record, "msg"):
- record.message_no_color = remove_color_codes(getattr(record, "msg"))
- else:
- record.message_no_color = ""
- return super().format(record)
-
-
-def remove_color_codes(s: str) -> str:
- ansi_escape = re.compile(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])")
- return ansi_escape.sub("", s)
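A minimal check of the ANSI-stripping helper above, assuming it is in scope (colorama is already a dependency of this module):

from colorama import Fore, Style

print(remove_color_codes(Fore.RED + "SYSTEM:" + Style.RESET_ALL + " ready"))  # "SYSTEM: ready"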
-
-
-logger = Logger()
-
-
-def print_assistant_thoughts(ai_name, assistant_reply):
- """Prints the assistant's thoughts to the console"""
- from autogpt.json_utils.json_fix_llm import (
- attempt_to_fix_json_by_finding_outermost_brackets,
- fix_and_parse_json,
- )
-
- try:
- try:
- # Parse and print Assistant response
- assistant_reply_json = fix_and_parse_json(assistant_reply)
- except json.JSONDecodeError:
- logger.error("Error: Invalid JSON in assistant thoughts\n", assistant_reply)
- assistant_reply_json = attempt_to_fix_json_by_finding_outermost_brackets(
- assistant_reply
- )
- if isinstance(assistant_reply_json, str):
- assistant_reply_json = fix_and_parse_json(assistant_reply_json)
-
- # Check if assistant_reply_json is a string and attempt to parse
- # it into a JSON object
- if isinstance(assistant_reply_json, str):
- try:
- assistant_reply_json = json.loads(assistant_reply_json)
- except json.JSONDecodeError:
- logger.error("Error: Invalid JSON\n", assistant_reply)
- assistant_reply_json = (
- attempt_to_fix_json_by_finding_outermost_brackets(
- assistant_reply_json
- )
- )
-
- assistant_thoughts_reasoning = None
- assistant_thoughts_plan = None
- assistant_thoughts_speak = None
- assistant_thoughts_criticism = None
- if not isinstance(assistant_reply_json, dict):
- assistant_reply_json = {}
- assistant_thoughts = assistant_reply_json.get("thoughts", {})
- assistant_thoughts_text = assistant_thoughts.get("text")
-
- if assistant_thoughts:
- assistant_thoughts_reasoning = assistant_thoughts.get("reasoning")
- assistant_thoughts_plan = assistant_thoughts.get("plan")
- assistant_thoughts_criticism = assistant_thoughts.get("criticism")
- assistant_thoughts_speak = assistant_thoughts.get("speak")
-
- logger.typewriter_log(
- f"{ai_name.upper()} THOUGHTS:", Fore.YELLOW, f"{assistant_thoughts_text}"
- )
- logger.typewriter_log(
- "REASONING:", Fore.YELLOW, f"{assistant_thoughts_reasoning}"
- )
-
- if assistant_thoughts_plan:
- logger.typewriter_log("PLAN:", Fore.YELLOW, "")
- # If it's a list, join it into a string
- if isinstance(assistant_thoughts_plan, list):
- assistant_thoughts_plan = "\n".join(assistant_thoughts_plan)
- elif isinstance(assistant_thoughts_plan, dict):
- assistant_thoughts_plan = str(assistant_thoughts_plan)
-
- # Split the input_string using the newline character and dashes
- lines = assistant_thoughts_plan.split("\n")
- for line in lines:
- line = line.lstrip("- ")
- logger.typewriter_log("- ", Fore.GREEN, line.strip())
-
- logger.typewriter_log(
- "CRITICISM:", Fore.YELLOW, f"{assistant_thoughts_criticism}"
- )
- # Speak the assistant's thoughts
- if CFG.speak_mode and assistant_thoughts_speak:
- say_text(assistant_thoughts_speak)
- else:
- logger.typewriter_log("SPEAK:", Fore.YELLOW, f"{assistant_thoughts_speak}")
-
- return assistant_reply_json
- except json.decoder.JSONDecodeError:
- logger.error("Error: Invalid JSON\n", assistant_reply)
- if CFG.speak_mode:
- say_text(
- "I have received an invalid JSON response from the OpenAI API."
- " I cannot ignore this response."
- )
-
- # All other errors, return "Error: + error message"
- except Exception:
- call_stack = traceback.format_exc()
- logger.error("Error: \n", call_stack)
-
-
-def print_assistant_thoughts(
- ai_name: object, assistant_reply_json_valid: object
-) -> None:
- assistant_thoughts_reasoning = None
- assistant_thoughts_plan = None
- assistant_thoughts_speak = None
- assistant_thoughts_criticism = None
-
- assistant_thoughts = assistant_reply_json_valid.get("thoughts", {})
- assistant_thoughts_text = assistant_thoughts.get("text")
- if assistant_thoughts:
- assistant_thoughts_reasoning = assistant_thoughts.get("reasoning")
- assistant_thoughts_plan = assistant_thoughts.get("plan")
- assistant_thoughts_criticism = assistant_thoughts.get("criticism")
- assistant_thoughts_speak = assistant_thoughts.get("speak")
- logger.typewriter_log(
- f"{ai_name.upper()} THOUGHTS:", Fore.YELLOW, f"{assistant_thoughts_text}"
- )
- logger.typewriter_log("REASONING:", Fore.YELLOW, f"{assistant_thoughts_reasoning}")
- if assistant_thoughts_plan:
- logger.typewriter_log("PLAN:", Fore.YELLOW, "")
- # If it's a list, join it into a string
- if isinstance(assistant_thoughts_plan, list):
- assistant_thoughts_plan = "\n".join(assistant_thoughts_plan)
- elif isinstance(assistant_thoughts_plan, dict):
- assistant_thoughts_plan = str(assistant_thoughts_plan)
-
- # Split the input_string using the newline character and dashes
- lines = assistant_thoughts_plan.split("\n")
- for line in lines:
- line = line.lstrip("- ")
- logger.typewriter_log("- ", Fore.GREEN, line.strip())
- logger.typewriter_log("CRITICISM:", Fore.YELLOW, f"{assistant_thoughts_criticism}")
- # Speak the assistant's thoughts
- if CFG.speak_mode and assistant_thoughts_speak:
- say_text(assistant_thoughts_speak)
diff --git a/spaces/DarthVaderAI/Diffusion-Art/README.md b/spaces/DarthVaderAI/Diffusion-Art/README.md
deleted file mode 100644
index db4c6cfb04970422dabe17ef3a53a1414badd582..0000000000000000000000000000000000000000
--- a/spaces/DarthVaderAI/Diffusion-Art/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Diffusion Art
-emoji: ⚡
-colorFrom: green
-colorTo: pink
-sdk: gradio
-sdk_version: 3.3.1
-app_file: app.py
-pinned: false
-license: apache-2.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/DataScienceEngineering/7-NER-Biomed-ClinicalTerms/README.md b/spaces/DataScienceEngineering/7-NER-Biomed-ClinicalTerms/README.md
deleted file mode 100644
index 586e4bf4256fadae0ed2f71687dc091d5b385922..0000000000000000000000000000000000000000
--- a/spaces/DataScienceEngineering/7-NER-Biomed-ClinicalTerms/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: ⚕️ Clinical Terminology Biomed NLP AI NER 🩺 Gradio
-emoji: 👩⚕️CT👩⚕️
-colorFrom: blue
-colorTo: yellow
-sdk: gradio
-sdk_version: 3.16.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
\ No newline at end of file
diff --git a/spaces/DeclK/pose/app.py b/spaces/DeclK/pose/app.py
deleted file mode 100644
index 5ac9a0e19691410146a04b8ed16468a81b47ff2a..0000000000000000000000000000000000000000
--- a/spaces/DeclK/pose/app.py
+++ /dev/null
@@ -1,107 +0,0 @@
-# Run inference on two videos and use DTW to match their pose keypoints.
-from tools.inferencer import PoseInferencerV2
-from tools.dtw import DTWForKeypoints
-from tools.visualizer import FastVisualizer
-from tools.utils import convert_video_to_playable_mp4
-from pathlib import Path
-from tqdm import tqdm
-import mmengine
-import numpy as np
-import mmcv
-import cv2
-import gradio as gr
-
-def concat(img1, img2, height=1080):
- h1, w1, _ = img1.shape
- h2, w2, _ = img2.shape
-
- # Calculate the scaling factor for each image
- scale1 = height / img1.shape[0]
- scale2 = height / img2.shape[0]
-
- # Resize the images
- img1 = cv2.resize(img1, (int(w1*scale1), int(h1*scale1)))
- img2 = cv2.resize(img2, (int(w2*scale2), int(h2*scale2)))
-
- # Concatenate the images horizontally
- image = cv2.hconcat([img1, img2])
- return image
-
-def draw(vis: FastVisualizer, img, keypoint, box, oks, oks_unnorm,
- draw_human_keypoints=True,
- draw_score_bar=True):
- vis.set_image(img)
- vis.draw_non_transparent_area(box)
- if draw_score_bar:
- vis.draw_score_bar(oks)
- if draw_human_keypoints:
- vis.draw_human_keypoints(keypoint, oks_unnorm)
- return vis.get_image()
-
-def main(video1, video2, draw_human_keypoints,
- progress=gr.Progress(track_tqdm=True)):
- # build PoseInferencerV2
- config = 'configs/mark2.py'
- cfg = mmengine.Config.fromfile(config)
- pose_inferencer = PoseInferencerV2(
- cfg.det_cfg,
- cfg.pose_cfg,
- device='cpu')
-
- v1 = mmcv.VideoReader(video1)
- v2 = mmcv.VideoReader(video2)
- video_writer = None
-
- all_det1, all_pose1 = pose_inferencer.inference_video(video1)
- all_det2, all_pose2 = pose_inferencer.inference_video(video2)
-
- keypoints1 = np.stack([p.keypoints[0] for p in all_pose1]) # keep only the first predicted person
- keypoints2 = np.stack([p.keypoints[0] for p in all_pose2])
- boxes1 = np.stack([d.bboxes[0] for d in all_det1])
- boxes2 = np.stack([d.bboxes[0] for d in all_det2])
-
- dtw_path, oks, oks_unnorm = DTWForKeypoints(keypoints1, keypoints2).get_dtw_path()
-
- vis = FastVisualizer()
-
- for i, j in tqdm(dtw_path, desc='Visualizing'):
- frame1 = v1[i]
- frame2 = v2[j]
-
- frame1_ = draw(vis, frame1.copy(), keypoints1[i], boxes1[i],
- oks[i, j], oks_unnorm[i, j], draw_human_keypoints)
- frame2_ = draw(vis, frame2.copy(), keypoints2[j], boxes2[j],
- oks[i, j], oks_unnorm[i, j], draw_human_keypoints, draw_score_bar=False)
- # concatenate the two frames side by side
- frame = concat(frame1_, frame2_)
- # draw logo
- vis.set_image(frame)
- frame = vis.draw_logo().get_image()
- # write video
- w, h = frame.shape[1], frame.shape[0]
- if video_writer is None:
- fourcc = cv2.VideoWriter_fourcc(*'mp4v')
- video_writer = cv2.VideoWriter('dtw_compare.mp4',
- fourcc, v1.fps, (w, h))
- video_writer.write(frame)
- video_writer.release()
- # output video file
- convert_video_to_playable_mp4('dtw_compare.mp4')
- output = str(Path('dtw_compare.mp4').resolve())
- return output
-
-if __name__ == '__main__':
- config = 'configs/mark2.py'
- cfg = mmengine.Config.fromfile(config)
-
- inputs = [
- gr.Video(label="Input video 1"),
- gr.Video(label="Input video 2"),
- "checkbox"
- ]
-
- output = gr.Video(label="Output video")
-
- demo = gr.Interface(fn=main, inputs=inputs, outputs=output,
- allow_flagging='never').queue()
- demo.launch()
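A small sketch of the side-by-side frame composition used by `concat` above: both frames are scaled to a common height, then concatenated horizontally. The synthetic black images stand in for real video frames; this assumes `opencv-python` and `numpy` are installed.

```python
import cv2
import numpy as np

def concat(img1, img2, height=1080):
    # Scale both images to the requested height, preserving aspect ratio.
    scale1 = height / img1.shape[0]
    scale2 = height / img2.shape[0]
    img1 = cv2.resize(img1, (int(img1.shape[1] * scale1), height))
    img2 = cv2.resize(img2, (int(img2.shape[1] * scale2), height))
    # Stitch them left-to-right into one wide frame.
    return cv2.hconcat([img1, img2])

frame_a = np.zeros((720, 1280, 3), dtype=np.uint8)
frame_b = np.zeros((1080, 1920, 3), dtype=np.uint8)
combined = concat(frame_a, frame_b, height=540)
print(combined.shape)  # (540, 1920, 3): 960 + 960 columns after scaling
```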
diff --git a/spaces/Detomo/ai-comic-generation/src/components/ui/select.tsx b/spaces/Detomo/ai-comic-generation/src/components/ui/select.tsx
deleted file mode 100644
index 704239634b359b9e680dab25275e205e72579f82..0000000000000000000000000000000000000000
--- a/spaces/Detomo/ai-comic-generation/src/components/ui/select.tsx
+++ /dev/null
@@ -1,121 +0,0 @@
-"use client"
-
-import * as React from "react"
-import * as SelectPrimitive from "@radix-ui/react-select"
-import { Check, ChevronDown } from "lucide-react"
-
-import { cn } from "@/lib/utils"
-
-const Select = SelectPrimitive.Root
-
-const SelectGroup = SelectPrimitive.Group
-
-const SelectValue = SelectPrimitive.Value
-
-const SelectTrigger = React.forwardRef<
- React.ElementRef<typeof SelectPrimitive.Trigger>,
- React.ComponentPropsWithoutRef<typeof SelectPrimitive.Trigger>
->(({ className, children, ...props }, ref) => (
-
- {children}
-
-
-
-
-))
-SelectTrigger.displayName = SelectPrimitive.Trigger.displayName
-
-const SelectContent = React.forwardRef<
- React.ElementRef<typeof SelectPrimitive.Content>,
- React.ComponentPropsWithoutRef<typeof SelectPrimitive.Content>
->(({ className, children, position = "popper", ...props }, ref) => (
-
-
-
- {children}
-
-
-
-))
-SelectContent.displayName = SelectPrimitive.Content.displayName
-
-const SelectLabel = React.forwardRef<
- React.ElementRef<typeof SelectPrimitive.Label>,
- React.ComponentPropsWithoutRef<typeof SelectPrimitive.Label>
->(({ className, ...props }, ref) => (
-
-))
-SelectLabel.displayName = SelectPrimitive.Label.displayName
-
-const SelectItem = React.forwardRef<
- React.ElementRef<typeof SelectPrimitive.Item>,
- React.ComponentPropsWithoutRef<typeof SelectPrimitive.Item>
->(({ className, children, ...props }, ref) => (
-
-
-
-
-
-
-
- {children}
-
-))
-SelectItem.displayName = SelectPrimitive.Item.displayName
-
-const SelectSeparator = React.forwardRef<
- React.ElementRef<typeof SelectPrimitive.Separator>,
- React.ComponentPropsWithoutRef<typeof SelectPrimitive.Separator>
->(({ className, ...props }, ref) => (
-
-))
-SelectSeparator.displayName = SelectPrimitive.Separator.displayName
-
-export {
- Select,
- SelectGroup,
- SelectValue,
- SelectTrigger,
- SelectContent,
- SelectLabel,
- SelectItem,
- SelectSeparator,
-}
diff --git a/spaces/DragGan/DragGan-Inversion/scripts/gui.sh b/spaces/DragGan/DragGan-Inversion/scripts/gui.sh
deleted file mode 100644
index 5eb68e3b7d2e51b8781fa2e638c7005f0c994246..0000000000000000000000000000000000000000
--- a/spaces/DragGan/DragGan-Inversion/scripts/gui.sh
+++ /dev/null
@@ -1,11 +0,0 @@
-python visualizer_drag.py \
- checkpoints/stylegan2_lions_512_pytorch.pkl \
- checkpoints/stylegan2-ffhq-512x512.pkl \
- checkpoints/stylegan2-afhqcat-512x512.pkl \
- checkpoints/stylegan2-car-config-f.pkl \
- checkpoints/stylegan2_dogs_1024_pytorch.pkl \
- checkpoints/stylegan2_horses_256_pytorch.pkl \
- checkpoints/stylegan2-cat-config-f.pkl \
- checkpoints/stylegan2_elephants_512_pytorch.pkl \
- checkpoints/stylegan_human_v2_512.pkl \
- checkpoints/stylegan2-lhq-256x256.pkl
diff --git a/spaces/EronSamez/RVC_HFmeu/slicer2.py b/spaces/EronSamez/RVC_HFmeu/slicer2.py
deleted file mode 100644
index 5b29ee262aa54045e807be2cffeb41687499ba58..0000000000000000000000000000000000000000
--- a/spaces/EronSamez/RVC_HFmeu/slicer2.py
+++ /dev/null
@@ -1,260 +0,0 @@
-import numpy as np
-
-
-# This function is obtained from librosa.
-def get_rms(
- y,
- frame_length=2048,
- hop_length=512,
- pad_mode="constant",
-):
- padding = (int(frame_length // 2), int(frame_length // 2))
- y = np.pad(y, padding, mode=pad_mode)
-
- axis = -1
- # put our new within-frame axis at the end for now
- out_strides = y.strides + tuple([y.strides[axis]])
- # Reduce the shape on the framing axis
- x_shape_trimmed = list(y.shape)
- x_shape_trimmed[axis] -= frame_length - 1
- out_shape = tuple(x_shape_trimmed) + tuple([frame_length])
- xw = np.lib.stride_tricks.as_strided(y, shape=out_shape, strides=out_strides)
- if axis < 0:
- target_axis = axis - 1
- else:
- target_axis = axis + 1
- xw = np.moveaxis(xw, -1, target_axis)
- # Downsample along the target axis
- slices = [slice(None)] * xw.ndim
- slices[axis] = slice(0, None, hop_length)
- x = xw[tuple(slices)]
-
- # Calculate power
- power = np.mean(np.abs(x) ** 2, axis=-2, keepdims=True)
-
- return np.sqrt(power)
-
-
-class Slicer:
- def __init__(
- self,
- sr: int,
- threshold: float = -40.0,
- min_length: int = 5000,
- min_interval: int = 300,
- hop_size: int = 20,
- max_sil_kept: int = 5000,
- ):
- if not min_length >= min_interval >= hop_size:
- raise ValueError(
- "The following condition must be satisfied: min_length >= min_interval >= hop_size"
- )
- if not max_sil_kept >= hop_size:
- raise ValueError(
- "The following condition must be satisfied: max_sil_kept >= hop_size"
- )
- min_interval = sr * min_interval / 1000
- self.threshold = 10 ** (threshold / 20.0)
- self.hop_size = round(sr * hop_size / 1000)
- self.win_size = min(round(min_interval), 4 * self.hop_size)
- self.min_length = round(sr * min_length / 1000 / self.hop_size)
- self.min_interval = round(min_interval / self.hop_size)
- self.max_sil_kept = round(sr * max_sil_kept / 1000 / self.hop_size)
-
- def _apply_slice(self, waveform, begin, end):
- if len(waveform.shape) > 1:
- return waveform[
- :, begin * self.hop_size : min(waveform.shape[1], end * self.hop_size)
- ]
- else:
- return waveform[
- begin * self.hop_size : min(waveform.shape[0], end * self.hop_size)
- ]
-
- # @timeit
- def slice(self, waveform):
- if len(waveform.shape) > 1:
- samples = waveform.mean(axis=0)
- else:
- samples = waveform
- if samples.shape[0] <= self.min_length:
- return [waveform]
- rms_list = get_rms(
- y=samples, frame_length=self.win_size, hop_length=self.hop_size
- ).squeeze(0)
- sil_tags = []
- silence_start = None
- clip_start = 0
- for i, rms in enumerate(rms_list):
- # Keep looping while frame is silent.
- if rms < self.threshold:
- # Record start of silent frames.
- if silence_start is None:
- silence_start = i
- continue
- # Keep looping while frame is not silent and silence start has not been recorded.
- if silence_start is None:
- continue
- # Clear recorded silence start if interval is not enough or clip is too short
- is_leading_silence = silence_start == 0 and i > self.max_sil_kept
- need_slice_middle = (
- i - silence_start >= self.min_interval
- and i - clip_start >= self.min_length
- )
- if not is_leading_silence and not need_slice_middle:
- silence_start = None
- continue
- # Need slicing. Record the range of silent frames to be removed.
- if i - silence_start <= self.max_sil_kept:
- pos = rms_list[silence_start : i + 1].argmin() + silence_start
- if silence_start == 0:
- sil_tags.append((0, pos))
- else:
- sil_tags.append((pos, pos))
- clip_start = pos
- elif i - silence_start <= self.max_sil_kept * 2:
- pos = rms_list[
- i - self.max_sil_kept : silence_start + self.max_sil_kept + 1
- ].argmin()
- pos += i - self.max_sil_kept
- pos_l = (
- rms_list[
- silence_start : silence_start + self.max_sil_kept + 1
- ].argmin()
- + silence_start
- )
- pos_r = (
- rms_list[i - self.max_sil_kept : i + 1].argmin()
- + i
- - self.max_sil_kept
- )
- if silence_start == 0:
- sil_tags.append((0, pos_r))
- clip_start = pos_r
- else:
- sil_tags.append((min(pos_l, pos), max(pos_r, pos)))
- clip_start = max(pos_r, pos)
- else:
- pos_l = (
- rms_list[
- silence_start : silence_start + self.max_sil_kept + 1
- ].argmin()
- + silence_start
- )
- pos_r = (
- rms_list[i - self.max_sil_kept : i + 1].argmin()
- + i
- - self.max_sil_kept
- )
- if silence_start == 0:
- sil_tags.append((0, pos_r))
- else:
- sil_tags.append((pos_l, pos_r))
- clip_start = pos_r
- silence_start = None
- # Deal with trailing silence.
- total_frames = rms_list.shape[0]
- if (
- silence_start is not None
- and total_frames - silence_start >= self.min_interval
- ):
- silence_end = min(total_frames, silence_start + self.max_sil_kept)
- pos = rms_list[silence_start : silence_end + 1].argmin() + silence_start
- sil_tags.append((pos, total_frames + 1))
- # Apply and return slices.
- if len(sil_tags) == 0:
- return [waveform]
- else:
- chunks = []
- if sil_tags[0][0] > 0:
- chunks.append(self._apply_slice(waveform, 0, sil_tags[0][0]))
- for i in range(len(sil_tags) - 1):
- chunks.append(
- self._apply_slice(waveform, sil_tags[i][1], sil_tags[i + 1][0])
- )
- if sil_tags[-1][1] < total_frames:
- chunks.append(
- self._apply_slice(waveform, sil_tags[-1][1], total_frames)
- )
- return chunks
-
-
-def main():
- import os.path
- from argparse import ArgumentParser
-
- import librosa
- import soundfile
-
- parser = ArgumentParser()
- parser.add_argument("audio", type=str, help="The audio to be sliced")
- parser.add_argument(
- "--out", type=str, help="Output directory of the sliced audio clips"
- )
- parser.add_argument(
- "--db_thresh",
- type=float,
- required=False,
- default=-40,
- help="The dB threshold for silence detection",
- )
- parser.add_argument(
- "--min_length",
- type=int,
- required=False,
- default=5000,
- help="The minimum milliseconds required for each sliced audio clip",
- )
- parser.add_argument(
- "--min_interval",
- type=int,
- required=False,
- default=300,
- help="The minimum milliseconds for a silence part to be sliced",
- )
- parser.add_argument(
- "--hop_size",
- type=int,
- required=False,
- default=10,
- help="Frame length in milliseconds",
- )
- parser.add_argument(
- "--max_sil_kept",
- type=int,
- required=False,
- default=500,
- help="The maximum silence length kept around the sliced clip, presented in milliseconds",
- )
- args = parser.parse_args()
- out = args.out
- if out is None:
- out = os.path.dirname(os.path.abspath(args.audio))
- audio, sr = librosa.load(args.audio, sr=None, mono=False)
- slicer = Slicer(
- sr=sr,
- threshold=args.db_thresh,
- min_length=args.min_length,
- min_interval=args.min_interval,
- hop_size=args.hop_size,
- max_sil_kept=args.max_sil_kept,
- )
- chunks = slicer.slice(audio)
- if not os.path.exists(out):
- os.makedirs(out)
- for i, chunk in enumerate(chunks):
- if len(chunk.shape) > 1:
- chunk = chunk.T
- soundfile.write(
- os.path.join(
- out,
- f"%s_%d.wav"
- % (os.path.basename(args.audio).rsplit(".", maxsplit=1)[0], i),
- ),
- chunk,
- sr,
- )
-
-
-if __name__ == "__main__":
- main()
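A hedged sketch of the RMS framing idea behind `get_rms` and `Slicer` above: frame a synthetic signal, take per-frame RMS, and mark frames below a dB threshold as silence. The frame size, hop size, and threshold below are illustrative values only (the threshold matches `Slicer`'s -40 dB default).

```python
import numpy as np

sr = 16000
t = np.arange(sr) / sr
signal = np.concatenate([
    0.5 * np.sin(2 * np.pi * 440 * t),   # 1 s of tone
    np.zeros(sr // 2),                   # 0.5 s of silence
    0.5 * np.sin(2 * np.pi * 440 * t),   # 1 s of tone
])

frame, hop = 2048, 512
n_frames = 1 + (len(signal) - frame) // hop
rms = np.array([
    np.sqrt(np.mean(signal[i * hop:i * hop + frame] ** 2))
    for i in range(n_frames)
])

threshold = 10 ** (-40 / 20.0)           # -40 dB, as in Slicer's default
silent = rms < threshold
print(f"{silent.sum()} of {n_frames} frames fall below the silence threshold")
```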
diff --git a/spaces/FEIMENG/andite-anything-v4.0/README.md b/spaces/FEIMENG/andite-anything-v4.0/README.md
deleted file mode 100644
index 3ad0a2f614ff756cbe290296ec22830d635b40fc..0000000000000000000000000000000000000000
--- a/spaces/FEIMENG/andite-anything-v4.0/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Andite Anything V4.0
-emoji: 💻
-colorFrom: gray
-colorTo: pink
-sdk: gradio
-sdk_version: 3.19.1
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/FelixLuoX/codeformer/CodeFormer/basicsr/ops/upfirdn2d/src/upfirdn2d.cpp b/spaces/FelixLuoX/codeformer/CodeFormer/basicsr/ops/upfirdn2d/src/upfirdn2d.cpp
deleted file mode 100644
index 43d0b6783a5b512b55815a291fcac2bebeea31e0..0000000000000000000000000000000000000000
--- a/spaces/FelixLuoX/codeformer/CodeFormer/basicsr/ops/upfirdn2d/src/upfirdn2d.cpp
+++ /dev/null
@@ -1,24 +0,0 @@
-// from https://github.com/rosinality/stylegan2-pytorch/blob/master/op/upfirdn2d.cpp
-#include <torch/extension.h>
-
-
-torch::Tensor upfirdn2d_op(const torch::Tensor& input, const torch::Tensor& kernel,
- int up_x, int up_y, int down_x, int down_y,
- int pad_x0, int pad_x1, int pad_y0, int pad_y1);
-
-#define CHECK_CUDA(x) TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor")
-#define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous")
-#define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)
-
-torch::Tensor upfirdn2d(const torch::Tensor& input, const torch::Tensor& kernel,
- int up_x, int up_y, int down_x, int down_y,
- int pad_x0, int pad_x1, int pad_y0, int pad_y1) {
- CHECK_CUDA(input);
- CHECK_CUDA(kernel);
-
- return upfirdn2d_op(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1);
-}
-
-PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
- m.def("upfirdn2d", &upfirdn2d, "upfirdn2d (CUDA)");
-}
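A hedged sketch of how a CUDA extension like the binding above is typically built and called at runtime with `torch.utils.cpp_extension.load`. The source paths are placeholders that assume a matching `upfirdn2d_kernel.cu` next to the `.cpp`, and running it requires a CUDA device plus a working nvcc toolchain.

```python
import torch
from torch.utils.cpp_extension import load

# JIT-compile the C++/CUDA sources into an importable module.
upfirdn2d_ext = load(
    name="upfirdn2d",
    sources=["src/upfirdn2d.cpp", "src/upfirdn2d_kernel.cu"],
    verbose=True,
)

x = torch.randn(1, 3, 64, 64, device="cuda")
k = torch.tensor([[1.0, 3.0, 3.0, 1.0]])
kernel = (k.t() @ k) / k.sum() ** 2      # normalized 4x4 blur kernel

# Signature mirrors the binding: up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1.
out = upfirdn2d_ext.upfirdn2d(x, kernel.cuda(), 2, 2, 1, 1, 1, 1, 1, 1)
print(out.shape)
```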
diff --git a/spaces/Fernando22/freegpt-webui/server/backend.py b/spaces/Fernando22/freegpt-webui/server/backend.py
deleted file mode 100644
index fd45b94d916512059e4d1f7850b63de6f9da6320..0000000000000000000000000000000000000000
--- a/spaces/Fernando22/freegpt-webui/server/backend.py
+++ /dev/null
@@ -1,176 +0,0 @@
-import re
-from datetime import datetime
-from g4f import ChatCompletion
-from flask import request, Response, stream_with_context
-from requests import get
-from server.config import special_instructions
-
-
-class Backend_Api:
- def __init__(self, bp, config: dict) -> None:
- """
- Initialize the Backend_Api class.
- :param bp: Flask blueprint on which the backend routes are registered
- :param config: Configuration dictionary
- """
- self.bp = bp
- self.routes = {
- '/backend-api/v2/conversation': {
- 'function': self._conversation,
- 'methods': ['POST']
- }
- }
-
- def _conversation(self):
- """
- Handles the conversation route.
-
- :return: Response object containing the generated conversation stream
- """
- conversation_id = request.json['conversation_id']
-
- try:
- jailbreak = request.json['jailbreak']
- model = request.json['model']
- messages = build_messages(jailbreak)
-
- # Generate response
- response = ChatCompletion.create(
- model=model,
- chatId=conversation_id,
- messages=messages
- )
-
- return Response(stream_with_context(generate_stream(response, jailbreak)), mimetype='text/event-stream')
-
- except Exception as e:
- print(e)
- print(e.__traceback__.tb_next)
-
- return {
- '_action': '_ask',
- 'success': False,
- "error": f"an error occurred {str(e)}"
- }, 400
-
-
-def build_messages(jailbreak):
- """
- Build the messages for the conversation.
-
- :param jailbreak: Jailbreak instruction string
- :return: List of messages for the conversation
- """
- _conversation = request.json['meta']['content']['conversation']
- internet_access = request.json['meta']['content']['internet_access']
- prompt = request.json['meta']['content']['parts'][0]
-
- # Add the existing conversation
- conversation = _conversation
-
- # Add web results if enabled
- if internet_access:
- current_date = datetime.now().strftime("%Y-%m-%d")
- query = f'Current date: {current_date}. ' + prompt["content"]
- search_results = fetch_search_results(query)
- conversation.extend(search_results)
-
- # Add jailbreak instructions if enabled
- if jailbreak_instructions := getJailbreak(jailbreak):
- conversation.extend(jailbreak_instructions)
-
- # Add the prompt
- conversation.append(prompt)
-
- # Reduce conversation size to avoid API Token quantity error
- if len(conversation) > 3:
- conversation = conversation[-4:]
-
- return conversation
-
-
-def fetch_search_results(query):
- """
- Fetch search results for a given query.
-
- :param query: Search query string
- :return: List of search results
- """
- search = get('https://ddg-api.herokuapp.com/search',
- params={
- 'query': query,
- 'limit': 3,
- })
-
- snippets = ""
- for index, result in enumerate(search.json()):
- snippet = f'[{index + 1}] "{result["snippet"]}" URL:{result["link"]}.'
- snippets += snippet
-
- response = "Here are some updated web searches. Use this to improve user response:"
- response += snippets
-
- return [{'role': 'system', 'content': response}]
-
-
-def generate_stream(response, jailbreak):
- """
- Generate the conversation stream.
-
- :param response: Response object from ChatCompletion.create
- :param jailbreak: Jailbreak instruction string
- :return: Generator object yielding messages in the conversation
- """
- if getJailbreak(jailbreak):
- response_jailbreak = ''
- jailbroken_checked = False
- for message in response:
- response_jailbreak += message
- if jailbroken_checked:
- yield message
- else:
- if response_jailbroken_success(response_jailbreak):
- jailbroken_checked = True
- if response_jailbroken_failed(response_jailbreak):
- yield response_jailbreak
- jailbroken_checked = True
- else:
- yield from response
-
-
-def response_jailbroken_success(response: str) -> bool:
- """Check if the response has been jailbroken.
-
- :param response: Response string
- :return: Boolean indicating if the response has been jailbroken
- """
- act_match = re.search(r'ACT:', response, flags=re.DOTALL)
- return bool(act_match)
-
-
-def response_jailbroken_failed(response):
- """
- Check if the response has not been jailbroken.
-
- :param response: Response string
- :return: Boolean indicating if the response has not been jailbroken
- """
- return False if len(response) < 4 else not (response.startswith("GPT:") or response.startswith("ACT:"))
-
-
-def getJailbreak(jailbreak):
- """
- Check if jailbreak instructions are provided.
-
- :param jailbreak: Jailbreak instruction string
- :return: Jailbreak instructions if provided, otherwise None
- """
- if jailbreak != "default":
- if jailbreak in special_instructions:
- instructions = special_instructions[jailbreak]
- instructions[0]['content'] += special_instructions['two_responses_instruction']
- return instructions
- else:
- return None
- else:
- return None
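An illustrative sketch of the jailbreak stream gating implemented by `generate_stream`, `response_jailbroken_success`, and `response_jailbroken_failed` above: chunks are buffered until either "ACT:" appears (after which later chunks stream through) or the buffer clearly does not follow the GPT:/ACT: format (in which case everything buffered is flushed). The chunk list below is made up for demonstration.

```python
import re

def gate(chunks):
    buffered, checked = "", False
    for chunk in chunks:
        buffered += chunk
        if checked:
            yield chunk
        elif re.search(r"ACT:", buffered):
            checked = True           # marker found: stream subsequent chunks
        elif len(buffered) >= 4 and not (buffered.startswith("GPT:") or buffered.startswith("ACT:")):
            yield buffered           # clearly not the two-response format: flush and stream
            checked = True

# The chunk containing the marker itself is swallowed, mirroring the code above.
print("".join(gate(["ACT:", " hello", " world"])))  # prints " hello world"
```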
diff --git a/spaces/FrankZxShen/so-vits-svc-models-ba/modules/F0Predictor/F0Predictor.py b/spaces/FrankZxShen/so-vits-svc-models-ba/modules/F0Predictor/F0Predictor.py
deleted file mode 100644
index 69d8a9bd28729e33d092a5af8e2ce544c1330c3b..0000000000000000000000000000000000000000
--- a/spaces/FrankZxShen/so-vits-svc-models-ba/modules/F0Predictor/F0Predictor.py
+++ /dev/null
@@ -1,16 +0,0 @@
-class F0Predictor(object):
- def compute_f0(self,wav,p_len):
- '''
- input: wav:[signal_length]
- p_len:int
- output: f0:[signal_length//hop_length]
- '''
- pass
-
- def compute_f0_uv(self,wav,p_len):
- '''
- input: wav:[signal_length]
- p_len:int
- output: f0:[signal_length//hop_length],uv:[signal_length//hop_length]
- '''
- pass
\ No newline at end of file
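A minimal, hypothetical subclass of the `F0Predictor` interface above, purely to illustrate the expected shapes: `compute_f0` returns one F0 value per hop, and `compute_f0_uv` additionally returns a voiced/unvoiced flag per hop. The constant-pitch "predictor" below is a toy, not a real F0 algorithm.

```python
import numpy as np

class ConstantF0Predictor:
    def __init__(self, hop_length=512, f0_value=220.0):
        self.hop_length = hop_length
        self.f0_value = f0_value

    def compute_f0(self, wav, p_len):
        # One F0 value per hop: shape [signal_length // hop_length]
        return np.full(p_len, self.f0_value)

    def compute_f0_uv(self, wav, p_len):
        f0 = self.compute_f0(wav, p_len)
        uv = (f0 > 0).astype(np.float32)   # 1.0 = voiced, 0.0 = unvoiced
        return f0, uv

pred = ConstantF0Predictor()
wav = np.zeros(16000)
f0, uv = pred.compute_f0_uv(wav, p_len=len(wav) // pred.hop_length)
print(f0.shape, uv.shape)  # (31,) (31,)
```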
diff --git a/spaces/FrankZxShen/so-vits-svc-models-ba/vencoder/whisper/utils.py b/spaces/FrankZxShen/so-vits-svc-models-ba/vencoder/whisper/utils.py
deleted file mode 100644
index 5dacc173c40bcd6e999d728862e29a968000b12e..0000000000000000000000000000000000000000
--- a/spaces/FrankZxShen/so-vits-svc-models-ba/vencoder/whisper/utils.py
+++ /dev/null
@@ -1,163 +0,0 @@
-import json
-import os
-import sys
-import zlib
-from typing import Callable, TextIO
-
-system_encoding = sys.getdefaultencoding()
-
-if system_encoding != "utf-8":
- def make_safe(string):
- # replaces any character not representable using the system default encoding with '?',
- # avoiding UnicodeEncodeError (https://github.com/openai/whisper/discussions/729).
- return string.encode(system_encoding, errors="replace").decode(system_encoding)
-else:
- def make_safe(string):
- # utf-8 can encode any Unicode code point, so no need to do the round-trip encoding
- return string
-
-
-def exact_div(x, y):
- assert x % y == 0
- return x // y
-
-
-def str2bool(string):
- str2val = {"True": True, "False": False}
- if string in str2val:
- return str2val[string]
- else:
- raise ValueError(f"Expected one of {set(str2val.keys())}, got {string}")
-
-
-def optional_int(string):
- return None if string == "None" else int(string)
-
-
-def optional_float(string):
- return None if string == "None" else float(string)
-
-
-def compression_ratio(text) -> float:
- text_bytes = text.encode("utf-8")
- return len(text_bytes) / len(zlib.compress(text_bytes))
-
-
-def format_timestamp(seconds: float, always_include_hours: bool = False, decimal_marker: str = '.'):
- assert seconds >= 0, "non-negative timestamp expected"
- milliseconds = round(seconds * 1000.0)
-
- hours = milliseconds // 3_600_000
- milliseconds -= hours * 3_600_000
-
- minutes = milliseconds // 60_000
- milliseconds -= minutes * 60_000
-
- seconds = milliseconds // 1_000
- milliseconds -= seconds * 1_000
-
- hours_marker = f"{hours:02d}:" if always_include_hours or hours > 0 else ""
- return f"{hours_marker}{minutes:02d}:{seconds:02d}{decimal_marker}{milliseconds:03d}"
-
-
-class ResultWriter:
- extension: str
-
- def __init__(self, output_dir: str):
- self.output_dir = output_dir
-
- def __call__(self, result: dict, audio_path: str):
- audio_basename = os.path.basename(audio_path)
- output_path = os.path.join(self.output_dir, audio_basename + "." + self.extension)
-
- with open(output_path, "w", encoding="utf-8") as f:
- self.write_result(result, file=f)
-
- def write_result(self, result: dict, file: TextIO):
- raise NotImplementedError
-
-
-class WriteTXT(ResultWriter):
- extension: str = "txt"
-
- def write_result(self, result: dict, file: TextIO):
- for segment in result["segments"]:
- print(segment['text'].strip(), file=file, flush=True)
-
-
-class WriteVTT(ResultWriter):
- extension: str = "vtt"
-
- def write_result(self, result: dict, file: TextIO):
- print("WEBVTT\n", file=file)
- for segment in result["segments"]:
- print(
- f"{format_timestamp(segment['start'])} --> {format_timestamp(segment['end'])}\n"
- f"{segment['text'].strip().replace('-->', '->')}\n",
- file=file,
- flush=True,
- )
-
-
-class WriteSRT(ResultWriter):
- extension: str = "srt"
-
- def write_result(self, result: dict, file: TextIO):
- for i, segment in enumerate(result["segments"], start=1):
- # write srt lines
- print(
- f"{i}\n"
- f"{format_timestamp(segment['start'], always_include_hours=True, decimal_marker=',')} --> "
- f"{format_timestamp(segment['end'], always_include_hours=True, decimal_marker=',')}\n"
- f"{segment['text'].strip().replace('-->', '->')}\n",
- file=file,
- flush=True,
- )
-
-
-class WriteTSV(ResultWriter):
- """
- Write a transcript to a file in TSV (tab-separated values) format containing lines like:
- <start time in integer milliseconds>\t<end time in integer milliseconds>\t<transcript text>
-
- Using integer milliseconds as start and end times means there's no chance of interference from
- an environment setting a language encoding that causes the decimal in a floating point number
- to appear as a comma; it is also faster and more efficient to parse & store, e.g., in C++.
- """
- extension: str = "tsv"
-
- def write_result(self, result: dict, file: TextIO):
- print("start", "end", "text", sep="\t", file=file)
- for segment in result["segments"]:
- print(round(1000 * segment['start']), file=file, end="\t")
- print(round(1000 * segment['end']), file=file, end="\t")
- print(segment['text'].strip().replace("\t", " "), file=file, flush=True)
-
-
-class WriteJSON(ResultWriter):
- extension: str = "json"
-
- def write_result(self, result: dict, file: TextIO):
- json.dump(result, file)
-
-
-def get_writer(output_format: str, output_dir: str) -> Callable[[dict, TextIO], None]:
- writers = {
- "txt": WriteTXT,
- "vtt": WriteVTT,
- "srt": WriteSRT,
- "tsv": WriteTSV,
- "json": WriteJSON,
- }
-
- if output_format == "all":
- all_writers = [writer(output_dir) for writer in writers.values()]
-
- def write_all(result: dict, file: TextIO):
- for writer in all_writers:
- writer(result, file)
-
- return write_all
-
- return writers[output_format](output_dir)
-
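A quick standalone sketch of the timestamp formatting used by the VTT/SRT writers above, reproduced here so it can be run without the rest of the module; the two sample inputs are arbitrary.

```python
def format_timestamp(seconds, always_include_hours=False, decimal_marker="."):
    assert seconds >= 0, "non-negative timestamp expected"
    ms = round(seconds * 1000.0)
    hours, ms = divmod(ms, 3_600_000)
    minutes, ms = divmod(ms, 60_000)
    secs, ms = divmod(ms, 1_000)
    hours_marker = f"{hours:02d}:" if always_include_hours or hours > 0 else ""
    return f"{hours_marker}{minutes:02d}:{secs:02d}{decimal_marker}{ms:03d}"

print(format_timestamp(3.5))                                                        # 00:03.500
print(format_timestamp(3725.042, always_include_hours=True, decimal_marker=","))    # 01:02:05,042
```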
diff --git a/spaces/FrankZxShen/so-vits-svc-models-pcr/vdecoder/__init__.py b/spaces/FrankZxShen/so-vits-svc-models-pcr/vdecoder/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/FridaZuley/RVC_HFKawaii/tools/infer_cli.py b/spaces/FridaZuley/RVC_HFKawaii/tools/infer_cli.py
deleted file mode 100644
index bbe0a53c1aac6a8f2d42613d554b2bdd07abea2d..0000000000000000000000000000000000000000
--- a/spaces/FridaZuley/RVC_HFKawaii/tools/infer_cli.py
+++ /dev/null
@@ -1,67 +0,0 @@
-import argparse
-import os
-import sys
-
-now_dir = os.getcwd()
-sys.path.append(now_dir)
-from dotenv import load_dotenv
-from scipy.io import wavfile
-
-from configs.config import Config
-from infer.modules.vc.modules import VC
-
-####
-# USAGE
-#
-# Run this script from a terminal or command prompt.
-
-
-def arg_parse() -> tuple:
- parser = argparse.ArgumentParser()
- parser.add_argument("--f0up_key", type=int, default=0)
- parser.add_argument("--input_path", type=str, help="input path")
- parser.add_argument("--index_path", type=str, help="index path")
- parser.add_argument("--f0method", type=str, default="harvest", help="harvest or pm")
- parser.add_argument("--opt_path", type=str, help="opt path")
- parser.add_argument("--model_name", type=str, help="store in assets/weight_root")
- parser.add_argument("--index_rate", type=float, default=0.66, help="index rate")
- parser.add_argument("--device", type=str, help="device")
- parser.add_argument("--is_half", type=bool, help="use half -> True")
- parser.add_argument("--filter_radius", type=int, default=3, help="filter radius")
- parser.add_argument("--resample_sr", type=int, default=0, help="resample sr")
- parser.add_argument("--rms_mix_rate", type=float, default=1, help="rms mix rate")
- parser.add_argument("--protect", type=float, default=0.33, help="protect")
-
- args = parser.parse_args()
- sys.argv = sys.argv[:1]
-
- return args
-
-
-def main():
- load_dotenv()
- args = arg_parse()
- config = Config()
- config.device = args.device if args.device else config.device
- config.is_half = args.is_half if args.is_half else config.is_half
- vc = VC(config)
- vc.get_vc(args.model_name)
- _, wav_opt = vc.vc_single(
- 0,
- args.input_path,
- args.f0up_key,
- None,
- args.f0method,
- args.index_path,
- None,
- args.index_rate,
- args.filter_radius,
- args.resample_sr,
- args.rms_mix_rate,
- args.protect,
- )
- wavfile.write(args.opt_path, wav_opt[0], wav_opt[1])
-
-
-if __name__ == "__main__":
- main()
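A hypothetical invocation of `tools/infer_cli.py`; every path, model name, and index file below is a placeholder, and the exact assets layout depends on the surrounding RVC setup.

```python
import subprocess
import sys

subprocess.run(
    [
        sys.executable, "tools/infer_cli.py",
        "--model_name", "my_voice.pth",            # expected under assets/weight_root
        "--input_path", "input.wav",
        "--index_path", "logs/my_voice/added.index",
        "--opt_path", "output.wav",
        "--f0method", "harvest",                   # or "pm"
        "--f0up_key", "0",
        "--device", "cpu",
    ],
    check=True,
)
```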
diff --git a/spaces/Gen-Sim/Gen-Sim/cliport/generated_tasks/build_two_circles.py b/spaces/Gen-Sim/Gen-Sim/cliport/generated_tasks/build_two_circles.py
deleted file mode 100644
index 3ee8c0ecc905f82504f17ff3b062dc3de3c5e501..0000000000000000000000000000000000000000
--- a/spaces/Gen-Sim/Gen-Sim/cliport/generated_tasks/build_two_circles.py
+++ /dev/null
@@ -1,63 +0,0 @@
-import numpy as np
-from cliport.tasks.task import Task
-from cliport.utils import utils
-
-class BuildTwoCircles(Task):
- """Construct two distinct circles on the tabletop using 10 red and 10 blue blocks.
- Each circle should consist of blocks of the same color, with the blue circle larger and surrounding the red circle."""
-
- def __init__(self):
- super().__init__()
- self.max_steps = 30
- self.lang_template = "construct two distinct circles on the tabletop using 6 red and 6 blue blocks"
- self.task_completed_desc = "done building two circles."
-
- def reset(self, env):
- super().reset(env)
-
- # Add blocks.
- block_urdf = 'block/block.urdf'
- block_size = (0.04, 0.04, 0.04)
-
- # Add 6 red blocks.
- red_blocks = []
- red_circle_poses = []
- circle_radius = 0.1
- circle_center = (0, 0, block_size[2] / 2)
- angles = np.linspace(0, 2 * np.pi, 6, endpoint=False)
- circle_pose = ((0.4, 0.3, 0.0), (0, 0, 0, 1)) # fixed pose
- self.add_corner_anchor_for_pose(env, circle_pose)
-
- # Define initial and target poses for the red and blue circles.
- for angle in angles:
- pos = (circle_center[0] + circle_radius * np.cos(angle),
- circle_center[1] + circle_radius * np.sin(angle),
- circle_center[2])
- block_pose = (utils.apply(circle_pose, pos), circle_pose[1])
- block_id = env.add_object(block_urdf, self.get_random_pose(env, block_size), color=utils.COLORS['red'])
- red_circle_poses.append(block_pose)
- red_blocks.append(block_id)
-
- # Add 6 blue blocks.
- blue_blocks = []
- blue_circle_poses = []
- circle_radius = 0.1
- circle_center = (0, 0, block_size[2] / 2)
- circle_pose = ((0.4, -0.3, 0.0), (0,0,0,1)) # fixed pose
- self.add_corner_anchor_for_pose(env, circle_pose)
-
- for angle in angles:
- pos = (circle_center[0] + circle_radius * np.cos(angle),
- circle_center[1] + circle_radius * np.sin(angle),
- circle_center[2])
- block_pose = (utils.apply(circle_pose, pos), circle_pose[1])
- block_id = env.add_object(block_urdf, self.get_random_pose(env, block_size), color=utils.COLORS['blue'])
- blue_circle_poses.append(block_pose)
- blue_blocks.append(block_id)
-
-
- # Goal: each red block is in the red circle, each blue block is in the blue circle.
- self.add_goal(objs=red_blocks, matches=np.ones((6, 6)), targ_poses=red_circle_poses, replace=False,
- rotations=True, metric='pose', params=None, step_max_reward=1 / 2, language_goal=self.lang_template)
- self.add_goal(objs=blue_blocks, matches=np.ones((6, 6)), targ_poses=blue_circle_poses, replace=False,
- rotations=True, metric='pose', params=None, step_max_reward=1 / 2, language_goal=self.lang_template)
\ No newline at end of file
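A standalone sketch of the circle-placement math used above: six target positions evenly spaced on a radius-0.1 circle around a fixed center. The center and block height are the same illustrative values used in the task.

```python
import numpy as np

block_height = 0.04
circle_radius = 0.1
circle_center = (0.0, 0.0, block_height / 2)
angles = np.linspace(0, 2 * np.pi, 6, endpoint=False)

targets = [
    (circle_center[0] + circle_radius * np.cos(a),
     circle_center[1] + circle_radius * np.sin(a),
     circle_center[2])
    for a in angles
]
for x, y, z in targets:
    print(f"({x:+.3f}, {y:+.3f}, {z:.3f})")
```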
diff --git a/spaces/Gradio-Blocks/StyleGAN-NADA/e4e/models/stylegan2/model.py b/spaces/Gradio-Blocks/StyleGAN-NADA/e4e/models/stylegan2/model.py
deleted file mode 100644
index fcb12af85669ab6fd7f79cb14ddbdf80b2fbd83d..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/StyleGAN-NADA/e4e/models/stylegan2/model.py
+++ /dev/null
@@ -1,678 +0,0 @@
-import math
-import random
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-if torch.cuda.is_available():
- from op.fused_act import FusedLeakyReLU, fused_leaky_relu
- from op.upfirdn2d import upfirdn2d
-else:
- from op.fused_act_cpu import FusedLeakyReLU, fused_leaky_relu
- from op.upfirdn2d_cpu import upfirdn2d
-
-
-class PixelNorm(nn.Module):
- def __init__(self):
- super().__init__()
-
- def forward(self, input):
- return input * torch.rsqrt(torch.mean(input ** 2, dim=1, keepdim=True) + 1e-8)
-
-
-def make_kernel(k):
- k = torch.tensor(k, dtype=torch.float32)
-
- if k.ndim == 1:
- k = k[None, :] * k[:, None]
-
- k /= k.sum()
-
- return k
-
-
-class Upsample(nn.Module):
- def __init__(self, kernel, factor=2):
- super().__init__()
-
- self.factor = factor
- kernel = make_kernel(kernel) * (factor ** 2)
- self.register_buffer('kernel', kernel)
-
- p = kernel.shape[0] - factor
-
- pad0 = (p + 1) // 2 + factor - 1
- pad1 = p // 2
-
- self.pad = (pad0, pad1)
-
- def forward(self, input):
- out = upfirdn2d(input, self.kernel, up=self.factor, down=1, pad=self.pad)
-
- return out
-
-
-class Downsample(nn.Module):
- def __init__(self, kernel, factor=2):
- super().__init__()
-
- self.factor = factor
- kernel = make_kernel(kernel)
- self.register_buffer('kernel', kernel)
-
- p = kernel.shape[0] - factor
-
- pad0 = (p + 1) // 2
- pad1 = p // 2
-
- self.pad = (pad0, pad1)
-
- def forward(self, input):
- out = upfirdn2d(input, self.kernel, up=1, down=self.factor, pad=self.pad)
-
- return out
-
-
-class Blur(nn.Module):
- def __init__(self, kernel, pad, upsample_factor=1):
- super().__init__()
-
- kernel = make_kernel(kernel)
-
- if upsample_factor > 1:
- kernel = kernel * (upsample_factor ** 2)
-
- self.register_buffer('kernel', kernel)
-
- self.pad = pad
-
- def forward(self, input):
- out = upfirdn2d(input, self.kernel, pad=self.pad)
-
- return out
-
-
-class EqualConv2d(nn.Module):
- def __init__(
- self, in_channel, out_channel, kernel_size, stride=1, padding=0, bias=True
- ):
- super().__init__()
-
- self.weight = nn.Parameter(
- torch.randn(out_channel, in_channel, kernel_size, kernel_size)
- )
- self.scale = 1 / math.sqrt(in_channel * kernel_size ** 2)
-
- self.stride = stride
- self.padding = padding
-
- if bias:
- self.bias = nn.Parameter(torch.zeros(out_channel))
-
- else:
- self.bias = None
-
- def forward(self, input):
- out = F.conv2d(
- input,
- self.weight * self.scale,
- bias=self.bias,
- stride=self.stride,
- padding=self.padding,
- )
-
- return out
-
- def __repr__(self):
- return (
- f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]},'
- f' {self.weight.shape[2]}, stride={self.stride}, padding={self.padding})'
- )
-
-
-class EqualLinear(nn.Module):
- def __init__(
- self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None
- ):
- super().__init__()
-
- self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul))
-
- if bias:
- self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init))
-
- else:
- self.bias = None
-
- self.activation = activation
-
- self.scale = (1 / math.sqrt(in_dim)) * lr_mul
- self.lr_mul = lr_mul
-
- def forward(self, input):
- if self.activation:
- out = F.linear(input, self.weight * self.scale)
- out = fused_leaky_relu(out, self.bias * self.lr_mul)
-
- else:
- out = F.linear(
- input, self.weight * self.scale, bias=self.bias * self.lr_mul
- )
-
- return out
-
- def __repr__(self):
- return (
- f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})'
- )
-
-
-class ScaledLeakyReLU(nn.Module):
- def __init__(self, negative_slope=0.2):
- super().__init__()
-
- self.negative_slope = negative_slope
-
- def forward(self, input):
- out = F.leaky_relu(input, negative_slope=self.negative_slope)
-
- return out * math.sqrt(2)
-
-
-class ModulatedConv2d(nn.Module):
- def __init__(
- self,
- in_channel,
- out_channel,
- kernel_size,
- style_dim,
- demodulate=True,
- upsample=False,
- downsample=False,
- blur_kernel=[1, 3, 3, 1],
- ):
- super().__init__()
-
- self.eps = 1e-8
- self.kernel_size = kernel_size
- self.in_channel = in_channel
- self.out_channel = out_channel
- self.upsample = upsample
- self.downsample = downsample
-
- if upsample:
- factor = 2
- p = (len(blur_kernel) - factor) - (kernel_size - 1)
- pad0 = (p + 1) // 2 + factor - 1
- pad1 = p // 2 + 1
-
- self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor=factor)
-
- if downsample:
- factor = 2
- p = (len(blur_kernel) - factor) + (kernel_size - 1)
- pad0 = (p + 1) // 2
- pad1 = p // 2
-
- self.blur = Blur(blur_kernel, pad=(pad0, pad1))
-
- fan_in = in_channel * kernel_size ** 2
- self.scale = 1 / math.sqrt(fan_in)
- self.padding = kernel_size // 2
-
- self.weight = nn.Parameter(
- torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)
- )
-
- self.modulation = EqualLinear(style_dim, in_channel, bias_init=1)
-
- self.demodulate = demodulate
-
- def __repr__(self):
- return (
- f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, '
- f'upsample={self.upsample}, downsample={self.downsample})'
- )
-
- def forward(self, input, style):
- batch, in_channel, height, width = input.shape
-
- style = self.modulation(style).view(batch, 1, in_channel, 1, 1)
- weight = self.scale * self.weight * style
-
- if self.demodulate:
- demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-8)
- weight = weight * demod.view(batch, self.out_channel, 1, 1, 1)
-
- weight = weight.view(
- batch * self.out_channel, in_channel, self.kernel_size, self.kernel_size
- )
-
- if self.upsample:
- input = input.view(1, batch * in_channel, height, width)
- weight = weight.view(
- batch, self.out_channel, in_channel, self.kernel_size, self.kernel_size
- )
- weight = weight.transpose(1, 2).reshape(
- batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size
- )
- out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch)
- _, _, height, width = out.shape
- out = out.view(batch, self.out_channel, height, width)
- out = self.blur(out)
-
- elif self.downsample:
- input = self.blur(input)
- _, _, height, width = input.shape
- input = input.view(1, batch * in_channel, height, width)
- out = F.conv2d(input, weight, padding=0, stride=2, groups=batch)
- _, _, height, width = out.shape
- out = out.view(batch, self.out_channel, height, width)
-
- else:
- input = input.view(1, batch * in_channel, height, width)
- out = F.conv2d(input, weight, padding=self.padding, groups=batch)
- _, _, height, width = out.shape
- out = out.view(batch, self.out_channel, height, width)
-
- return out
-
-
-class NoiseInjection(nn.Module):
- def __init__(self):
- super().__init__()
-
- self.weight = nn.Parameter(torch.zeros(1))
-
- def forward(self, image, noise=None):
- if noise is None:
- batch, _, height, width = image.shape
- noise = image.new_empty(batch, 1, height, width).normal_()
-
- return image + self.weight * noise
-
-
-class ConstantInput(nn.Module):
- def __init__(self, channel, size=4):
- super().__init__()
-
- self.input = nn.Parameter(torch.randn(1, channel, size, size))
-
- def forward(self, input):
- batch = input.shape[0]
- out = self.input.repeat(batch, 1, 1, 1)
-
- return out
-
-
-class StyledConv(nn.Module):
- def __init__(
- self,
- in_channel,
- out_channel,
- kernel_size,
- style_dim,
- upsample=False,
- blur_kernel=[1, 3, 3, 1],
- demodulate=True,
- ):
- super().__init__()
-
- self.conv = ModulatedConv2d(
- in_channel,
- out_channel,
- kernel_size,
- style_dim,
- upsample=upsample,
- blur_kernel=blur_kernel,
- demodulate=demodulate,
- )
-
- self.noise = NoiseInjection()
- # self.bias = nn.Parameter(torch.zeros(1, out_channel, 1, 1))
- # self.activate = ScaledLeakyReLU(0.2)
- self.activate = FusedLeakyReLU(out_channel)
-
- def forward(self, input, style, noise=None):
- out = self.conv(input, style)
- out = self.noise(out, noise=noise)
- # out = out + self.bias
- out = self.activate(out)
-
- return out
-
-
-class ToRGB(nn.Module):
- def __init__(self, in_channel, style_dim, upsample=True, blur_kernel=[1, 3, 3, 1]):
- super().__init__()
-
- if upsample:
- self.upsample = Upsample(blur_kernel)
-
- self.conv = ModulatedConv2d(in_channel, 3, 1, style_dim, demodulate=False)
- self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1))
-
- def forward(self, input, style, skip=None):
- out = self.conv(input, style)
- out = out + self.bias
-
- if skip is not None:
- skip = self.upsample(skip)
-
- out = out + skip
-
- return out
-
-
-class Generator(nn.Module):
- def __init__(
- self,
- size,
- style_dim,
- n_mlp,
- channel_multiplier=2,
- blur_kernel=[1, 3, 3, 1],
- lr_mlp=0.01,
- ):
- super().__init__()
-
- self.size = size
-
- self.style_dim = style_dim
-
- layers = [PixelNorm()]
-
- for i in range(n_mlp):
- layers.append(
- EqualLinear(
- style_dim, style_dim, lr_mul=lr_mlp, activation='fused_lrelu'
- )
- )
-
- self.style = nn.Sequential(*layers)
-
- self.channels = {
- 4: 512,
- 8: 512,
- 16: 512,
- 32: 512,
- 64: 256 * channel_multiplier,
- 128: 128 * channel_multiplier,
- 256: 64 * channel_multiplier,
- 512: 32 * channel_multiplier,
- 1024: 16 * channel_multiplier,
- }
-
- self.input = ConstantInput(self.channels[4])
- self.conv1 = StyledConv(
- self.channels[4], self.channels[4], 3, style_dim, blur_kernel=blur_kernel
- )
- self.to_rgb1 = ToRGB(self.channels[4], style_dim, upsample=False)
-
- self.log_size = int(math.log(size, 2))
- self.num_layers = (self.log_size - 2) * 2 + 1
-
- self.convs = nn.ModuleList()
- self.upsamples = nn.ModuleList()
- self.to_rgbs = nn.ModuleList()
- self.noises = nn.Module()
-
- in_channel = self.channels[4]
-
- for layer_idx in range(self.num_layers):
- res = (layer_idx + 5) // 2
- shape = [1, 1, 2 ** res, 2 ** res]
- self.noises.register_buffer(f'noise_{layer_idx}', torch.randn(*shape))
-
- for i in range(3, self.log_size + 1):
- out_channel = self.channels[2 ** i]
-
- self.convs.append(
- StyledConv(
- in_channel,
- out_channel,
- 3,
- style_dim,
- upsample=True,
- blur_kernel=blur_kernel,
- )
- )
-
- self.convs.append(
- StyledConv(
- out_channel, out_channel, 3, style_dim, blur_kernel=blur_kernel
- )
- )
-
- self.to_rgbs.append(ToRGB(out_channel, style_dim))
-
- in_channel = out_channel
-
- self.n_latent = self.log_size * 2 - 2
-
- def make_noise(self):
- device = self.input.input.device
-
- noises = [torch.randn(1, 1, 2 ** 2, 2 ** 2, device=device)]
-
- for i in range(3, self.log_size + 1):
- for _ in range(2):
- noises.append(torch.randn(1, 1, 2 ** i, 2 ** i, device=device))
-
- return noises
-
- def mean_latent(self, n_latent):
- latent_in = torch.randn(
- n_latent, self.style_dim, device=self.input.input.device
- )
- latent = self.style(latent_in).mean(0, keepdim=True)
-
- return latent
-
- def get_latent(self, input):
- return self.style(input)
-
- def forward(
- self,
- styles,
- return_latents=False,
- return_features=False,
- inject_index=None,
- truncation=1,
- truncation_latent=None,
- input_is_latent=False,
- noise=None,
- randomize_noise=True,
- ):
- if not input_is_latent:
- styles = [self.style(s) for s in styles]
-
- if noise is None:
- if randomize_noise:
- noise = [None] * self.num_layers
- else:
- noise = [
- getattr(self.noises, f'noise_{i}') for i in range(self.num_layers)
- ]
-
- if truncation < 1:
- style_t = []
-
- for style in styles:
- style_t.append(
- truncation_latent + truncation * (style - truncation_latent)
- )
-
- styles = style_t
-
- if len(styles) < 2:
- inject_index = self.n_latent
-
- if styles[0].ndim < 3:
- latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
- else:
- latent = styles[0]
-
- else:
- if inject_index is None:
- inject_index = random.randint(1, self.n_latent - 1)
-
- latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
- latent2 = styles[1].unsqueeze(1).repeat(1, self.n_latent - inject_index, 1)
-
- latent = torch.cat([latent, latent2], 1)
-
- out = self.input(latent)
- out = self.conv1(out, latent[:, 0], noise=noise[0])
-
- skip = self.to_rgb1(out, latent[:, 1])
-
- i = 1
- for conv1, conv2, noise1, noise2, to_rgb in zip(
- self.convs[::2], self.convs[1::2], noise[1::2], noise[2::2], self.to_rgbs
- ):
- out = conv1(out, latent[:, i], noise=noise1)
- out = conv2(out, latent[:, i + 1], noise=noise2)
- skip = to_rgb(out, latent[:, i + 2], skip)
-
- i += 2
-
- image = skip
-
- if return_latents:
- return image, latent
- elif return_features:
- return image, out
- else:
- return image, None
-
-
-class ConvLayer(nn.Sequential):
- def __init__(
- self,
- in_channel,
- out_channel,
- kernel_size,
- downsample=False,
- blur_kernel=[1, 3, 3, 1],
- bias=True,
- activate=True,
- ):
- layers = []
-
- if downsample:
- factor = 2
- p = (len(blur_kernel) - factor) + (kernel_size - 1)
- pad0 = (p + 1) // 2
- pad1 = p // 2
-
- layers.append(Blur(blur_kernel, pad=(pad0, pad1)))
-
- stride = 2
- self.padding = 0
-
- else:
- stride = 1
- self.padding = kernel_size // 2
-
- layers.append(
- EqualConv2d(
- in_channel,
- out_channel,
- kernel_size,
- padding=self.padding,
- stride=stride,
- bias=bias and not activate,
- )
- )
-
- if activate:
- if bias:
- layers.append(FusedLeakyReLU(out_channel))
-
- else:
- layers.append(ScaledLeakyReLU(0.2))
-
- super().__init__(*layers)
-
-
-class ResBlock(nn.Module):
- def __init__(self, in_channel, out_channel, blur_kernel=[1, 3, 3, 1]):
- super().__init__()
-
- self.conv1 = ConvLayer(in_channel, in_channel, 3)
- self.conv2 = ConvLayer(in_channel, out_channel, 3, downsample=True)
-
- self.skip = ConvLayer(
- in_channel, out_channel, 1, downsample=True, activate=False, bias=False
- )
-
- def forward(self, input):
- out = self.conv1(input)
- out = self.conv2(out)
-
- skip = self.skip(input)
- out = (out + skip) / math.sqrt(2)
-
- return out
-
-
-class Discriminator(nn.Module):
- def __init__(self, size, channel_multiplier=2, blur_kernel=[1, 3, 3, 1]):
- super().__init__()
-
- channels = {
- 4: 512,
- 8: 512,
- 16: 512,
- 32: 512,
- 64: 256 * channel_multiplier,
- 128: 128 * channel_multiplier,
- 256: 64 * channel_multiplier,
- 512: 32 * channel_multiplier,
- 1024: 16 * channel_multiplier,
- }
-
- convs = [ConvLayer(3, channels[size], 1)]
-
- log_size = int(math.log(size, 2))
-
- in_channel = channels[size]
-
- for i in range(log_size, 2, -1):
- out_channel = channels[2 ** (i - 1)]
-
- convs.append(ResBlock(in_channel, out_channel, blur_kernel))
-
- in_channel = out_channel
-
- self.convs = nn.Sequential(*convs)
-
- self.stddev_group = 4
- self.stddev_feat = 1
-
- self.final_conv = ConvLayer(in_channel + 1, channels[4], 3)
- self.final_linear = nn.Sequential(
- EqualLinear(channels[4] * 4 * 4, channels[4], activation='fused_lrelu'),
- EqualLinear(channels[4], 1),
- )
-
- def forward(self, input):
- out = self.convs(input)
-
- batch, channel, height, width = out.shape
- group = min(batch, self.stddev_group)
- stddev = out.view(
- group, -1, self.stddev_feat, channel // self.stddev_feat, height, width
- )
- stddev = torch.sqrt(stddev.var(0, unbiased=False) + 1e-8)
- stddev = stddev.mean([2, 3, 4], keepdims=True).squeeze(2)
- stddev = stddev.repeat(group, 1, height, width)
- out = torch.cat([out, stddev], 1)
-
- out = self.final_conv(out)
-
- out = out.view(batch, -1)
- out = self.final_linear(out)
-
- return out
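A small sketch of the `make_kernel` helper above: a 1D binomial kernel is turned into a normalized 2D blur kernel via an outer product, matching the `[1, 3, 3, 1]` default used throughout the generator and discriminator.

```python
import torch

def make_kernel(k):
    k = torch.tensor(k, dtype=torch.float32)
    if k.ndim == 1:
        k = k[None, :] * k[:, None]   # outer product -> 2D kernel
    k /= k.sum()                      # normalize so the blur preserves brightness
    return k

print(make_kernel([1, 3, 3, 1]))      # 4x4 kernel summing to 1.0, peaked at the center
```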
diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/hrnet/fcos_hrnetv2p_w40_gn-head_mstrain_640-800_4x4_2x_coco.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/hrnet/fcos_hrnetv2p_w40_gn-head_mstrain_640-800_4x4_2x_coco.py
deleted file mode 100644
index 452b0fe2d89566a998744d9c7812e550596462e3..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/hrnet/fcos_hrnetv2p_w40_gn-head_mstrain_640-800_4x4_2x_coco.py
+++ /dev/null
@@ -1,10 +0,0 @@
-_base_ = './fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py'
-model = dict(
- pretrained='open-mmlab://msra/hrnetv2_w40',
- backbone=dict(
- type='HRNet',
- extra=dict(
- stage2=dict(num_channels=(40, 80)),
- stage3=dict(num_channels=(40, 80, 160)),
- stage4=dict(num_channels=(40, 80, 160, 320)))),
- neck=dict(type='HRFPN', in_channels=[40, 80, 160, 320], out_channels=256))
diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/rpn/rpn_r101_fpn_2x_coco.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/rpn/rpn_r101_fpn_2x_coco.py
deleted file mode 100644
index 6908d3001d89ee3efe2b1e508759fbda94b7bf7a..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/rpn/rpn_r101_fpn_2x_coco.py
+++ /dev/null
@@ -1,2 +0,0 @@
-_base_ = './rpn_r50_fpn_2x_coco.py'
-model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101))
diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/mmseg/datasets/drive.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/mmseg/datasets/drive.py
deleted file mode 100644
index 3cbfda8ae74bdf26c5aef197ff2866a7c7ad0cfd..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/uniformer_image_segmentation/mmseg/datasets/drive.py
+++ /dev/null
@@ -1,27 +0,0 @@
-import os.path as osp
-
-from .builder import DATASETS
-from .custom import CustomDataset
-
-
-@DATASETS.register_module()
-class DRIVEDataset(CustomDataset):
- """DRIVE dataset.
-
- In segmentation map annotation for DRIVE, 0 stands for background, which is
- included in 2 categories. ``reduce_zero_label`` is fixed to False. The
- ``img_suffix`` is fixed to '.png' and ``seg_map_suffix`` is fixed to
- '_manual1.png'.
- """
-
- CLASSES = ('background', 'vessel')
-
- PALETTE = [[120, 120, 120], [6, 230, 230]]
-
- def __init__(self, **kwargs):
- super(DRIVEDataset, self).__init__(
- img_suffix='.png',
- seg_map_suffix='_manual1.png',
- reduce_zero_label=False,
- **kwargs)
- assert osp.exists(self.img_dir)
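A hypothetical mmseg-style config fragment referencing the registered `DRIVEDataset` above; the directory names are placeholders and the pipeline is deliberately left empty.

```python
# Sketch only: plug in the project's actual data_root and training pipeline.
dataset_cfg = dict(
    type='DRIVEDataset',
    data_root='data/DRIVE',
    img_dir='images/training',
    ann_dir='annotations/training',
    pipeline=[],  # fill in with the training transforms
)
```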
diff --git a/spaces/Gradio-Themes/informativedrawings-sketch-style/README.md b/spaces/Gradio-Themes/informativedrawings-sketch-style/README.md
deleted file mode 100644
index 46d0c0aeb524257ee064f6bc5784c72618a5843a..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Themes/informativedrawings-sketch-style/README.md
+++ /dev/null
@@ -1,19 +0,0 @@
----
-title: Informativedrawings (sketch style)
-emoji: 📚
-colorFrom: gray
-colorTo: blue
-sdk: gradio
-app_file: app.py
-pinned: false
-license: mit
-duplicated_from: carolineec/informativedrawings
-tags:
- - track-5
----
-
-# Informativedrawings (sketch style)
-
-A clone of the [carolineec/informativedrawings](https://huggingface.co/spaces/carolineec/informativedrawings) space using the [sketch Gradio style](https://huggingface.co/spaces/gstaff/sketch) by [@gstaff](https://huggingface.co/gstaff).
-
-Submitted for the [Gradio Themes Party](https://huggingface.co/Gradio-Themes).
\ No newline at end of file
diff --git a/spaces/GrandaddyShmax/MusicGen_Plus_hfv2/audiocraft/modules/transformer.py b/spaces/GrandaddyShmax/MusicGen_Plus_hfv2/audiocraft/modules/transformer.py
deleted file mode 100644
index e69cca829d774d0b8b36c0de9b7924373da81b43..0000000000000000000000000000000000000000
--- a/spaces/GrandaddyShmax/MusicGen_Plus_hfv2/audiocraft/modules/transformer.py
+++ /dev/null
@@ -1,747 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-"""
-Transformer model, with streaming support, xformer attention support
-and easy causal attention with a potentially finite receptive field.
-
-See `StreamingTransformer` for more information.
-
-Unlike regular PyTorch Transformer, we make the hard choice that batches are first.
-"""
-
-import typing as tp
-
-from einops import rearrange
-import torch
-import torch.nn as nn
-from torch.nn import functional as F
-from torch.utils.checkpoint import checkpoint as torch_checkpoint
-from xformers import ops
-
-from .rope import RotaryEmbedding
-from .streaming import StreamingModule
-
-_efficient_attention_backend: str = 'torch'
-
-
-def set_efficient_attention_backend(backend: str = 'torch'):
- # Using torch by default, it seems a bit faster on older P100 GPUs (~20% faster).
- global _efficient_attention_backend
- assert backend in ['xformers', 'torch']
- _efficient_attention_backend = backend
-
-
-def _get_attention_time_dimension() -> int:
- if _efficient_attention_backend == 'torch':
- return 2
- else:
- return 1
-
-
-def _is_profiled() -> bool:
- # Return true if we are currently running with a xformers profiler activated.
- try:
- from xformers.profiler import profiler
- except ImportError:
- return False
- return profiler._Profiler._CURRENT_PROFILER is not None
-
-
-def create_norm_fn(norm_type: str, dim: int, **kwargs) -> nn.Module:
- """Create normalization module for transformer encoder layer.
-
- Args:
- norm_type (str): Normalization method.
- dim (int): Dimension of the normalized layer.
- **kwargs (dict): Additional parameters for normalization layer.
- Returns:
- nn.Module: Normalization module.
- """
- if norm_type == 'layer_norm':
- return nn.LayerNorm(dim, eps=1e-5, **kwargs)
- else:
- raise ValueError(f"Unknown norm type: {norm_type}")
-
-
-def create_sin_embedding(positions: torch.Tensor, dim: int, max_period: float = 10000,
- dtype: torch.dtype = torch.float32) -> torch.Tensor:
- """Create sinusoidal positional embedding, with shape `[B, T, C]`.
-
- Args:
- positions (torch.Tensor): LongTensor of positions.
- dim (int): Dimension of the embedding.
- max_period (float): Maximum period of the cosine/sine functions.
- dtype (torch.dtype or str): dtype to use to generate the embedding.
- Returns:
- torch.Tensor: Sinusoidal positional embedding.
- """
- # We aim for BTC format
- assert dim % 2 == 0
- half_dim = dim // 2
- positions = positions.to(dtype)
- adim = torch.arange(half_dim, device=positions.device, dtype=dtype).view(1, 1, -1)
- max_period_tensor = torch.full([], max_period, device=positions.device, dtype=dtype) # avoid sync point
- phase = positions / (max_period_tensor ** (adim / (half_dim - 1)))
- return torch.cat([torch.cos(phase), torch.sin(phase)], dim=-1)
-
-
-def expand_repeated_kv(x: torch.Tensor, n_rep: int) -> torch.Tensor:
- """torch.repeat_interleave(x, dim=2, repeats=n_rep) from xlformers"""
- if n_rep == 1:
- return x
- if _efficient_attention_backend == 'torch':
- bs, n_kv_heads, slen, head_dim = x.shape
- return (
- x[:, :, None, :, :]
- .expand(bs, n_kv_heads, n_rep, slen, head_dim)
- .reshape(bs, n_kv_heads * n_rep, slen, head_dim)
- )
- else:
- bs, slen, n_kv_heads, head_dim = x.shape
- return (
- x[:, :, :, None, :]
- .expand(bs, slen, n_kv_heads, n_rep, head_dim)
- .reshape(bs, slen, n_kv_heads * n_rep, head_dim)
- )
-
-
-class LayerScale(nn.Module):
- """Layer scale from [Touvron et al 2021] (https://arxiv.org/pdf/2103.17239.pdf).
- This diagonally rescales the residual outputs close to 0, with a learnt scale.
-
- Args:
- channels (int): Number of channels.
- init (float): Initial scale.
- channel_last (bool): If True, expect `[*, C]` shaped tensors, otherwise, `[*, C, T]`.
- device (torch.device or None): Device on which to initialize the module.
- dtype (torch.dtype or None): dtype to use to initialize the module.
- """
- def __init__(self, channels: int, init: float = 1e-4, channel_last: bool = True,
- device=None, dtype=None):
- super().__init__()
- self.channel_last = channel_last
- self.scale = nn.Parameter(
- torch.full((channels,), init,
- requires_grad=True, device=device, dtype=dtype))
-
- def forward(self, x: torch.Tensor):
- if self.channel_last:
- return self.scale * x
- else:
- return self.scale[:, None] * x
-
-
-class StreamingMultiheadAttention(StreamingModule):
- """Similar to `nn.MultiheadAttention` but with support for streaming, causal evaluation.
-
- Args:
- embed_dim (int): Dimension to project to.
- num_heads (int): Number of heads.
- dropout (float): Dropout level.
- bias (bool): Use bias in projections.
- causal (bool): Causal mask applied automatically.
- past_context (int or None): Receptive field for the causal mask, infinite if None.
- custom (bool): Use custom MHA implementation, for testing / benchmarking.
- memory_efficient (bool): Use xformers based memory efficient attention.
- attention_as_float32 (bool): Perform the attention as float32
- (especially important with memory_efficient as autocast won't do this automatically).
- rope (`RotaryEmbedding` or None): Rope embedding to use.
- cross_attention (bool): Should be True when used as a cross attention.
- All keys and values must be available at once, streaming is only for the queries.
- Cannot be used with `causal` or `rope` (as it wouldn't make sense to
- interpret the time steps in the keys relative to those in the queries).
- safe_streaming (bool): Bug fix, will go away with xformers update.
- qk_layer_norm (bool): Layer normalization applied to queries and keys before dot product.
- kv_repeat (int): If > 1, will repeat keys and values multiple times (must divide num_heads).
- This will lead to faster decoding time on A100 or other GPUs with tensor cores.
- device (torch.device or None): Device on which to initialize.
- dtype (torch.dtype or None): dtype to use.
- """
- def __init__(self, embed_dim: int, num_heads: int, dropout: float = 0.0, bias: bool = True,
- causal: bool = False, past_context: tp.Optional[int] = None, custom: bool = False,
- memory_efficient: bool = False, attention_as_float32: bool = False,
- rope: tp.Optional[RotaryEmbedding] = None, cross_attention: bool = False,
- safe_streaming: bool = True, qk_layer_norm: bool = False, kv_repeat: int = 1,
- device=None, dtype=None):
- super().__init__()
- factory_kwargs = {'device': device, 'dtype': dtype}
- if past_context is not None:
- assert causal
-
- self.embed_dim = embed_dim
- self.causal = causal
- self.past_context = past_context
- self.memory_efficient = memory_efficient
- self.attention_as_float32 = attention_as_float32
- self.rope = rope
- self.cross_attention = cross_attention
- self.safe_streaming = safe_streaming
- self.num_heads = num_heads
- self.dropout = dropout
- self.kv_repeat = kv_repeat
- if cross_attention:
- assert not causal, "Causal cannot work with cross attention."
- assert rope is None, "Rope cannot work with cross attention."
-
- if memory_efficient:
- _verify_xformers_memory_efficient_compat()
-
- self.custom = _is_custom(custom, memory_efficient)
- if self.custom:
- out_dim = embed_dim
- assert num_heads % kv_repeat == 0
- assert not cross_attention or kv_repeat == 1
- num_kv = num_heads // kv_repeat
- kv_dim = (embed_dim // num_heads) * num_kv
- out_dim += 2 * kv_dim
- in_proj = nn.Linear(embed_dim, out_dim, bias=bias, **factory_kwargs)
- # We try to follow the default PyTorch MHA convention, to easily compare results.
- self.in_proj_weight = in_proj.weight
- self.in_proj_bias = in_proj.bias
- if bias:
- self.in_proj_bias.data.zero_() # Following Pytorch convention
- self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias, **factory_kwargs)
- if bias:
- self.out_proj.bias.data.zero_()
- else:
- assert not qk_layer_norm
- assert kv_repeat == 1
- self.mha = nn.MultiheadAttention(
- embed_dim, num_heads, dropout=dropout, bias=bias, batch_first=True,
- **factory_kwargs)
- self.qk_layer_norm = qk_layer_norm
- if qk_layer_norm:
- assert self.custom
- assert kv_repeat == 1
- ln_dim = embed_dim
- self.q_layer_norm = nn.LayerNorm(ln_dim)
- self.k_layer_norm = nn.LayerNorm(ln_dim)
-
- def _load_from_state_dict(self, state_dict, prefix, *args, **kwargs):
- if not self.custom:
- # Support compat with regular MHA
- keys = [n for n, _ in self.mha.named_parameters()]
- for key in keys:
- if prefix + key in state_dict:
- state_dict[prefix + "mha." + key] = state_dict.pop(prefix + key)
- super()._load_from_state_dict(state_dict, prefix, *args, **kwargs)
-
- def _get_mask(self, current_steps: int, device: torch.device, dtype: torch.dtype):
- # Return a causal mask, accounting for potentially stored past keys/values
- # We actually return a bias for the attention score, as this has the same
- # convention both in the built-in MHA in PyTorch and in the xformers functions.
- time_dim = _get_attention_time_dimension()
- if self.memory_efficient:
- from xformers.ops import LowerTriangularMask
- if current_steps == 1:
- # If we only have one step, then we do not need a mask.
- return None
- elif 'past_keys' in self._streaming_state:
- raise RuntimeError('Not supported at the moment')
- else:
- # Then we can safely use a lower triangular mask
- return LowerTriangularMask()
- if self._streaming_state:
- past_keys = self._streaming_state['past_keys']
- past_steps = past_keys.shape[time_dim]
- else:
- past_steps = 0
-
- queries_pos = torch.arange(
- past_steps, current_steps + past_steps, device=device).view(-1, 1)
- keys_pos = torch.arange(past_steps + current_steps, device=device).view(1, -1)
- delta = queries_pos - keys_pos
- valid = delta >= 0
- if self.past_context is not None:
- valid &= (delta <= self.past_context)
- return torch.where(
- valid,
- torch.zeros([], device=device, dtype=dtype),
- torch.full([], float('-inf'), device=device, dtype=dtype))
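The position arithmetic above can be illustrated standalone. The sketch below (with made-up step counts) builds the same additive bias: 0 where a query may attend, `-inf` elsewhere, with attention limited to `past_context` steps back.

```python
import torch

# past_steps cached entries, current_steps new queries, window of past_context.
past_steps, current_steps, past_context = 2, 3, 3
queries_pos = torch.arange(past_steps, past_steps + current_steps).view(-1, 1)
keys_pos = torch.arange(past_steps + current_steps).view(1, -1)
delta = queries_pos - keys_pos
valid = (delta >= 0) & (delta <= past_context)
bias = torch.where(valid, torch.zeros([]), torch.full([], float('-inf')))
# bias[i, j] == 0 iff query i may attend to key j.
assert bias.shape == (current_steps, past_steps + current_steps)
```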
-
- def _complete_kv(self, k, v):
- time_dim = _get_attention_time_dimension()
- if self.cross_attention:
- # With cross attention we assume all keys and values
- # are already available, and streaming is with respect
- # to the queries only.
- return k, v
- # Complete the key/value pair using the streaming state.
- if self._streaming_state:
- pk = self._streaming_state['past_keys']
- nk = torch.cat([pk, k], dim=time_dim)
- if v is k:
- nv = nk
- else:
- pv = self._streaming_state['past_values']
- nv = torch.cat([pv, v], dim=time_dim)
- else:
- nk = k
- nv = v
-
- assert nk.shape[time_dim] == nv.shape[time_dim]
- offset = 0
- if self.past_context is not None:
- offset = max(0, nk.shape[time_dim] - self.past_context)
- if self._is_streaming:
- self._streaming_state['past_keys'] = nk[:, offset:]
- if v is not k:
- self._streaming_state['past_values'] = nv[:, offset:]
- if 'offset' in self._streaming_state:
- self._streaming_state['offset'] += offset
- else:
- self._streaming_state['offset'] = torch.tensor(0)
- return nk, nv
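A standalone sketch of the KV-cache bookkeeping above (assuming the `[B, T, ...]` time layout): new keys are appended after the cached ones, then the cache is truncated to the most recent `past_context` steps.

```python
import torch

time_dim, past_context = 1, 4
cache = torch.randn(1, 3, 8)            # 3 cached steps
new_k = torch.randn(1, 2, 8)            # 2 incoming steps
nk = torch.cat([cache, new_k], dim=time_dim)
offset = max(0, nk.shape[time_dim] - past_context)
cache = nk[:, offset:]                  # keep only the last past_context steps
assert cache.shape[time_dim] == past_context
```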
-
- def _apply_rope(self, query: torch.Tensor, key: torch.Tensor):
- # TODO: fix and verify layout.
- assert _efficient_attention_backend == 'xformers', 'Rope not supported with torch attn.'
- # Apply rope embeddings to query and key tensors.
- assert self.rope is not None
- if 'past_keys' in self._streaming_state:
- past_keys_offset = self._streaming_state['past_keys'].shape[1]
- else:
- past_keys_offset = 0
- if 'offset' in self._streaming_state:
- past_context_offset = int(self._streaming_state['offset'].item())
- else:
- past_context_offset = 0
- streaming_offset = past_context_offset + past_keys_offset
- return self.rope.rotate_qk(query, key, start=streaming_offset)
-
- def forward(self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor,
- key_padding_mask=None, need_weights=False, attn_mask=None,
- average_attn_weights=True, is_causal=False):
- assert attn_mask is None
- assert not is_causal, ("new param added in torch 2.0.1 not supported, "
- "use the causal args in the constructor.")
-
- time_dim = _get_attention_time_dimension()
- if time_dim == 2:
- layout = "b h t d"
- else:
- layout = "b t h d"
- dtype = query.dtype
- if self._is_streaming:
- assert self.causal or self.cross_attention, \
- "Streaming only available for causal or cross attention"
-
- if self.causal:
- # At the moment we specialize only for the self-attention case.
- assert query.shape[1] == key.shape[1], "Causal only for same length query / key / value"
- assert value.shape[1] == key.shape[1], "Causal only for same length query / key / value"
- attn_mask = self._get_mask(query.shape[1], query.device, query.dtype)
-
- if self.custom:
- # custom implementation
- assert need_weights is False
- assert key_padding_mask is None
- if self.cross_attention:
- # Different queries, keys, values: we have to manually split the weights
- # before applying the linear.
- dim = self.in_proj_weight.shape[0] // 3
- if self.in_proj_bias is None:
- bias_q, bias_k, bias_v = None, None, None
- else:
- bias_q = self.in_proj_bias[:dim]
- bias_k = self.in_proj_bias[dim: 2 * dim]
- bias_v = self.in_proj_bias[2 * dim:]
- q = nn.functional.linear(query, self.in_proj_weight[:dim], bias_q)
- # todo: when streaming, we could actually save k, v and check that the shapes actually match.
- k = nn.functional.linear(key, self.in_proj_weight[dim: 2 * dim], bias_k)
- v = nn.functional.linear(value, self.in_proj_weight[2 * dim:], bias_v)
- if self.qk_layer_norm is True:
- q = self.q_layer_norm(q)
- k = self.k_layer_norm(k)
- q, k, v = [rearrange(x, f"b t (h d) -> {layout}", h=self.num_heads) for x in [q, k, v]]
- else:
- if not _is_profiled():
- # profiling breaks that property somehow.
- assert query is key, "specialized implementation"
- assert value is key, "specialized implementation"
- projected = nn.functional.linear(query, self.in_proj_weight, self.in_proj_bias)
- if self.kv_repeat == 1:
- if time_dim == 2:
- bound_layout = "b h p t d"
- else:
- bound_layout = "b t p h d"
- packed = rearrange(projected, f"b t (p h d) -> {bound_layout}", p=3, h=self.num_heads)
- q, k, v = ops.unbind(packed, dim=2)
- else:
- embed_dim = self.embed_dim
- per_head_dim = (embed_dim // self.num_heads)
- kv_heads = self.num_heads // self.kv_repeat
- q = projected[:, :, :embed_dim]
- start = embed_dim
- end = start + per_head_dim * kv_heads
- k = projected[:, :, start: end]
- v = projected[:, :, end:]
- q = rearrange(q, f"b t (h d) -> {layout}", h=self.num_heads)
- k = rearrange(k, f"b t (h d) -> {layout}", h=kv_heads)
- v = rearrange(v, f"b t (h d) -> {layout}", h=kv_heads)
-
- if self.qk_layer_norm is True:
- assert self.kv_repeat == 1
- q, k = [rearrange(x, f"{layout} -> b t (h d)") for x in [q, k]]
- q = self.q_layer_norm(q)
- k = self.k_layer_norm(k)
- q, k = [rearrange(x, f"b t (h d) -> {layout}", h=self.num_heads) for x in [q, k]]
- if self.rope:
- q, k = self._apply_rope(q, k)
- k, v = self._complete_kv(k, v)
- if self.kv_repeat > 1:
- k = expand_repeated_kv(k, self.kv_repeat)
- v = expand_repeated_kv(v, self.kv_repeat)
- if self.attention_as_float32:
- q, k, v = [x.float() for x in [q, k, v]]
- if self.memory_efficient:
- p = self.dropout if self.training else 0
- if _efficient_attention_backend == 'torch':
- x = torch.nn.functional.scaled_dot_product_attention(
- q, k, v, is_causal=attn_mask is not None, dropout_p=p)
- else:
- x = ops.memory_efficient_attention(q, k, v, attn_mask, p=p)
- else:
- # We include the dot product as float32, for consistency
- # with the other implementations that include that step
- # as part of the attention. Note that when using `autocast`,
- # the einsums would be done as bfloat16, but the softmax
- # would be done as float32, so `attention_as_float32` will
- # extend a bit the range of operations done in float32,
- # although this should make no difference.
- q = q / q.shape[-1] ** 0.5
- key_layout = layout.replace('t', 'k')
- query_layout = layout
- if self._is_streaming and self.safe_streaming and q.device.type == 'cuda':
- with torch.autocast(device_type=q.device.type, dtype=torch.float32):
- pre_w = torch.einsum(f"{query_layout},{key_layout}-> b h t k", q, k)
- else:
- pre_w = torch.einsum(f"{query_layout},{key_layout}-> b h t k", q, k)
- if attn_mask is not None:
- pre_w = pre_w + attn_mask
- w = torch.softmax(pre_w, dim=-1)
- w = F.dropout(w, self.dropout, training=self.training).to(v)
- # Key and value have the same format.
- x = torch.einsum(f"b h t k, {key_layout} -> {layout}", w, v)
- x = x.to(dtype)
- x = rearrange(x, f"{layout} -> b t (h d)", h=self.num_heads)
- x = self.out_proj(x)
- else:
- key, value = self._complete_kv(key, value)
- if self.attention_as_float32:
- query, key, value = [x.float() for x in [query, key, value]]
- x, _ = self.mha(
- query, key, value, key_padding_mask,
- need_weights, attn_mask, average_attn_weights)
- x = x.to(dtype)
-
- return x, None
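A hedged usage sketch (using the default non-custom `nn.MultiheadAttention` backend, which avoids any xformers dependency, and assuming the module-level helpers in this file are available): self-attention over a `[B, T, C]` batch with an automatic causal mask.

```python
import torch

# Illustrative only: default constructor arguments, no streaming state.
mha = StreamingMultiheadAttention(embed_dim=256, num_heads=8, causal=True)
x = torch.randn(4, 50, 256)             # [B, T, C]
out, _ = mha(x, x, x)
assert out.shape == (4, 50, 256)
```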
-
-
-class StreamingTransformerLayer(nn.TransformerEncoderLayer):
- """TransformerLayer with Streaming / Causal support.
- This also integrates cross_attention, when passing `cross_attention=True`,
- rather than having two separate classes like in PyTorch.
-
- Args:
- d_model (int): Dimension of the data.
- num_heads (int): Number of heads.
- dim_feedforward (int): Intermediate dimension of FF module.
- dropout (float): Dropout both for MHA and FF.
- bias_ff (bool): Use bias for FF.
- bias_attn (bool): Use bias for MHA.
- causal (bool): Causal mask applied automatically.
- past_context (int or None): Receptive field for the causal mask, infinite if None.
- custom (bool): Use custom MHA implementation, for testing / benchmarking.
- memory_efficient (bool): Use xformers based memory efficient attention.
- attention_as_float32 (bool): Perform the attention as float32
- (especially important with memory_efficient as autocast won't do this automatically).
- qk_layer_norm (bool): Layer normalization applied to queries and keys before dot product in attention.
- qk_layer_norm_cross (bool): Same for the cross attention.
- cross_attention (bool): If True, expect to get secondary input for cross-attention.
- Cross attention will use the default MHA, as it typically won't require
- special treatment.
- layer_scale (float or None): If not None, LayerScale will be used with
- the given value as initial scale.
- rope (`RotaryEmbedding` or None): Rope embedding to use.
- attention_dropout (float or None): If not None, use this value for the attention dropout,
- separate from the dropout used in the FFN.
- kv_repeat (int): If > 1, will repeat keys and values multiple times (must divide num_heads).
- This will lead to faster decoding time on A100 or other GPUs with tensor cores.
- device (torch.device or None): Device on which to initialize.
- dtype (torch.dtype or None): dtype to use.
- **kwargs: See `nn.TransformerEncoderLayer`.
- """
- def __init__(self, d_model: int, num_heads: int, dim_feedforward: int = 2048, dropout: float = 0.1,
- bias_ff: bool = True, bias_attn: bool = True, causal: bool = False,
- past_context: tp.Optional[int] = None, custom: bool = False,
- memory_efficient: bool = False, attention_as_float32: bool = False,
- qk_layer_norm: bool = False, qk_layer_norm_cross: bool = False,
- cross_attention: bool = False, layer_scale: tp.Optional[float] = None,
- rope: tp.Optional[RotaryEmbedding] = None, attention_dropout: tp.Optional[float] = None,
- kv_repeat: int = 1, norm: str = 'layer_norm', device=None, dtype=None, **kwargs):
- super().__init__(d_model, num_heads, dim_feedforward, dropout,
- device=device, dtype=dtype, batch_first=True, **kwargs)
- factory_kwargs = {'device': device, 'dtype': dtype}
- # Redefine self_attn to our streaming multi-head attention
- attn_kwargs: tp.Dict[str, tp.Any] = {
- 'embed_dim': d_model,
- 'num_heads': num_heads,
- 'dropout': dropout if attention_dropout is None else attention_dropout,
- 'bias': bias_attn,
- 'custom': custom,
- 'memory_efficient': memory_efficient,
- 'attention_as_float32': attention_as_float32,
- }
- self.self_attn: StreamingMultiheadAttention = StreamingMultiheadAttention(
- causal=causal, past_context=past_context, rope=rope, qk_layer_norm=qk_layer_norm,
- kv_repeat=kv_repeat, **attn_kwargs, **factory_kwargs) # type: ignore
- # Redefine feedforward layers to expose bias parameter
- self.linear1 = nn.Linear(d_model, dim_feedforward, bias=bias_ff, **factory_kwargs)
- self.linear2 = nn.Linear(dim_feedforward, d_model, bias=bias_ff, **factory_kwargs)
-
- self.layer_scale_1: nn.Module
- self.layer_scale_2: nn.Module
- if layer_scale is None:
- self.layer_scale_1 = nn.Identity()
- self.layer_scale_2 = nn.Identity()
- else:
- self.layer_scale_1 = LayerScale(d_model, layer_scale, **factory_kwargs)
- self.layer_scale_2 = LayerScale(d_model, layer_scale, **factory_kwargs)
-
- self.cross_attention: tp.Optional[nn.Module] = None
- if cross_attention:
- self.cross_attention = StreamingMultiheadAttention(
- cross_attention=True, qk_layer_norm=qk_layer_norm_cross,
- **attn_kwargs, **factory_kwargs)
- # Norm and dropout
- self.dropout_cross = nn.Dropout(dropout)
- # eps value matching that used in PyTorch reference implementation.
- self.norm_cross = nn.LayerNorm(d_model, eps=1e-5, **factory_kwargs)
- self.layer_scale_cross: nn.Module
- if layer_scale is None:
- self.layer_scale_cross = nn.Identity()
- else:
- self.layer_scale_cross = LayerScale(d_model, layer_scale, **factory_kwargs)
- self.norm1 = create_norm_fn(norm, d_model, **factory_kwargs) # type: ignore
- self.norm2 = create_norm_fn(norm, d_model, **factory_kwargs) # type: ignore
-
- def _cross_attention_block(self, src: torch.Tensor,
- cross_attention_src: torch.Tensor) -> torch.Tensor:
- assert self.cross_attention is not None
- # queries are from src, keys and values from cross_attention_src.
- x = self.cross_attention(
- src, cross_attention_src, cross_attention_src, need_weights=False)[0]
- return self.dropout_cross(x) # type: ignore
-
- def forward(self, src: torch.Tensor, src_mask: tp.Optional[torch.Tensor] = None, # type: ignore
- src_key_padding_mask: tp.Optional[torch.Tensor] = None,
- cross_attention_src: tp.Optional[torch.Tensor] = None):
- if self.cross_attention is None:
- assert cross_attention_src is None
- else:
- assert cross_attention_src is not None
- x = src
- if self.norm_first:
- x = x + self.layer_scale_1(
- self._sa_block(self.norm1(x), src_mask, src_key_padding_mask))
- if cross_attention_src is not None:
- x = x + self.layer_scale_cross(
- self._cross_attention_block(
- self.norm_cross(x), cross_attention_src))
- x = x + self.layer_scale_2(self._ff_block(self.norm2(x)))
- else:
- x = self.norm1(x + self.layer_scale_1(
- self._sa_block(x, src_mask, src_key_padding_mask)))
- if cross_attention_src is not None:
- x = self.norm_cross(
- x + self.layer_scale_cross(
- self._cross_attention_block(src, cross_attention_src)))
- x = self.norm2(x + self.layer_scale_2(self._ff_block(x)))
- return x
-
-
-class StreamingTransformer(StreamingModule):
- """Transformer with Streaming / Causal support.
-
- Args:
- d_model (int): Dimension of the data.
- num_heads (int): Number of heads.
- dim_feedforward (int): Intermediate dimension of FF module.
- dropout (float): Dropout both for MHA and FF.
- bias_ff (bool): Use bias for FF.
- bias_attn (bool): Use bias for MHA.
- causal (bool): Causal mask applied automatically.
- past_context (int or None): Receptive field for the causal mask, infinite if None.
- custom (bool): Use custom MHA implementation, for testing / benchmarking.
- memory_efficient (bool): Use xformers based memory efficient attention.
- attention_as_float32 (bool): Perform the attention as float32
- (especially important with memory_efficient as autocast won't do this automatically).
- cross_attention (bool): If True, expect to get secondary input for cross-attention.
- layer_scale (float or None): If not None, LayerScale will be used
- with the given value as initial scale.
- positional_embedding (str): Positional embedding strategy (sin, rope, or sin_rope).
- max_period (float): Maximum period of the time embedding.
- positional_scale (float): Scale of positional embedding, set to 0 to deactivate.
- xpos (bool): Apply xpos exponential decay to positional embedding (rope only).
- lr (float or None): Learning rate override through the `make_optim_group` API.
- weight_decay (float or None): Weight decay override through the `make_optim_group` API.
- layer_class (subclass of `StreamingTransformerLayer`): class to use
- to initialize the layers, allowing further customization outside of Audiocraft.
- checkpointing (str): Checkpointing strategy to reduce memory usage.
- No checkpointing if set to 'none'. Per layer checkpointing using PyTorch
- if set to 'torch' (entire layer checkpointed, i.e. linears are evaluated twice,
- minimal memory usage, but maximal runtime). Finally, `xformers_default` provides
- a policy for opting out of some operations of the checkpointing, like
- linear layers and attention, providing a middle ground between speed and memory.
- device (torch.device or None): Device on which to initialize.
- dtype (torch.dtype or None): dtype to use.
- **kwargs: See `nn.TransformerEncoderLayer`.
- """
- def __init__(self, d_model: int, num_heads: int, num_layers: int, dim_feedforward: int = 2048,
- dropout: float = 0.1, bias_ff: bool = True, bias_attn: bool = True,
- causal: bool = False, past_context: tp.Optional[int] = None,
- custom: bool = False, memory_efficient: bool = False, attention_as_float32: bool = False,
- cross_attention: bool = False, layer_scale: tp.Optional[float] = None,
- positional_embedding: str = 'sin', max_period: float = 10_000, positional_scale: float = 1.,
- xpos: bool = False, lr: tp.Optional[float] = None, weight_decay: tp.Optional[float] = None,
- layer_class: tp.Type[StreamingTransformerLayer] = StreamingTransformerLayer,
- checkpointing: str = 'none', device=None, dtype=None, **kwargs):
- super().__init__()
- assert d_model % num_heads == 0
-
- self.positional_embedding = positional_embedding
- self.max_period = max_period
- self.positional_scale = positional_scale
- self.weight_decay = weight_decay
- self.lr = lr
-
- assert positional_embedding in ['sin', 'rope', 'sin_rope']
- self.rope: tp.Optional[RotaryEmbedding] = None
- if self.positional_embedding in ['rope', 'sin_rope']:
- assert _is_custom(custom, memory_efficient)
- self.rope = RotaryEmbedding(d_model // num_heads, max_period=max_period,
- xpos=xpos, scale=positional_scale, device=device)
-
- self.checkpointing = checkpointing
-
- assert checkpointing in ['none', 'torch', 'xformers_default', 'xformers_mm']
- if self.checkpointing.startswith('xformers'):
- _verify_xformers_internal_compat()
-
- self.layers = nn.ModuleList()
- for idx in range(num_layers):
- self.layers.append(
- layer_class(
- d_model=d_model, num_heads=num_heads, dim_feedforward=dim_feedforward,
- dropout=dropout, bias_ff=bias_ff, bias_attn=bias_attn,
- causal=causal, past_context=past_context, custom=custom,
- memory_efficient=memory_efficient, attention_as_float32=attention_as_float32,
- cross_attention=cross_attention, layer_scale=layer_scale, rope=self.rope,
- device=device, dtype=dtype, **kwargs))
-
- if self.checkpointing != 'none':
- for layer in self.layers:
- # see audiocraft/optim/fsdp.py, magic signal to indicate this requires fixing the
- # backward hook inside of FSDP...
- layer._magma_checkpointed = True # type: ignore
- assert layer.layer_drop == 0., "Need further checking" # type: ignore
-
- def _apply_layer(self, layer, *args, **kwargs):
- method = self.checkpointing
- if method == 'none':
- return layer(*args, **kwargs)
- elif method == 'torch':
- return torch_checkpoint(layer, *args, use_reentrant=False, **kwargs)
- elif method.startswith('xformers'):
- from xformers.checkpoint_fairinternal import checkpoint, _get_default_policy
- if method == 'xformers_default':
- # those operations will be saved, and not recomputed.
- # According to Francisco we can get smarter policies but this is a good start.
- allow_list = [
- "xformers.efficient_attention_forward_cutlass.default",
- "xformers_flash.flash_fwd.default",
- "aten.addmm.default",
- "aten.mm.default",
- ]
- elif method == 'xformers_mm':
- # those operations will be saved, and not recomputed.
- # According to Francisco we can get smarter policies but this is a good start.
- allow_list = [
- "aten.addmm.default",
- "aten.mm.default",
- ]
- else:
- raise ValueError(f"xformers checkpointing xformers policy {method} is not known.")
- policy_fn = _get_default_policy(allow_list)
- return checkpoint(layer, *args, policy_fn=policy_fn, **kwargs)
- else:
- raise ValueError(f"Checkpointing method {method} is unknown.")
-
- def forward(self, x: torch.Tensor, *args, **kwargs):
- B, T, C = x.shape
-
- if 'offsets' in self._streaming_state:
- offsets = self._streaming_state['offsets']
- else:
- offsets = torch.zeros(B, dtype=torch.long, device=x.device)
-
- if self.positional_embedding in ['sin', 'sin_rope']:
- positions = torch.arange(T, device=x.device).view(1, -1, 1)
- positions = positions + offsets.view(-1, 1, 1)
- pos_emb = create_sin_embedding(positions, C, max_period=self.max_period, dtype=x.dtype)
- x = x + self.positional_scale * pos_emb
-
- for layer in self.layers:
- x = self._apply_layer(layer, x, *args, **kwargs)
-
- if self._is_streaming:
- self._streaming_state['offsets'] = offsets + T
-
- return x
-
- def make_optim_group(self):
- group = {"params": list(self.parameters())}
- if self.lr is not None:
- group["lr"] = self.lr
- if self.weight_decay is not None:
- group["weight_decay"] = self.weight_decay
- return group
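For illustration, the group returned above plugs directly into a standard PyTorch optimizer; per-group `lr` / `weight_decay` overrides take precedence over the optimizer defaults. A sketch with made-up hyper-parameters:

```python
import torch

model = StreamingTransformer(d_model=256, num_heads=8, num_layers=4,
                             lr=1e-4, weight_decay=0.01)
optimizer = torch.optim.AdamW([model.make_optim_group()], lr=3e-4, weight_decay=0.1)
# The transformer's parameters train with lr=1e-4 and weight_decay=0.01,
# while the optimizer-level defaults apply to any other groups added later.
```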
-
-
- # special attention related functions
-
-def _verify_xformers_memory_efficient_compat():
- try:
- from xformers.ops import memory_efficient_attention, LowerTriangularMask # noqa
- except ImportError:
- raise ImportError(
- "xformers is not installed. Please install it and try again.\n"
- "To install on AWS and Azure, run \n"
- "FORCE_CUDA=1 TORCH_CUDA_ARCH_LIST='8.0'\\\n"
- "pip install -U git+https://git@github.com/fairinternal/xformers.git#egg=xformers\n"
- "To install on FAIR Cluster, run \n"
- "FORCE_CUDA=1 TORCH_CUDA_ARCH_LIST='6.0;7.0'\\\n"
- "pip install -U git+https://git@github.com/fairinternal/xformers.git#egg=xformers\n")
-
-
-def _verify_xformers_internal_compat():
- try:
- from xformers.checkpoint_fairinternal import checkpoint, _get_default_policy # noqa
- except ImportError:
- raise ImportError(
- "Francisco's fairinternal xformers is not installed. Please install it and try again.\n"
- "To install on AWS and Azure, run \n"
- "FORCE_CUDA=1 TORCH_CUDA_ARCH_LIST='8.0'\\\n"
- "pip install -U git+https://git@github.com/fairinternal/xformers.git#egg=xformers\n"
- "To install on FAIR Cluster, run \n"
- "FORCE_CUDA=1 TORCH_CUDA_ARCH_LIST='6.0;7.0'\\\n"
- "pip install -U git+https://git@github.com/fairinternal/xformers.git#egg=xformers\n")
-
-
-def _is_custom(custom: bool, memory_efficient: bool):
- return custom or memory_efficient
diff --git a/spaces/GuyYariv/AudioToken/README.md b/spaces/GuyYariv/AudioToken/README.md
deleted file mode 100644
index 62c6d781b29ee92b1c380611952c8e0a1d5d3288..0000000000000000000000000000000000000000
--- a/spaces/GuyYariv/AudioToken/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: AudioToken
-emoji: 🏆
-colorFrom: indigo
-colorTo: indigo
-sdk: gradio
-sdk_version: 3.29.0
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/HCMUT-GraduateThesis-HNTThinh/rgbdsod-multimae-demo/s_multimae/wandb_manager.py b/spaces/HCMUT-GraduateThesis-HNTThinh/rgbdsod-multimae-demo/s_multimae/wandb_manager.py
deleted file mode 100644
index d4727d6ba769998b80b0f2e6455ccd1b1ec7b5ab..0000000000000000000000000000000000000000
--- a/spaces/HCMUT-GraduateThesis-HNTThinh/rgbdsod-multimae-demo/s_multimae/wandb_manager.py
+++ /dev/null
@@ -1,50 +0,0 @@
-from typing import Dict, Optional, Union
-import wandb
-from wandb.sdk.wandb_run import Run
-import os
-from wandb.apis.public import Run as ApiRun, Artifact
-
-from .configs.base_config import base_cfg
-
-WANDB_PROJECT_NAME = 'RGBD_SOD'
-
-def wandb_login(cfg: base_cfg) -> None:
- wandb.login(key=cfg.wandb_api_key)
-
-def wandb_init(
- name: str,
- config: Union[Dict, str, None] = None
-) -> Run:
- return wandb.init(
- name=name, project=WANDB_PROJECT_NAME,
- id=name, resume='auto', config=config
- )
-
-def wandb_init_sota_benchmark(
- datasets_set: int,
- additional_name: Optional[str] = None,
-) -> Run:
- name = f'SOTAs_v{datasets_set}'
- if additional_name is not None:
- name += f'_{additional_name}'
- return wandb_init(name, config=dict())
-
-def wandb_init_sota_benchmark_sm(datasets_set: int) -> Run:
- return wandb_init(f'SOTAs_SM_v{datasets_set}', config=dict())
-
-# Not working!
-def wandb_delete_artifacts(cfg: base_cfg, wandb_run: Run) -> None:
- wandb_api = wandb.Api(api_key=cfg.wandb_api_key)
- try:
- run: ApiRun = wandb_api.run(
- os.path.join(
- wandb_run.entity,
- wandb_run.project,
- wandb_run.id,
- )
- )
- for artifact in run.logged_artifacts():
- artifact: Artifact = artifact
- artifact.delete(delete_aliases=True)
- except Exception as e:
- print('[ERROR] wandb_delete_artifacts', e)
\ No newline at end of file
diff --git a/spaces/HCMUT-GraduateThesis-HNTThinh/rgbdsod-multimae-demo/utils/model_ema.py b/spaces/HCMUT-GraduateThesis-HNTThinh/rgbdsod-multimae-demo/utils/model_ema.py
deleted file mode 100644
index 56825bd15d4a5ee418f93ca130f05c887976d9dc..0000000000000000000000000000000000000000
--- a/spaces/HCMUT-GraduateThesis-HNTThinh/rgbdsod-multimae-demo/utils/model_ema.py
+++ /dev/null
@@ -1,131 +0,0 @@
-# --------------------------------------------------------
-# Based on timm and MAE-priv code bases
-# https://github.com/rwightman/pytorch-image-models/tree/master/timm
-# https://github.com/BUPT-PRIV/MAE-priv
-# --------------------------------------------------------
-
-""" Exponential Moving Average (EMA) of model updates
-
-Hacked together by / Copyright 2020 Ross Wightman
-"""
-from collections import OrderedDict
-from copy import deepcopy
-
-import torch
-import torch.nn as nn
-
-
-class ModelEma:
- """ Model Exponential Moving Average (DEPRECATED)
-
- Keep a moving average of everything in the model state_dict (parameters and buffers).
- This version is deprecated, it does not work with scripted models. Will be removed eventually.
-
- This is intended to allow functionality like
- https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
-
- A smoothed version of the weights is necessary for some training schemes to perform well.
- E.g. Google's hyper-params for training MNASNet, MobileNet-V3, EfficientNet, etc. that use
- RMSprop with a short 2.4-3 epoch decay period and slow LR decay rate of .96-.99 require EMA
- smoothing of weights to match results. Pay attention to the decay constant you are using
- relative to your update count per epoch.
-
- To keep EMA from using GPU resources, set device='cpu'. This will save a bit of memory but
- disable validation of the EMA weights. Validation will have to be done manually in a separate
- process, or after the training stops converging.
-
- This class is sensitive to where it is initialized in the sequence of model init,
- GPU assignment and distributed training wrappers.
- """
-
- def __init__(self, model, decay=0.9999, device='', resume=''):
- # make a copy of the model for accumulating moving average of weights
- self.ema = deepcopy(model)
- self.ema.eval()
- self.decay = decay
- self.device = device # perform ema on different device from model if set
- if device:
- self.ema.to(device=device)
- self.ema_has_module = hasattr(self.ema, 'module')
- if resume:
- self._load_checkpoint(resume)
- for p in self.ema.parameters():
- p.requires_grad_(False)
-
- def _load_checkpoint(self, checkpoint_path):
- checkpoint = torch.load(checkpoint_path, map_location='cpu')
- assert isinstance(checkpoint, dict)
- if 'state_dict_ema' in checkpoint:
- new_state_dict = OrderedDict()
- for k, v in checkpoint['state_dict_ema'].items():
- # ema model may have been wrapped by DataParallel, and need module prefix
- if self.ema_has_module:
- name = 'module.' + k if not k.startswith('module') else k
- else:
- name = k
- new_state_dict[name] = v
- self.ema.load_state_dict(new_state_dict)
- print("Loaded state_dict_ema")
- else:
- print("Failed to find state_dict_ema, starting from loaded model weights")
-
- def update(self, model):
- # correct a mismatch in state dict keys
- needs_module = hasattr(model, 'module') and not self.ema_has_module
- with torch.no_grad():
- msd = model.state_dict()
- for k, ema_v in self.ema.state_dict().items():
- if needs_module:
- k = 'module.' + k
- model_v = msd[k].detach()
- if self.device:
- model_v = model_v.to(device=self.device)
- ema_v.copy_(ema_v * self.decay + (1. - self.decay) * model_v)
-
-
-class ModelEmaV2(nn.Module):
- """ Model Exponential Moving Average V2
-
- Keep a moving average of everything in the model state_dict (parameters and buffers).
- V2 of this module is simpler, it does not match params/buffers based on name but simply
- iterates in order. It works with torchscript (JIT of full model).
-
- This is intended to allow functionality like
- https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
-
- A smoothed version of the weights is necessary for some training schemes to perform well.
- E.g. Google's hyper-params for training MNASNet, MobileNet-V3, EfficientNet, etc. that use
- RMSprop with a short 2.4-3 epoch decay period and slow LR decay rate of .96-.99 require EMA
- smoothing of weights to match results. Pay attention to the decay constant you are using
- relative to your update count per epoch.
-
- To keep EMA from using GPU resources, set device='cpu'. This will save a bit of memory but
- disable validation of the EMA weights. Validation will have to be done manually in a separate
- process, or after the training stops converging.
-
- This class is sensitive to where it is initialized in the sequence of model init,
- GPU assignment and distributed training wrappers.
- """
-
- def __init__(self, model, decay=0.9999, device=None):
- super(ModelEmaV2, self).__init__()
- # make a copy of the model for accumulating moving average of weights
- self.module = deepcopy(model)
- self.module.eval()
- self.decay = decay
- self.device = device # perform ema on different device from model if set
- if self.device is not None:
- self.module.to(device=device)
-
- def _update(self, model, update_fn):
- with torch.no_grad():
- for ema_v, model_v in zip(self.module.state_dict().values(), model.state_dict().values()):
- if self.device is not None:
- model_v = model_v.to(device=self.device)
- ema_v.copy_(update_fn(ema_v, model_v))
-
- def update(self, model):
- self._update(model, update_fn=lambda e, m: self.decay * e + (1. - self.decay) * m)
-
- def set(self, model):
- self._update(model, update_fn=lambda e, m: m)
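A hedged training-loop sketch for `ModelEmaV2`: update the EMA copy after every optimizer step and evaluate with `ema.module` instead of the live model (toy model and data, for illustration only).

```python
import torch
import torch.nn as nn

model = nn.Linear(10, 2)
ema = ModelEmaV2(model, decay=0.999)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

for _ in range(3):
    loss = model(torch.randn(4, 10)).pow(2).mean()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    ema.update(model)                  # EMA weights lag behind the live weights

with torch.no_grad():
    preds = ema.module(torch.randn(4, 10))
```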
diff --git a/spaces/Hallucinate/demo/k_diffusion/models/__init__.py b/spaces/Hallucinate/demo/k_diffusion/models/__init__.py
deleted file mode 100644
index 82608ff1de6137b31eeaf8de6814df6a7e35606a..0000000000000000000000000000000000000000
--- a/spaces/Hallucinate/demo/k_diffusion/models/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from .image_v1 import ImageDenoiserModelV1
diff --git a/spaces/HaloMaster/chinesesummary/fengshen/models/model_utils.py b/spaces/HaloMaster/chinesesummary/fengshen/models/model_utils.py
deleted file mode 100644
index 09ce3e4ab99d661ee9f364c35ea0f987c4e47c93..0000000000000000000000000000000000000000
--- a/spaces/HaloMaster/chinesesummary/fengshen/models/model_utils.py
+++ /dev/null
@@ -1,66 +0,0 @@
-from pytorch_lightning import LightningModule
-
-from pytorch_lightning.strategies import DeepSpeedStrategy
-from deepspeed.ops.adam import DeepSpeedCPUAdam, FusedAdam
-from transformers.optimization import AdamW, get_scheduler
-
-
-def add_module_args(parent_args):
- parser = parent_args.add_argument_group('Basic Module')
- parser.add_argument('--learning_rate', default=5e-5, type=float)
- parser.add_argument('--weight_decay', default=1e-1, type=float)
- parser.add_argument('--warmup_steps', default=0, type=int)
- parser.add_argument('--warmup_ratio', default=0.1, type=float)
- parser.add_argument('--adam_beta1', default=0.9, type=float)
- parser.add_argument('--adam_beta2', default=0.999, type=float)
- parser.add_argument('--adam_epsilon', default=1e-8, type=float)
- parser.add_argument('--model_path', default=None, type=str)
- parser.add_argument('--scheduler_type', default='polynomial', type=str)
- return parent_args
-
-
-def configure_optimizers(pl_model: LightningModule):
- no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight', 'layer_norm.', 'layernorm.']
- optimizer_grouped_params = [
- {'params': [p for n, p in pl_model.named_parameters() if not any(
- nd in n for nd in no_decay)], 'weight_decay': pl_model.hparams.weight_decay},
- {'params': [p for n, p in pl_model.named_parameters() if any(
- nd in n for nd in no_decay)], 'weight_decay': 0.0}
- ]
- # Configure optimizer.
- if isinstance(pl_model.trainer.strategy, DeepSpeedStrategy):
- if 'offload_optimizer' in pl_model.trainer.training_type_plugin.config['zero_optimization']:
- optimizer = DeepSpeedCPUAdam(
- optimizer_grouped_params, adamw_mode=True,
- lr=pl_model.hparams.learning_rate,
- betas=(pl_model.hparams.adam_beta1, pl_model.hparams.adam_beta2), eps=pl_model.hparams.adam_epsilon)
- else:
- optimizer = FusedAdam(
- optimizer_grouped_params, adam_w_mode=True,
- lr=pl_model.hparams.learning_rate,
- betas=(pl_model.hparams.adam_beta1, pl_model.hparams.adam_beta2), eps=pl_model.hparams.adam_epsilon)
- else:
- optimizer = AdamW(optimizer_grouped_params, lr=pl_model.hparams.learning_rate,
- betas=(pl_model.hparams.adam_beta1, pl_model.hparams.adam_beta2),
- eps=pl_model.hparams.adam_epsilon)
- # Configure learning rate scheduler.
- warmup_steps = pl_model.hparams.warmup_ratio * \
- pl_model.total_steps if pl_model.hparams.warmup_steps == 0 else pl_model.hparams.warmup_steps
- scheduler = get_scheduler(name=pl_model.hparams.scheduler_type, optimizer=optimizer,
- num_warmup_steps=warmup_steps, num_training_steps=pl_model.total_steps)
- scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
- return [optimizer], [scheduler]
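The weight-decay grouping above keys off parameter names; a minimal sketch (with a hypothetical two-layer module) shows which names land in the zero-decay group:

```python
import torch.nn as nn

class Tiny(nn.Module):
    def __init__(self):
        super().__init__()
        self.proj = nn.Linear(8, 8)
        self.layer_norm = nn.LayerNorm(8)

model = Tiny()
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight', 'layer_norm.', 'layernorm.']
decay = [n for n, _ in model.named_parameters() if not any(nd in n for nd in no_decay)]
zero = [n for n, _ in model.named_parameters() if any(nd in n for nd in no_decay)]
assert decay == ['proj.weight']
assert zero == ['proj.bias', 'layer_norm.weight', 'layer_norm.bias']
```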
-
-
-def get_total_steps(trainer, hparams):
- train_loader = trainer._data_connector._train_dataloader_source.dataloader()
- # Calculate total steps
- if trainer.max_epochs > 0:
- world_size = trainer.world_size
- tb_size = hparams.train_batchsize * max(1, world_size)
- ab_size = trainer.accumulate_grad_batches
- total_steps = (len(train_loader.dataset) *
- trainer.max_epochs // tb_size) // ab_size
- else:
- total_steps = trainer.max_steps
- return total_steps
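The arithmetic above amounts to optimizer steps per epoch times epochs, divided by gradient accumulation; a quick check with made-up numbers:

```python
# 10_000 samples, 3 epochs, per-GPU batch size 16 on 4 GPUs, grad accumulation 2.
dataset_len, max_epochs = 10_000, 3
tb_size = 16 * 4          # train_batchsize * world_size
ab_size = 2               # accumulate_grad_batches
total_steps = (dataset_len * max_epochs // tb_size) // ab_size
assert total_steps == 234
```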
diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/tests/test_noising.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/tests/test_noising.py
deleted file mode 100644
index b3d0d123c42eaca6f79371aa268049e668fcfcce..0000000000000000000000000000000000000000
--- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/tests/test_noising.py
+++ /dev/null
@@ -1,530 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import unittest
-from typing import Dict, List
-
-import tests.utils as test_utils
-import torch
-from fairseq import utils
-from fairseq.data import (
- Dictionary,
- LanguagePairDataset,
- TransformEosDataset,
- data_utils,
- noising,
-)
-
-
-class TestDataNoising(unittest.TestCase):
- def _get_test_data_with_bpe_cont_marker(self, append_eos=True):
- """
- Args:
- append_eos: if True, each input sentence in the source tokens tensor
- will have an EOS appended to the end.
-
- Returns:
- vocab: BPE vocab with continuation markers as suffixes to denote
- non-end of word tokens. This is the standard BPE format used in
- fairseq's preprocessing.
- x: input tensor containing numberized source tokens, with EOS at the
- end if append_eos is true
- src_lengths: source lengths.
- """
- vocab = Dictionary()
- vocab.add_symbol("he@@")
- vocab.add_symbol("llo")
- vocab.add_symbol("how")
- vocab.add_symbol("are")
- vocab.add_symbol("y@@")
- vocab.add_symbol("ou")
- vocab.add_symbol("n@@")
- vocab.add_symbol("ew")
- vocab.add_symbol("or@@")
- vocab.add_symbol("k")
-
- src_tokens = [
- ["he@@", "llo", "n@@", "ew", "y@@", "or@@", "k"],
- ["how", "are", "y@@", "ou"],
- ]
- x, src_lengths = self._convert_src_tokens_to_tensor(
- vocab=vocab, src_tokens=src_tokens, append_eos=append_eos
- )
- return vocab, x, src_lengths
-
- def _get_test_data_with_bpe_end_marker(self, append_eos=True):
- """
- Args:
- append_eos: if True, each input sentence in the source tokens tensor
- will have an EOS appended to the end.
-
- Returns:
- vocab: BPE vocab with end-of-word markers as suffixes to denote
- tokens at the end of a word. This is an alternative to fairseq's
- standard preprocessing framework and is not generally supported
- within fairseq.
- x: input tensor containing numberized source tokens, with EOS at the
- end if append_eos is true
- src_lengths: source lengths.
- """
- vocab = Dictionary()
- vocab.add_symbol("he")
- vocab.add_symbol("llo_EOW")
- vocab.add_symbol("how_EOW")
- vocab.add_symbol("are_EOW")
- vocab.add_symbol("y")
- vocab.add_symbol("ou_EOW")
- vocab.add_symbol("n")
- vocab.add_symbol("ew_EOW")
- vocab.add_symbol("or")
- vocab.add_symbol("k_EOW")
-
- src_tokens = [
- ["he", "llo_EOW", "n", "ew_EOW", "y", "or", "k_EOW"],
- ["how_EOW", "are_EOW", "y", "ou_EOW"],
- ]
- x, src_lengths = self._convert_src_tokens_to_tensor(
- vocab=vocab, src_tokens=src_tokens, append_eos=append_eos
- )
- return vocab, x, src_lengths
-
- def _get_test_data_with_word_vocab(self, append_eos=True):
- """
- Args:
- append_eos: if True, each input sentence in the source tokens tensor
- will have an EOS appended to the end.
-
- Returns:
- vocab: word vocab
- x: input tensor containing numberized source tokens, with EOS at the
- end if append_eos is true
- src_lengths: source lengths.
- """
- vocab = Dictionary()
-
- vocab.add_symbol("hello")
- vocab.add_symbol("how")
- vocab.add_symbol("are")
- vocab.add_symbol("you")
- vocab.add_symbol("new")
- vocab.add_symbol("york")
- src_tokens = [
- ["hello", "new", "york", "you"],
- ["how", "are", "you", "new", "york"],
- ]
- x, src_lengths = self._convert_src_tokens_to_tensor(
- vocab=vocab, src_tokens=src_tokens, append_eos=append_eos
- )
- return vocab, x, src_lengths
-
- def _convert_src_tokens_to_tensor(
- self, vocab: Dictionary, src_tokens: List[List[str]], append_eos: bool
- ):
- src_len = [len(x) for x in src_tokens]
- # If we have to append EOS, we include EOS in counting src length
- if append_eos:
- src_len = [length + 1 for length in src_len]
-
- x = torch.LongTensor(len(src_tokens), max(src_len)).fill_(vocab.pad())
- for i in range(len(src_tokens)):
- for j in range(len(src_tokens[i])):
- x[i][j] = vocab.index(src_tokens[i][j])
- if append_eos:
- x[i][j + 1] = vocab.eos()
-
- x = x.transpose(1, 0)
- return x, torch.LongTensor(src_len)
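For reference, a self-contained sketch of the `[T, B]` convention produced above (with a toy vocabulary of made-up token ids): sentences are right-padded, EOS-terminated, then transposed so that time is the leading dimension.

```python
import torch

pad, eos = 1, 2
sentences = [[4, 5, 6], [7, 8]]
src_len = torch.LongTensor([len(s) + 1 for s in sentences])   # +1 for EOS
x = torch.full((len(sentences), int(src_len.max())), pad, dtype=torch.long)
for i, sent in enumerate(sentences):
    x[i, :len(sent)] = torch.tensor(sent)
    x[i, len(sent)] = eos
x = x.t()                               # [T, B], as the noising helpers expect
assert x.shape == (4, 2) and x[-1, 1].item() == pad
```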
-
- def assert_eos_at_end(self, x, x_len, eos):
- """Asserts last token of every sentence in x is EOS """
- for i in range(len(x_len)):
- self.assertEqual(
- x[x_len[i] - 1][i],
- eos,
- (
- "Expected eos (token id {eos}) at the end of sentence {i} "
- "but got {other} instead"
- ).format(i=i, eos=eos, other=x[x_len[i] - 1][i]),
- )
-
- def assert_word_dropout_correct(self, x, x_noised, x_len, l_noised):
- # Expect only the first word (2 bpe tokens) of the first example
- # was dropped out
- self.assertEqual(x_len[0] - 2, l_noised[0])
- for i in range(l_noised[0]):
- self.assertEqual(x_noised[i][0], x[i + 2][0])
-
- def test_word_dropout_with_eos(self):
- vocab, x, x_len = self._get_test_data_with_bpe_cont_marker(append_eos=True)
-
- with data_utils.numpy_seed(1234):
- noising_gen = noising.WordDropout(vocab)
- x_noised, l_noised = noising_gen.noising(x, x_len, 0.2)
- self.assert_word_dropout_correct(
- x=x, x_noised=x_noised, x_len=x_len, l_noised=l_noised
- )
- self.assert_eos_at_end(x=x_noised, x_len=l_noised, eos=vocab.eos())
-
- def assert_word_blanking_correct(self, x, x_noised, x_len, l_noised, unk):
- # Expect only the first word (2 bpe tokens) of the first example
- # was blanked out
- self.assertEqual(x_len[0], l_noised[0])
- for i in range(l_noised[0]):
- if i < 2:
- self.assertEqual(x_noised[i][0], unk)
- else:
- self.assertEqual(x_noised[i][0], x[i][0])
-
- def test_word_blank_with_eos(self):
- vocab, x, x_len = self._get_test_data_with_bpe_cont_marker(append_eos=True)
-
- with data_utils.numpy_seed(1234):
- noising_gen = noising.WordDropout(vocab)
- x_noised, l_noised = noising_gen.noising(x, x_len, 0.2, vocab.unk())
- self.assert_word_blanking_correct(
- x=x, x_noised=x_noised, x_len=x_len, l_noised=l_noised, unk=vocab.unk()
- )
- self.assert_eos_at_end(x=x_noised, x_len=l_noised, eos=vocab.eos())
-
- def generate_unchanged_shuffle_map(self, length):
- return {i: i for i in range(length)}
-
- def assert_word_shuffle_matches_expected(
- self,
- x,
- x_len,
- max_shuffle_distance: int,
- vocab: Dictionary,
- expected_shufle_maps: List[Dict[int, int]],
- expect_eos_at_end: bool,
- bpe_end_marker=None,
- ):
- """
- This verifies that with a given x, x_len, max_shuffle_distance, and
- vocab, we get the expected shuffle result.
-
- Args:
- x: Tensor of shape (T x B) = (sequence_length, batch_size)
- x_len: Tensor of length B = batch_size
- max_shuffle_distance: arg to pass to noising
- expected_shuffle_maps: List[mapping] where mapping is a
- Dict[old_index, new_index], mapping x's elements from their
- old positions in x to their new positions in x.
- expect_eos_at_end: if True, check the output to make sure there is
- an EOS at the end.
- bpe_end_marker: str denoting the BPE end token. If this is not None, we
- set the BPE cont token to None in the noising classes.
- """
- bpe_cont_marker = None
- if bpe_end_marker is None:
- bpe_cont_marker = "@@"
-
- with data_utils.numpy_seed(1234):
- word_shuffle = noising.WordShuffle(
- vocab, bpe_cont_marker=bpe_cont_marker, bpe_end_marker=bpe_end_marker
- )
- x_noised, l_noised = word_shuffle.noising(
- x, x_len, max_shuffle_distance=max_shuffle_distance
- )
-
- # For every example, we have a different expected shuffle map. We check
- # that each example is shuffled as expected according to each
- # corresponding shuffle map.
- for i in range(len(expected_shufle_maps)):
- shuffle_map = expected_shufle_maps[i]
- for k, v in shuffle_map.items():
- self.assertEqual(x[k][i], x_noised[v][i])
-
- # Shuffling should not affect the length of each example
- for pre_shuffle_length, post_shuffle_length in zip(x_len, l_noised):
- self.assertEqual(pre_shuffle_length, post_shuffle_length)
- if expect_eos_at_end:
- self.assert_eos_at_end(x=x_noised, x_len=l_noised, eos=vocab.eos())
-
- def test_word_shuffle_with_eos(self):
- vocab, x, x_len = self._get_test_data_with_bpe_cont_marker(append_eos=True)
-
- # Assert word shuffle with max shuffle distance 0 causes input to be
- # unchanged
- self.assert_word_shuffle_matches_expected(
- x=x,
- x_len=x_len,
- max_shuffle_distance=0,
- vocab=vocab,
- expected_shufle_maps=[
- self.generate_unchanged_shuffle_map(example_len)
- for example_len in x_len
- ],
- expect_eos_at_end=True,
- )
-
- # Assert word shuffle with max shuffle distance 3 matches our expected
- # shuffle order
- self.assert_word_shuffle_matches_expected(
- x=x,
- x_len=x_len,
- vocab=vocab,
- max_shuffle_distance=3,
- expected_shufle_maps=[
- self.generate_unchanged_shuffle_map(x_len[0]),
- {0: 0, 1: 3, 2: 1, 3: 2},
- ],
- expect_eos_at_end=True,
- )
-
- def test_word_shuffle_with_eos_nonbpe(self):
- """The purpose of this is to test shuffling logic with word vocabs"""
- vocab, x, x_len = self._get_test_data_with_word_vocab(append_eos=True)
-
- # Assert word shuffle with max shuffle distance 0 causes input to be
- # unchanged
- self.assert_word_shuffle_matches_expected(
- x=x,
- x_len=x_len,
- max_shuffle_distance=0,
- vocab=vocab,
- expected_shufle_maps=[
- self.generate_unchanged_shuffle_map(example_len)
- for example_len in x_len
- ],
- expect_eos_at_end=True,
- )
-
- # Assert word shuffle with max shuffle distance 3 matches our expected
- # shuffle order
- self.assert_word_shuffle_matches_expected(
- x=x,
- x_len=x_len,
- vocab=vocab,
- max_shuffle_distance=3,
- expected_shufle_maps=[
- {0: 0, 1: 1, 2: 3, 3: 2},
- {0: 0, 1: 2, 2: 1, 3: 3, 4: 4},
- ],
- expect_eos_at_end=True,
- )
-
- def test_word_shuffle_without_eos(self):
- """Same result as word shuffle with eos except no EOS at end"""
- vocab, x, x_len = self._get_test_data_with_bpe_cont_marker(append_eos=False)
-
- # Assert word shuffle with max shuffle distance 0 causes input to be
- # unchanged
- self.assert_word_shuffle_matches_expected(
- x=x,
- x_len=x_len,
- max_shuffle_distance=0,
- vocab=vocab,
- expected_shufle_maps=[
- self.generate_unchanged_shuffle_map(example_len)
- for example_len in x_len
- ],
- expect_eos_at_end=False,
- )
-
- # Assert word shuffle with max shuffle distance 3 matches our expected
- # shuffle order
- self.assert_word_shuffle_matches_expected(
- x=x,
- x_len=x_len,
- vocab=vocab,
- max_shuffle_distance=3,
- expected_shufle_maps=[
- self.generate_unchanged_shuffle_map(x_len[0]),
- {0: 0, 1: 3, 2: 1, 3: 2},
- ],
- expect_eos_at_end=False,
- )
-
- def test_word_shuffle_without_eos_with_bpe_end_marker(self):
- """Same result as word shuffle without eos except using BPE end token"""
- vocab, x, x_len = self._get_test_data_with_bpe_end_marker(append_eos=False)
-
- # Assert word shuffle with max shuffle distance 0 causes input to be
- # unchanged
- self.assert_word_shuffle_matches_expected(
- x=x,
- x_len=x_len,
- max_shuffle_distance=0,
- vocab=vocab,
- expected_shufle_maps=[
- self.generate_unchanged_shuffle_map(example_len)
- for example_len in x_len
- ],
- expect_eos_at_end=False,
- bpe_end_marker="_EOW",
- )
-
- # Assert word shuffle with max shuffle distance 3 matches our expected
- # shuffle order
- self.assert_word_shuffle_matches_expected(
- x=x,
- x_len=x_len,
- vocab=vocab,
- max_shuffle_distance=3,
- expected_shufle_maps=[
- self.generate_unchanged_shuffle_map(x_len[0]),
- {0: 0, 1: 3, 2: 1, 3: 2},
- ],
- expect_eos_at_end=False,
- bpe_end_marker="_EOW",
- )
-
- def assert_no_eos_at_end(self, x, x_len, eos):
- """Asserts that the last token of each sentence in x is not EOS """
- for i in range(len(x_len)):
- self.assertNotEqual(
- x[x_len[i] - 1][i],
- eos,
- "Expected no eos (token id {eos}) at the end of sentence {i}.".format(
- eos=eos, i=i
- ),
- )
-
- def test_word_dropout_without_eos(self):
- """Same result as word dropout with eos except no EOS at end"""
- vocab, x, x_len = self._get_test_data_with_bpe_cont_marker(append_eos=False)
-
- with data_utils.numpy_seed(1234):
- noising_gen = noising.WordDropout(vocab)
- x_noised, l_noised = noising_gen.noising(x, x_len, 0.2)
- self.assert_word_dropout_correct(
- x=x, x_noised=x_noised, x_len=x_len, l_noised=l_noised
- )
- self.assert_no_eos_at_end(x=x_noised, x_len=l_noised, eos=vocab.eos())
-
- def test_word_blank_without_eos(self):
- """Same result as word blank with eos except no EOS at end"""
- vocab, x, x_len = self._get_test_data_with_bpe_cont_marker(append_eos=False)
-
- with data_utils.numpy_seed(1234):
- noising_gen = noising.WordDropout(vocab)
- x_noised, l_noised = noising_gen.noising(x, x_len, 0.2, vocab.unk())
- self.assert_word_blanking_correct(
- x=x, x_noised=x_noised, x_len=x_len, l_noised=l_noised, unk=vocab.unk()
- )
- self.assert_no_eos_at_end(x=x_noised, x_len=l_noised, eos=vocab.eos())
-
- def _get_noising_dataset_batch(
- self,
- src_tokens_no_pad,
- src_dict,
- append_eos_to_tgt=False,
- ):
- """
- Constructs a NoisingDataset and the corresponding
- ``LanguagePairDataset(NoisingDataset(src), src)``. If
- *append_eos_to_tgt* is True, wrap the source dataset in
- :class:`TransformEosDataset` to append EOS to the clean source when
- using it as the target.
- """
- src_dataset = test_utils.TestDataset(data=src_tokens_no_pad)
-
- noising_dataset = noising.NoisingDataset(
- src_dataset=src_dataset,
- src_dict=src_dict,
- seed=1234,
- max_word_shuffle_distance=3,
- word_dropout_prob=0.2,
- word_blanking_prob=0.2,
- noising_class=noising.UnsupervisedMTNoising,
- )
- tgt = src_dataset
- language_pair_dataset = LanguagePairDataset(
- src=noising_dataset, tgt=tgt, src_sizes=None, src_dict=src_dict
- )
- language_pair_dataset = TransformEosDataset(
- language_pair_dataset,
- src_dict.eos(),
- append_eos_to_tgt=append_eos_to_tgt,
- )
-
- dataloader = torch.utils.data.DataLoader(
- dataset=language_pair_dataset,
- batch_size=2,
- collate_fn=language_pair_dataset.collater,
- )
- denoising_batch_result = next(iter(dataloader))
- return denoising_batch_result
-
- def test_noising_dataset_with_eos(self):
- src_dict, src_tokens, _ = self._get_test_data_with_bpe_cont_marker(
- append_eos=True
- )
-
- # Format data for src_dataset
- src_tokens = torch.t(src_tokens)
- src_tokens_no_pad = []
- for src_sentence in src_tokens:
- src_tokens_no_pad.append(
- utils.strip_pad(tensor=src_sentence, pad=src_dict.pad())
- )
- denoising_batch_result = self._get_noising_dataset_batch(
- src_tokens_no_pad=src_tokens_no_pad, src_dict=src_dict
- )
-
- eos, pad = src_dict.eos(), src_dict.pad()
-
- # Generated noisy source as source
- expected_src = torch.LongTensor(
- [[4, 5, 10, 11, 8, 12, 13, eos], [pad, pad, pad, 6, 8, 9, 7, eos]]
- )
- # Original clean source as target (right-padded)
- expected_tgt = torch.LongTensor(
- [[4, 5, 10, 11, 8, 12, 13, eos], [6, 7, 8, 9, eos, pad, pad, pad]]
- )
- generated_src = denoising_batch_result["net_input"]["src_tokens"]
- tgt_tokens = denoising_batch_result["target"]
-
- self.assertTensorEqual(expected_src, generated_src)
- self.assertTensorEqual(expected_tgt, tgt_tokens)
-
- def test_noising_dataset_without_eos(self):
- """
- Similar to test noising dataset with eos except that we have to set
- *append_eos_to_tgt* to ``True``.
- """
-
- src_dict, src_tokens, _ = self._get_test_data_with_bpe_cont_marker(
- append_eos=False
- )
-
- # Format data for src_dataset
- src_tokens = torch.t(src_tokens)
- src_tokens_no_pad = []
- for src_sentence in src_tokens:
- src_tokens_no_pad.append(
- utils.strip_pad(tensor=src_sentence, pad=src_dict.pad())
- )
- denoising_batch_result = self._get_noising_dataset_batch(
- src_tokens_no_pad=src_tokens_no_pad,
- src_dict=src_dict,
- append_eos_to_tgt=True,
- )
-
- eos, pad = src_dict.eos(), src_dict.pad()
-
- # Generated noisy source as source
- expected_src = torch.LongTensor(
- [[4, 5, 10, 11, 8, 12, 13], [pad, pad, pad, 6, 8, 9, 7]]
- )
- # Original clean source as target (right-padded)
- expected_tgt = torch.LongTensor(
- [[4, 5, 10, 11, 8, 12, 13, eos], [6, 7, 8, 9, eos, pad, pad, pad]]
- )
-
- generated_src = denoising_batch_result["net_input"]["src_tokens"]
- tgt_tokens = denoising_batch_result["target"]
-
- self.assertTensorEqual(expected_src, generated_src)
- self.assertTensorEqual(expected_tgt, tgt_tokens)
-
- def assertTensorEqual(self, t1, t2):
- self.assertEqual(t1.size(), t2.size(), "size mismatch")
- self.assertEqual(t1.ne(t2).long().sum(), 0)
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/spaces/Iceclear/StableSR/StableSR/scripts/sr_val_ddpm_text_T_vqganfin_oldcanvas.py b/spaces/Iceclear/StableSR/StableSR/scripts/sr_val_ddpm_text_T_vqganfin_oldcanvas.py
deleted file mode 100644
index 6429a97c2d82c03d93985ac2de970dc7360da03a..0000000000000000000000000000000000000000
--- a/spaces/Iceclear/StableSR/StableSR/scripts/sr_val_ddpm_text_T_vqganfin_oldcanvas.py
+++ /dev/null
@@ -1,351 +0,0 @@
-"""make variations of input image"""
-
-import argparse, os, sys, glob
-import PIL
-import torch
-import numpy as np
-import torchvision
-from omegaconf import OmegaConf
-from PIL import Image
-from tqdm import tqdm, trange
-from itertools import islice
-from einops import rearrange, repeat
-from torchvision.utils import make_grid
-from torch import autocast
-from contextlib import nullcontext
-import time
-from pytorch_lightning import seed_everything
-
-from ldm.util import instantiate_from_config
-from ldm.models.diffusion.ddim import DDIMSampler
-from ldm.models.diffusion.plms import PLMSSampler
-import math
-import copy
-import torch.nn.functional as F
-import cv2
-from scripts.wavelet_color_fix import wavelet_reconstruction, adaptive_instance_normalization
-
-def space_timesteps(num_timesteps, section_counts):
- """
- Create a list of timesteps to use from an original diffusion process,
- given the number of timesteps we want to take from equally-sized portions
- of the original process.
- For example, if there are 300 timesteps and the section counts are [10,15,20]
- then the first 100 timesteps are strided to be 10 timesteps, the second 100
- are strided to be 15 timesteps, and the final 100 are strided to be 20.
- If the stride is a string starting with "ddim", then the fixed striding
- from the DDIM paper is used, and only one section is allowed.
- :param num_timesteps: the number of diffusion steps in the original
- process to divide up.
- :param section_counts: either a list of numbers, or a string containing
- comma-separated numbers, indicating the step count
- per section. As a special case, use "ddimN" where N
- is a number of steps to use the striding from the
- DDIM paper.
- :return: a set of diffusion steps from the original process to use.
- """
- if isinstance(section_counts, str):
- if section_counts.startswith("ddim"):
- desired_count = int(section_counts[len("ddim"):])
- for i in range(1, num_timesteps):
- if len(range(0, num_timesteps, i)) == desired_count:
- return set(range(0, num_timesteps, i))
- raise ValueError(
- f"cannot create exactly {desired_count} steps with an integer stride"
- )
- section_counts = [int(x) for x in section_counts.split(",")] #[250,]
- size_per = num_timesteps // len(section_counts)
- extra = num_timesteps % len(section_counts)
- start_idx = 0
- all_steps = []
- for i, section_count in enumerate(section_counts):
- size = size_per + (1 if i < extra else 0)
- if size < section_count:
- raise ValueError(
- f"cannot divide section of {size} steps into {section_count}"
- )
- if section_count <= 1:
- frac_stride = 1
- else:
- frac_stride = (size - 1) / (section_count - 1)
- cur_idx = 0.0
- taken_steps = []
- for _ in range(section_count):
- taken_steps.append(start_idx + round(cur_idx))
- cur_idx += frac_stride
- all_steps += taken_steps
- start_idx += size
- return set(all_steps)
-
-def chunk(it, size):
- it = iter(it)
- return iter(lambda: tuple(islice(it, size)), ())
-
-
-def load_model_from_config(config, ckpt, verbose=False):
- print(f"Loading model from {ckpt}")
- pl_sd = torch.load(ckpt, map_location="cpu")
- if "global_step" in pl_sd:
- print(f"Global Step: {pl_sd['global_step']}")
- sd = pl_sd["state_dict"]
- model = instantiate_from_config(config.model)
- m, u = model.load_state_dict(sd, strict=False)
- if len(m) > 0 and verbose:
- print("missing keys:")
- print(m)
- if len(u) > 0 and verbose:
- print("unexpected keys:")
- print(u)
-
- model.cuda()
- model.eval()
- return model
-
-def load_img(path):
- image = Image.open(path).convert("RGB")
- w, h = image.size
- print(f"loaded input image of size ({w}, {h}) from {path}")
- w, h = map(lambda x: x - x % 8, (w, h)) # resize to integer multiple of 8
- image = image.resize((w, h), resample=PIL.Image.LANCZOS)
- image = np.array(image).astype(np.float32) / 255.0
- image = image[None].transpose(0, 3, 1, 2)
- image = torch.from_numpy(image)
- return 2.*image - 1.
-
-
-def main():
- parser = argparse.ArgumentParser()
-
- parser.add_argument(
- "--init-img",
- type=str,
- nargs="?",
- help="path to the input image",
- default="inputs/user_upload"
- )
- parser.add_argument(
- "--outdir",
- type=str,
- nargs="?",
- help="dir to write results to",
- default="outputs/user_upload"
- )
- parser.add_argument(
- "--ddpm_steps",
- type=int,
- default=1000,
- help="number of ddpm sampling steps",
- )
- parser.add_argument(
- "--C",
- type=int,
- default=4,
- help="latent channels",
- )
- parser.add_argument(
- "--f",
- type=int,
- default=8,
- help="downsampling factor, most often 8 or 16",
- )
- parser.add_argument(
- "--n_samples",
- type=int,
- default=2,
- help="how many samples to produce for each given prompt. A.k.a batch size",
- )
- parser.add_argument(
- "--config",
- type=str,
- default="configs/stableSRNew/v2-finetune_text_T_512.yaml",
- help="path to config which constructs model",
- )
- parser.add_argument(
- "--ckpt",
- type=str,
- default="./stablesr_000117.ckpt",
- help="path to checkpoint of model",
- )
- parser.add_argument(
- "--vqgan_ckpt",
- type=str,
- default="./vqgan_cfw_00011.ckpt",
- help="path to checkpoint of VQGAN model",
- )
- parser.add_argument(
- "--seed",
- type=int,
- default=42,
- help="the seed (for reproducible sampling)",
- )
- parser.add_argument(
- "--precision",
- type=str,
- help="evaluate at this precision",
- choices=["full", "autocast"],
- default="autocast"
- )
- parser.add_argument(
- "--input_size",
- type=int,
- default=512,
- help="input size",
- )
- parser.add_argument(
- "--dec_w",
- type=float,
- default=0.5,
- help="weight for combining VQGAN and Diffusion",
- )
- parser.add_argument(
- "--tile_overlap",
- type=int,
- default=32,
- help="tile overlap size",
- )
- parser.add_argument(
- "--upscale",
- type=float,
- default=4.0,
- help="upsample scale",
- )
- parser.add_argument(
- "--colorfix_type",
- type=str,
- default="nofix",
- help="Color fix type to adjust the color of HR result according to LR input: adain (used in paper); wavelet; nofix",
- )
-
- opt = parser.parse_args()
- seed_everything(opt.seed)
-
- print('>>>>>>>>>>color correction>>>>>>>>>>>')
- if opt.colorfix_type == 'adain':
- print('Use adain color correction')
- elif opt.colorfix_type == 'wavelet':
- print('Use wavelet color correction')
- else:
- print('No color correction')
- print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
-
- config = OmegaConf.load(f"{opt.config}")
- model = load_model_from_config(config, f"{opt.ckpt}")
- device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
- model = model.to(device)
-
- model.configs = config
-
- vqgan_config = OmegaConf.load("configs/autoencoder/autoencoder_kl_64x64x4_resi.yaml")
- vq_model = load_model_from_config(vqgan_config, opt.vqgan_ckpt)
- vq_model = vq_model.to(device)
- vq_model.decoder.fusion_w = opt.dec_w
-
- os.makedirs(opt.outdir, exist_ok=True)
- outpath = opt.outdir
-
- batch_size = opt.n_samples
-
- img_list_ori = os.listdir(opt.init_img)
- img_list = copy.deepcopy(img_list_ori)
- init_image_list = []
- for item in img_list_ori:
- if os.path.exists(os.path.join(outpath, item)):
- img_list.remove(item)
- continue
- cur_image = load_img(os.path.join(opt.init_img, item)).to(device)
- # max size: 1800 x 1800 for V100
- cur_image = F.interpolate(
- cur_image,
- size=(int(cur_image.size(-2)*opt.upscale),
- int(cur_image.size(-1)*opt.upscale)),
- mode='bicubic',
- )
- init_image_list.append(cur_image)
-
- model.register_schedule(given_betas=None, beta_schedule="linear", timesteps=1000,
- linear_start=0.00085, linear_end=0.0120, cosine_s=8e-3)
- model.num_timesteps = 1000
-
- sqrt_alphas_cumprod = copy.deepcopy(model.sqrt_alphas_cumprod)
- sqrt_one_minus_alphas_cumprod = copy.deepcopy(model.sqrt_one_minus_alphas_cumprod)
-
- use_timesteps = set(space_timesteps(1000, [opt.ddpm_steps]))
- last_alpha_cumprod = 1.0
- new_betas = []
- timestep_map = []
- for i, alpha_cumprod in enumerate(model.alphas_cumprod):
- if i in use_timesteps:
- new_betas.append(1 - alpha_cumprod / last_alpha_cumprod)
- last_alpha_cumprod = alpha_cumprod
- timestep_map.append(i)
- new_betas = [beta.data.cpu().numpy() for beta in new_betas]
- model.register_schedule(given_betas=np.array(new_betas), timesteps=len(new_betas))
- model.num_timesteps = 1000
- model.ori_timesteps = list(use_timesteps)
- model.ori_timesteps.sort()
- model = model.to(device)
-
- precision_scope = autocast if opt.precision == "autocast" else nullcontext
- with torch.no_grad():
- with precision_scope("cuda"):
- with model.ema_scope():
- tic = time.time()
- all_samples = list()
- for n in trange(len(init_image_list), desc="Sampling"):
- init_image = init_image_list[n]
- init_image = init_image.clamp(-1.0, 1.0)
- ori_size = None
-
- print('>>>>>>>>>>>>>>>>>>>>>>>')
- print(init_image.size())
-
- if init_image.size(-1) < opt.input_size or init_image.size(-2) < opt.input_size:
- ori_size = init_image.size()
- new_h = max(ori_size[-2], opt.input_size)
- new_w = max(ori_size[-1], opt.input_size)
- init_template = torch.zeros(1, init_image.size(1), new_h, new_w).to(init_image.device)
- init_template[:, :, :ori_size[-2], :ori_size[-1]] = init_image
- else:
- init_template = init_image
-
- init_latent = model.get_first_stage_encoding(model.encode_first_stage(init_template)) # move to latent space
- text_init = ['']*opt.n_samples
- semantic_c = model.cond_stage_model(text_init)
-
- noise = torch.randn_like(init_latent)
- # To start from an intermediate step instead, add noise to the LR latent up to that step.
- t = repeat(torch.tensor([999]), '1 -> b', b=init_image.size(0))
- t = t.to(device).long()
- x_T = model.q_sample_respace(x_start=init_latent, t=t, sqrt_alphas_cumprod=sqrt_alphas_cumprod, sqrt_one_minus_alphas_cumprod=sqrt_one_minus_alphas_cumprod, noise=noise)
- # x_T = noise
-
- samples, _ = model.sample_canvas(cond=semantic_c, struct_cond=init_latent, batch_size=init_image.size(0), timesteps=opt.ddpm_steps, time_replace=opt.ddpm_steps, x_T=x_T, return_intermediates=True, tile_size=int(opt.input_size/8), tile_overlap=opt.tile_overlap, batch_size_sample=opt.n_samples)
- _, enc_fea_lq = vq_model.encode(init_template)
- x_samples = vq_model.decode(samples * 1. / model.scale_factor, enc_fea_lq)
- if ori_size is not None:
- x_samples = x_samples[:, :, :ori_size[-2], :ori_size[-1]]
- if opt.colorfix_type == 'adain':
- x_samples = adaptive_instance_normalization(x_samples, init_image)
- elif opt.colorfix_type == 'wavelet':
- x_samples = wavelet_reconstruction(x_samples, init_image)
- x_samples = torch.clamp((x_samples + 1.0) / 2.0, min=0.0, max=1.0)
-
- for i in range(init_image.size(0)):
- img_name = img_list.pop(0)
- basename = os.path.splitext(os.path.basename(img_name))[0]
- x_sample = 255. * rearrange(x_samples[i].cpu().numpy(), 'c h w -> h w c')
- Image.fromarray(x_sample.astype(np.uint8)).save(
- os.path.join(outpath, basename+'.png'))
- init_image = torch.clamp((init_image + 1.0) / 2.0, min=0.0, max=1.0)
- init_image = 255. * rearrange(init_image[i].cpu().numpy(), 'c h w -> h w c')
- Image.fromarray(init_image.astype(np.uint8)).save(
- os.path.join(outpath, basename+'_lq.png'))
-
- toc = time.time()
-
- print(f"Your samples are ready and waiting for you here: \n{outpath} \n"
- f" \nEnjoy.")
-
-
-if __name__ == "__main__":
- main()
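
Two schedule manipulations in the script above benefit from a worked example: space_timesteps picks a strided subset of the 1000 original DDPM steps, and the loop in main() rebuilds betas so that the cumulative alphas at the kept steps are preserved. The sketch below assumes space_timesteps from this script is in scope and uses a plain linear beta schedule purely for illustration (the model's own register_schedule may parameterize betas differently):

    import numpy as np

    # the docstring example: 300 original steps, sections keeping 10/15/20 steps each
    steps = space_timesteps(300, [10, 15, 20])
    assert len(steps) == 45 and min(steps) == 0 and max(steps) == 299

    # "ddimN" asks for a uniform stride instead: 25 steps out of 1000 -> every 40th step
    assert space_timesteps(1000, "ddim25") == set(range(0, 1000, 40))

    # respacing: new_beta_k = 1 - alpha_bar(t_k) / alpha_bar(t_{k-1}) telescopes, so the
    # shortened chain reproduces the original cumulative alphas at every kept step
    betas = np.linspace(1e-4, 2e-2, 1000)  # illustrative linear schedule; the script may differ
    alphas_cumprod = np.cumprod(1.0 - betas)
    kept = sorted(space_timesteps(1000, [50]))
    kept_set, last, new_betas = set(kept), 1.0, []
    for i, ac in enumerate(alphas_cumprod):
        if i in kept_set:
            new_betas.append(1.0 - ac / last)
            last = ac
    assert np.allclose(np.cumprod(1.0 - np.array(new_betas)), alphas_cumprod[kept])
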
diff --git a/spaces/Ikaros521/VITS-fast-fine-tuning_nymph/losses.py b/spaces/Ikaros521/VITS-fast-fine-tuning_nymph/losses.py
deleted file mode 100644
index fb22a0e834dd87edaa37bb8190eee2c3c7abe0d5..0000000000000000000000000000000000000000
--- a/spaces/Ikaros521/VITS-fast-fine-tuning_nymph/losses.py
+++ /dev/null
@@ -1,61 +0,0 @@
-import torch
-from torch.nn import functional as F
-
-import commons
-
-
-def feature_loss(fmap_r, fmap_g):
- loss = 0
- for dr, dg in zip(fmap_r, fmap_g):
- for rl, gl in zip(dr, dg):
- rl = rl.float().detach()
- gl = gl.float()
- loss += torch.mean(torch.abs(rl - gl))
-
- return loss * 2
-
-
-def discriminator_loss(disc_real_outputs, disc_generated_outputs):
- loss = 0
- r_losses = []
- g_losses = []
- for dr, dg in zip(disc_real_outputs, disc_generated_outputs):
- dr = dr.float()
- dg = dg.float()
- r_loss = torch.mean((1-dr)**2)
- g_loss = torch.mean(dg**2)
- loss += (r_loss + g_loss)
- r_losses.append(r_loss.item())
- g_losses.append(g_loss.item())
-
- return loss, r_losses, g_losses
-
-
-def generator_loss(disc_outputs):
- loss = 0
- gen_losses = []
- for dg in disc_outputs:
- dg = dg.float()
- l = torch.mean((1-dg)**2)
- gen_losses.append(l)
- loss += l
-
- return loss, gen_losses
-
-
-def kl_loss(z_p, logs_q, m_p, logs_p, z_mask):
- """KL between the posterior q and the prior p, estimated from the sampled latent.
- z_p: latent sampled from the posterior, logs_q: posterior log-std, [b, h, t_t]
- m_p, logs_p: prior mean and log-std, [b, h, t_t]
- z_mask: mask over valid frames, used to average only unpadded positions
- """
- z_p = z_p.float()
- logs_q = logs_q.float()
- m_p = m_p.float()
- logs_p = logs_p.float()
- z_mask = z_mask.float()
-
- kl = logs_p - logs_q - 0.5
- kl += 0.5 * ((z_p - m_p)**2) * torch.exp(-2. * logs_p)
- kl = torch.sum(kl * z_mask)
- l = kl / torch.sum(z_mask)
- return l
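
These are least-squares (LSGAN-style) objectives of the kind used in VITS/HiFi-GAN training: the discriminator is pushed toward 1 on real features and 0 on generated ones, and the generator toward 1. A tiny standalone sanity check of the three adversarial losses above, with toy tensors only:

    import torch

    real_outs = [torch.ones(4, 10)]   # an "ideal" discriminator score on real audio
    fake_outs = [torch.zeros(4, 10)]  # ...and on generated audio
    d_loss, r_losses, g_losses = discriminator_loss(real_outs, fake_outs)
    assert d_loss.item() == 0.0 and r_losses == [0.0] and g_losses == [0.0]

    g_loss, per_output = generator_loss([torch.ones(4, 10)])  # generator fully fools D
    assert g_loss.item() == 0.0 and per_output[0].item() == 0.0

    fm = feature_loss([[torch.ones(2, 3)]], [[torch.ones(2, 3)]])  # identical feature maps
    assert fm.item() == 0.0
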
diff --git a/spaces/InpaintAI/Inpaint-Anything/third_party/segment-anything/segment_anything/modeling/common.py b/spaces/InpaintAI/Inpaint-Anything/third_party/segment-anything/segment_anything/modeling/common.py
deleted file mode 100644
index 2bf15236a3eb24d8526073bc4fa2b274cccb3f96..0000000000000000000000000000000000000000
--- a/spaces/InpaintAI/Inpaint-Anything/third_party/segment-anything/segment_anything/modeling/common.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-import torch
-import torch.nn as nn
-
-from typing import Type
-
-
-class MLPBlock(nn.Module):
- def __init__(
- self,
- embedding_dim: int,
- mlp_dim: int,
- act: Type[nn.Module] = nn.GELU,
- ) -> None:
- super().__init__()
- self.lin1 = nn.Linear(embedding_dim, mlp_dim)
- self.lin2 = nn.Linear(mlp_dim, embedding_dim)
- self.act = act()
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- return self.lin2(self.act(self.lin1(x)))
-
-
-# From https://github.com/facebookresearch/detectron2/blob/main/detectron2/layers/batch_norm.py # noqa
-# Itself from https://github.com/facebookresearch/ConvNeXt/blob/d1fa8f6fef0a165b27399986cc2bdacc92777e40/models/convnext.py#L119 # noqa
-class LayerNorm2d(nn.Module):
- def __init__(self, num_channels: int, eps: float = 1e-6) -> None:
- super().__init__()
- self.weight = nn.Parameter(torch.ones(num_channels))
- self.bias = nn.Parameter(torch.zeros(num_channels))
- self.eps = eps
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- u = x.mean(1, keepdim=True)
- s = (x - u).pow(2).mean(1, keepdim=True)
- x = (x - u) / torch.sqrt(s + self.eps)
- x = self.weight[:, None, None] * x + self.bias[:, None, None]
- return x
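
LayerNorm2d above normalizes each spatial position over the channel dimension of an NCHW tensor; with default affine parameters it should match torch.nn.LayerNorm applied channels-last. A quick equivalence sketch, assuming the two classes above are in scope:

    import torch
    import torch.nn as nn

    x = torch.randn(2, 8, 16, 16)
    ln2d = LayerNorm2d(8)            # weight/bias start at ones/zeros, same as nn.LayerNorm
    ref = nn.LayerNorm(8, eps=1e-6)
    ref_out = ref(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
    assert torch.allclose(ln2d(x), ref_out, atol=1e-5)

    mlp = MLPBlock(embedding_dim=8, mlp_dim=32)   # Linear -> GELU -> Linear
    assert mlp(torch.randn(2, 5, 8)).shape == (2, 5, 8)
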
diff --git a/spaces/IoMa/stable-diffusion-webui-cpu/README.md b/spaces/IoMa/stable-diffusion-webui-cpu/README.md
deleted file mode 100644
index 4764e29b128c0eb403d4873ca8c8f6eb485132d0..0000000000000000000000000000000000000000
--- a/spaces/IoMa/stable-diffusion-webui-cpu/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-title: Stable Diffusion Webui on Cpu
-emoji: 🏃
-colorFrom: pink
-colorTo: purple
-sdk: gradio
-sdk_version: 3.29.0
-app_file: app.py
-pinned: false
-python_version: 3.10.6
-duplicated_from: DreamSunny/stable-diffusion-webui-cpu
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Jamkonams/AutoGPT/autogpt/agent/agent_manager.py b/spaces/Jamkonams/AutoGPT/autogpt/agent/agent_manager.py
deleted file mode 100644
index 898767a485e50b5e62625a7883edf1b30d5fddf9..0000000000000000000000000000000000000000
--- a/spaces/Jamkonams/AutoGPT/autogpt/agent/agent_manager.py
+++ /dev/null
@@ -1,103 +0,0 @@
-"""Agent manager for managing GPT agents"""
-from __future__ import annotations
-
-from typing import Union
-
-from autogpt.config.config import Singleton
-from autogpt.llm_utils import create_chat_completion
-
-
-class AgentManager(metaclass=Singleton):
- """Agent manager for managing GPT agents"""
-
- def __init__(self):
- self.next_key = 0
- self.agents = {} # key, (task, full_message_history, model)
-
- # Create new GPT agent
- # TODO: Centralise use of create_chat_completion() to globally enforce token limit
-
- def create_agent(self, task: str, prompt: str, model: str) -> tuple[int, str]:
- """Create a new agent and return its key
-
- Args:
- task: The task to perform
- prompt: The prompt to use
- model: The model to use
-
- Returns:
- The key of the new agent and its initial reply
- """
- messages = [
- {"role": "user", "content": prompt},
- ]
-
- # Start GPT instance
- agent_reply = create_chat_completion(
- model=model,
- messages=messages,
- )
-
- # Update full message history
- messages.append({"role": "assistant", "content": agent_reply})
-
- key = self.next_key
- # This is done instead of len(agents) to make keys unique even if agents
- # are deleted
- self.next_key += 1
-
- self.agents[key] = (task, messages, model)
-
- return key, agent_reply
-
- def message_agent(self, key: str | int, message: str) -> str:
- """Send a message to an agent and return its response
-
- Args:
- key: The key of the agent to message
- message: The message to send to the agent
-
- Returns:
- The agent's response
- """
- task, messages, model = self.agents[int(key)]
-
- # Add user message to message history before sending to agent
- messages.append({"role": "user", "content": message})
-
- # Start GPT instance
- agent_reply = create_chat_completion(
- model=model,
- messages=messages,
- )
-
- # Update full message history
- messages.append({"role": "assistant", "content": agent_reply})
-
- return agent_reply
-
- def list_agents(self) -> list[tuple[str | int, str]]:
- """Return a list of all agents
-
- Returns:
- A list of tuples of the form (key, task)
- """
-
- # Return a list of agent keys and their tasks
- return [(key, task) for key, (task, _, _) in self.agents.items()]
-
- def delete_agent(self, key: Union[str, int]) -> bool:
- """Delete an agent from the agent manager
-
- Args:
- key: The key of the agent to delete
-
- Returns:
- True if successful, False otherwise
- """
-
- try:
- del self.agents[int(key)]
- return True
- except KeyError:
- return False
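
A minimal usage sketch of the manager above. The model name and prompts are placeholders, and an OpenAI-compatible backend is assumed to be configured for create_chat_completion; this is illustrative, not the project's documented workflow:

    manager = AgentManager()  # Singleton metaclass: every call returns the same instance

    key, first_reply = manager.create_agent(
        task="summarise release notes",
        prompt="You are a sub-agent. Reply 'ready' once initialised.",
        model="gpt-3.5-turbo",  # placeholder model name
    )
    print(first_reply)
    print(manager.message_agent(key, "Summarise: this diff removes several demo Spaces."))
    print(manager.list_agents())      # e.g. [(0, "summarise release notes")]
    assert manager.delete_agent(key)  # True on success, False for unknown keys
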
diff --git a/spaces/JeffJing/ZookChatBot/steamship/data/plugin/plugin_instance.py b/spaces/JeffJing/ZookChatBot/steamship/data/plugin/plugin_instance.py
deleted file mode 100644
index 39d171c1b0e5d966d5f8d7ab12e2e696310d3f18..0000000000000000000000000000000000000000
--- a/spaces/JeffJing/ZookChatBot/steamship/data/plugin/plugin_instance.py
+++ /dev/null
@@ -1,184 +0,0 @@
-from __future__ import annotations
-
-from typing import Any, Dict, Optional, Type, Union
-
-from pydantic import BaseModel, Field
-
-from steamship.base import Task
-from steamship.base.client import Client
-from steamship.base.model import CamelModel
-from steamship.base.request import DeleteRequest, IdentifierRequest, Request
-from steamship.data.block import Block
-from steamship.data.file import File
-from steamship.data.operations.generator import GenerateRequest, GenerateResponse
-from steamship.data.operations.tagger import TagRequest, TagResponse
-from steamship.data.plugin import (
- HostingCpu,
- HostingEnvironment,
- HostingMemory,
- HostingTimeout,
- HostingType,
-)
-from steamship.plugin.inputs.export_plugin_input import ExportPluginInput
-from steamship.plugin.inputs.training_parameter_plugin_input import TrainingParameterPluginInput
-from steamship.plugin.outputs.train_plugin_output import TrainPluginOutput
-from steamship.plugin.outputs.training_parameter_plugin_output import TrainingParameterPluginOutput
-
-
-class CreatePluginInstanceRequest(Request):
- id: str = None
- plugin_id: str = None
- plugin_handle: str = None
- plugin_version_id: str = None
- plugin_version_handle: str = None
- handle: str = None
- fetch_if_exists: bool = None
- config: Dict[str, Any] = None
-
-
-SIGNED_URL_EXPORTER_INSTANCE_HANDLE = "signed-url-exporter-1.0"
-
-
-class PluginInstance(CamelModel):
- client: Client = Field(None, exclude=True)
- id: str = None
- handle: str = None
- plugin_id: str = None
- plugin_version_id: str = None
- plugin_handle: Optional[str] = None
- plugin_version_handle: Optional[str] = None
- workspace_id: Optional[str] = None
- user_id: str = None
- config: Dict[str, Any] = None
- hosting_type: Optional[HostingType] = None
- hosting_cpu: Optional[HostingCpu] = None
- hosting_memory: Optional[HostingMemory] = None
- hosting_timeout: Optional[HostingTimeout] = None
- hosting_environment: Optional[HostingEnvironment] = None
-
- @classmethod
- def parse_obj(cls: Type[BaseModel], obj: Any) -> BaseModel:
- # TODO (enias): This needs to be solved at the engine side
- obj = obj["pluginInstance"] if "pluginInstance" in obj else obj
- return super().parse_obj(obj)
-
- @staticmethod
- def create(
- client: Client,
- plugin_id: str = None,
- plugin_handle: str = None,
- plugin_version_id: str = None,
- plugin_version_handle: str = None,
- handle: str = None,
- fetch_if_exists: bool = True,
- config: Dict[str, Any] = None,
- ) -> PluginInstance:
- """Create a plugin instance
-
- When handle is empty the engine will automatically assign one.
- fetch_if_exists controls whether to re-use an existing plugin instance instead of creating a new one."""
- req = CreatePluginInstanceRequest(
- handle=handle,
- plugin_id=plugin_id,
- plugin_handle=plugin_handle,
- plugin_version_id=plugin_version_id,
- plugin_version_handle=plugin_version_handle,
- fetch_if_exists=fetch_if_exists,
- config=config,
- )
-
- return client.post("plugin/instance/create", payload=req, expect=PluginInstance)
-
- @staticmethod
- def get(client: Client, handle: str) -> PluginInstance:
- return client.post(
- "plugin/instance/get", IdentifierRequest(handle=handle), expect=PluginInstance
- )
-
- def tag(
- self,
- doc: Union[str, File],
- ) -> Task[
- TagResponse
- ]: # TODO (enias): Should we remove this helper function in favor of always working with files?
- req = TagRequest(
- type="inline",
- file=File(blocks=[Block(text=doc)]) if isinstance(doc, str) else doc,
- plugin_instance=self.handle,
- )
- return self.client.post(
- "plugin/instance/tag",
- req,
- expect=TagResponse,
- )
-
- def generate(
- self,
- input_file_id: str = None,
- input_file_start_block_index: int = None,
- input_file_end_block_index: Optional[int] = None,
- text: Optional[str] = None,
- # bytes: Optional[bytes] = None, [Not yet implemented]
- block_query: Optional[str] = None,
- # url: Optional[str] = None, [Not yet implemented]
- append_output_to_file: bool = False,
- output_file_id: Optional[str] = None,
- options: Optional[dict] = None,
- ):
- req = GenerateRequest(
- plugin_instance=self.handle,
- input_file_id=input_file_id,
- input_file_start_block_index=input_file_start_block_index,
- input_file_end_block_index=input_file_end_block_index,
- text=text,
- # bytes=bytes,
- block_query=block_query,
- # url=url,
- append_output_to_file=append_output_to_file,
- output_file_id=output_file_id,
- options=options,
- )
- return self.client.post("plugin/instance/generate", req, expect=GenerateResponse)
-
- def delete(self) -> PluginInstance:
- req = DeleteRequest(id=self.id)
- return self.client.post("plugin/instance/delete", payload=req, expect=PluginInstance)
-
- def train(
- self,
- training_request: TrainingParameterPluginInput = None,
- training_epochs: Optional[int] = None,
- export_query: Optional[str] = None,
- testing_holdout_percent: Optional[float] = None,
- test_split_seed: Optional[int] = None,
- training_params: Optional[Dict] = None,
- inference_params: Optional[Dict] = None,
- ) -> Task[TrainPluginOutput]:
- """Train a plugin instance. Please provide either training_request OR the other parameters; passing
- training_request ignores all other parameters, but is kept for backwards compatibility.
- """
- input_params = training_request or TrainingParameterPluginInput(
- plugin_instance=self.handle,
- training_epochs=training_epochs,
- export_plugin_input=ExportPluginInput(
- plugin_instance=SIGNED_URL_EXPORTER_INSTANCE_HANDLE, type="file", query=export_query
- ),
- testing_holdout_percent=testing_holdout_percent,
- test_split_seed=test_split_seed,
- training_params=training_params,
- inference_params=inference_params,
- )
- return self.client.post(
- "plugin/instance/train",
- payload=input_params,
- expect=TrainPluginOutput,
- )
-
- def get_training_parameters(
- self, training_request: TrainingParameterPluginInput
- ) -> TrainingParameterPluginOutput:
- return self.client.post(
- "plugin/instance/getTrainingParameters",
- payload=training_request,
- expect=TrainingParameterPluginOutput,
- )
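
A short usage sketch for the client-side wrapper above. Here client is assumed to be an already-authenticated steamship Client, and the plugin handle and config keys are placeholders rather than real plugin names:

    # create, or fetch, an instance (fetch_if_exists defaults to True)
    instance = PluginInstance.create(
        client,
        plugin_handle="example-text-generator",  # placeholder handle
        config={"temperature": 0.4},             # placeholder config
    )

    # tag() wraps a plain string into File(blocks=[Block(text=...)]) before posting
    tag_task = instance.tag("Steamship runs plugins against files and blocks.")

    # generate() builds a GenerateRequest from whichever inputs are supplied
    gen_task = instance.generate(text="Write one sentence about plugin instances.")

    instance.delete()  # removes the instance server-side and returns the deleted record
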
diff --git a/spaces/JingyeChen22/TextDiffuser/README.md b/spaces/JingyeChen22/TextDiffuser/README.md
deleted file mode 100644
index 19b2234b837811a69a2d73b0aad5c14586f93c2e..0000000000000000000000000000000000000000
--- a/spaces/JingyeChen22/TextDiffuser/README.md
+++ /dev/null
@@ -1,5 +0,0 @@
----
-sdk: gradio
-app_file: app.py
-pinned: false
----
diff --git a/spaces/JohnnyPittt/audio-styling/deepafx_st/models/efficient_net/__init__.py b/spaces/JohnnyPittt/audio-styling/deepafx_st/models/efficient_net/__init__.py
deleted file mode 100644
index 2b529dfe3f61da71f7427fbeb7ab47710450d372..0000000000000000000000000000000000000000
--- a/spaces/JohnnyPittt/audio-styling/deepafx_st/models/efficient_net/__init__.py
+++ /dev/null
@@ -1,9 +0,0 @@
-__version__ = "0.7.1"
-from .model import EfficientNet, VALID_MODELS
-from .utils import (
- GlobalParams,
- BlockArgs,
- BlockDecoder,
- efficientnet,
- get_model_params,
-)
diff --git a/spaces/Jonni/01-3DModel_Gradio/README.md b/spaces/Jonni/01-3DModel_Gradio/README.md
deleted file mode 100644
index 00e528d8f0f397fc7b2746447c6054dbe9918e0f..0000000000000000000000000000000000000000
--- a/spaces/Jonni/01-3DModel_Gradio/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: 01-3DModel Gradio
-emoji: 🦆🧊
-colorFrom: red
-colorTo: purple
-sdk: gradio
-sdk_version: 3.3.1
-app_file: app.py
-pinned: false
-license: apache-2.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Juno360219/Gg/README.md b/spaces/Juno360219/Gg/README.md
deleted file mode 100644
index 6a09749a32375d896ad76db34cd7c82bd819b10d..0000000000000000000000000000000000000000
--- a/spaces/Juno360219/Gg/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Gg
-emoji: 📚
-colorFrom: yellow
-colorTo: gray
-sdk: gradio
-sdk_version: 3.28.1
-app_file: app.py
-pinned: false
-license: openrail
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Kangarroar/ApplioRVC-Inference/infer/lib/infer_pack/onnx_inference.py b/spaces/Kangarroar/ApplioRVC-Inference/infer/lib/infer_pack/onnx_inference.py
deleted file mode 100644
index 6633659fc83b19d82611d3c9cc840e9c547734d0..0000000000000000000000000000000000000000
--- a/spaces/Kangarroar/ApplioRVC-Inference/infer/lib/infer_pack/onnx_inference.py
+++ /dev/null
@@ -1,149 +0,0 @@
-import librosa
-import numpy as np
-import onnxruntime
-import soundfile
-
-import logging
-
-logger = logging.getLogger(__name__)
-
-
-class ContentVec:
- def __init__(self, vec_path="pretrained/vec-768-layer-12.onnx", device=None):
- logger.info("Load model(s) from {}".format(vec_path))
- if device == "cpu" or device is None:
- providers = ["CPUExecutionProvider"]
- elif device == "cuda":
- providers = ["CUDAExecutionProvider", "CPUExecutionProvider"]
- elif device == "dml":
- providers = ["DmlExecutionProvider"]
- else:
- raise RuntimeError("Unsupported device")
- self.model = onnxruntime.InferenceSession(vec_path, providers=providers)
-
- def __call__(self, wav):
- return self.forward(wav)
-
- def forward(self, wav):
- feats = wav
- if feats.ndim == 2: # double channels
- feats = feats.mean(-1)
- assert feats.ndim == 1, feats.ndim
- feats = np.expand_dims(np.expand_dims(feats, 0), 0)
- onnx_input = {self.model.get_inputs()[0].name: feats}
- logits = self.model.run(None, onnx_input)[0]
- return logits.transpose(0, 2, 1)
-
-
-def get_f0_predictor(f0_predictor, hop_length, sampling_rate, **kargs):
- if f0_predictor == "pm":
- from lib.infer_pack.modules.F0Predictor.PMF0Predictor import PMF0Predictor
-
- f0_predictor_object = PMF0Predictor(
- hop_length=hop_length, sampling_rate=sampling_rate
- )
- elif f0_predictor == "harvest":
- from lib.infer_pack.modules.F0Predictor.HarvestF0Predictor import (
- HarvestF0Predictor,
- )
-
- f0_predictor_object = HarvestF0Predictor(
- hop_length=hop_length, sampling_rate=sampling_rate
- )
- elif f0_predictor == "dio":
- from lib.infer_pack.modules.F0Predictor.DioF0Predictor import DioF0Predictor
-
- f0_predictor_object = DioF0Predictor(
- hop_length=hop_length, sampling_rate=sampling_rate
- )
- else:
- raise Exception("Unknown f0 predictor")
- return f0_predictor_object
-
-
-class OnnxRVC:
- def __init__(
- self,
- model_path,
- sr=40000,
- hop_size=512,
- vec_path="vec-768-layer-12",
- device="cpu",
- ):
- vec_path = f"pretrained/{vec_path}.onnx"
- self.vec_model = ContentVec(vec_path, device)
- if device == "cpu" or device is None:
- providers = ["CPUExecutionProvider"]
- elif device == "cuda":
- providers = ["CUDAExecutionProvider", "CPUExecutionProvider"]
- elif device == "dml":
- providers = ["DmlExecutionProvider"]
- else:
- raise RuntimeError("Unsupported device")
- self.model = onnxruntime.InferenceSession(model_path, providers=providers)
- self.sampling_rate = sr
- self.hop_size = hop_size
-
- def forward(self, hubert, hubert_length, pitch, pitchf, ds, rnd):
- onnx_input = {
- self.model.get_inputs()[0].name: hubert,
- self.model.get_inputs()[1].name: hubert_length,
- self.model.get_inputs()[2].name: pitch,
- self.model.get_inputs()[3].name: pitchf,
- self.model.get_inputs()[4].name: ds,
- self.model.get_inputs()[5].name: rnd,
- }
- return (self.model.run(None, onnx_input)[0] * 32767).astype(np.int16)
-
- def inference(
- self,
- raw_path,
- sid,
- f0_method="dio",
- f0_up_key=0,
- pad_time=0.5,
- cr_threshold=0.02,
- ):
- f0_min = 50
- f0_max = 1100
- f0_mel_min = 1127 * np.log(1 + f0_min / 700)
- f0_mel_max = 1127 * np.log(1 + f0_max / 700)
- f0_predictor = get_f0_predictor(
- f0_method,
- hop_length=self.hop_size,
- sampling_rate=self.sampling_rate,
- threshold=cr_threshold,
- )
- wav, sr = librosa.load(raw_path, sr=self.sampling_rate)
- org_length = len(wav)
- if org_length / sr > 50.0:
- raise RuntimeError("Reached Max Length")
-
- wav16k = librosa.resample(wav, orig_sr=self.sampling_rate, target_sr=16000)
-
- hubert = self.vec_model(wav16k)
- hubert = np.repeat(hubert, 2, axis=2).transpose(0, 2, 1).astype(np.float32)
- hubert_length = hubert.shape[1]
-
- pitchf = f0_predictor.compute_f0(wav, hubert_length)
- pitchf = pitchf * 2 ** (f0_up_key / 12)
- pitch = pitchf.copy()
- f0_mel = 1127 * np.log(1 + pitch / 700)
- f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (
- f0_mel_max - f0_mel_min
- ) + 1
- f0_mel[f0_mel <= 1] = 1
- f0_mel[f0_mel > 255] = 255
- pitch = np.rint(f0_mel).astype(np.int64)
-
- pitchf = pitchf.reshape(1, len(pitchf)).astype(np.float32)
- pitch = pitch.reshape(1, len(pitch))
- ds = np.array([sid]).astype(np.int64)
-
- rnd = np.random.randn(1, 192, hubert_length).astype(np.float32)
- hubert_length = np.array([hubert_length]).astype(np.int64)
-
- out_wav = self.forward(hubert, hubert_length, pitch, pitchf, ds, rnd).squeeze()
- out_wav = np.pad(out_wav, (0, 2 * self.hop_size), "constant")
- return out_wav[0:org_length]
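
inference() above quantises the predicted f0 curve into 255 coarse mel-scale bins for the pitch input while keeping the raw f0 (pitchf) for synthesis. A small worked example of that mapping, standalone numpy only:

    import numpy as np

    f0_min, f0_max = 50, 1100
    f0_mel_min = 1127 * np.log(1 + f0_min / 700)
    f0_mel_max = 1127 * np.log(1 + f0_max / 700)

    f0 = np.array([0.0, 50.0, 220.0, 1100.0])  # 0 Hz marks unvoiced frames
    f0_mel = 1127 * np.log(1 + f0 / 700)
    f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (f0_mel_max - f0_mel_min) + 1
    f0_mel[f0_mel <= 1] = 1
    f0_mel[f0_mel > 255] = 255
    coarse = np.rint(f0_mel).astype(np.int64)
    print(coarse)  # [  1   1  60 255]: unvoiced frames and f0_min share bin 1, f0_max hits bin 255
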
diff --git a/spaces/Kayson/InstructDiffusion/stable_diffusion/ldm/models/autoencoder.py b/spaces/Kayson/InstructDiffusion/stable_diffusion/ldm/models/autoencoder.py
deleted file mode 100644
index 0cb528cb477d7ddd177f735e615d47ad78161008..0000000000000000000000000000000000000000
--- a/spaces/Kayson/InstructDiffusion/stable_diffusion/ldm/models/autoencoder.py
+++ /dev/null
@@ -1,449 +0,0 @@
-# --------------------------------------------------------
-# Stable-Diffusion-Torch
-# Based on Stable-Diffusion (https://github.com/CompVis/stable-diffusion)
-# Removed Pytorch-lightning by Zigang Geng (zigang@mail.ustc.edu.cn)
-# --------------------------------------------------------
-
-import numpy as np
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from contextlib import contextmanager
-from torch.optim.lr_scheduler import LambdaLR
-
-from taming.modules.vqvae.quantize import VectorQuantizer2 as VectorQuantizer
-
-from ldm.modules.diffusionmodules.model import Encoder, Decoder
-from ldm.modules.distributions.distributions import DiagonalGaussianDistribution
-from ldm.modules.ema import LitEma
-
-from ldm.util import instantiate_from_config
-
-
-class VQModel(nn.Module):
- def __init__(self,
- ddconfig,
- lossconfig,
- n_embed,
- embed_dim,
- ckpt_path=None,
- ignore_keys=[],
- image_key="image",
- colorize_nlabels=None,
- monitor=None,
- batch_resize_range=None,
- scheduler_config=None,
- lr_g_factor=1.0,
- remap=None,
- sane_index_shape=False, # tell vector quantizer to return indices as bhw
- use_ema=False
- ):
- super().__init__()
- self.embed_dim = embed_dim
- self.n_embed = n_embed
- self.image_key = image_key
- self.encoder = Encoder(**ddconfig)
- self.decoder = Decoder(**ddconfig)
- self.loss = instantiate_from_config(lossconfig)
- self.quantize = VectorQuantizer(n_embed, embed_dim, beta=0.25,
- remap=remap,
- sane_index_shape=sane_index_shape)
- self.quant_conv = torch.nn.Conv2d(ddconfig["z_channels"], embed_dim, 1)
- self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1)
- if colorize_nlabels is not None:
- assert type(colorize_nlabels)==int
- self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1))
- if monitor is not None:
- self.monitor = monitor
- self.batch_resize_range = batch_resize_range
- if self.batch_resize_range is not None:
- print(f"{self.__class__.__name__}: Using per-batch resizing in range {batch_resize_range}.")
-
- self.use_ema = use_ema
- if self.use_ema:
- self.model_ema = LitEma(self)
- print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")
-
- if ckpt_path is not None:
- self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)
- self.scheduler_config = scheduler_config
- self.lr_g_factor = lr_g_factor
-
- @contextmanager
- def ema_scope(self, context=None):
- if self.use_ema:
- self.model_ema.store(self.parameters())
- self.model_ema.copy_to(self)
- if context is not None:
- print(f"{context}: Switched to EMA weights")
- try:
- yield None
- finally:
- if self.use_ema:
- self.model_ema.restore(self.parameters())
- if context is not None:
- print(f"{context}: Restored training weights")
-
- def init_from_ckpt(self, path, ignore_keys=list()):
- sd = torch.load(path, map_location="cpu")["state_dict"]
- keys = list(sd.keys())
- for k in keys:
- for ik in ignore_keys:
- if k.startswith(ik):
- print("Deleting key {} from state_dict.".format(k))
- del sd[k]
- missing, unexpected = self.load_state_dict(sd, strict=False)
- print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
- if len(missing) > 0:
- print(f"Missing Keys: {missing}")
- print(f"Unexpected Keys: {unexpected}")
-
- def on_train_batch_end(self, *args, **kwargs):
- if self.use_ema:
- self.model_ema(self)
-
- def encode(self, x):
- h = self.encoder(x)
- h = self.quant_conv(h)
- quant, emb_loss, info = self.quantize(h)
- return quant, emb_loss, info
-
- def encode_to_prequant(self, x):
- h = self.encoder(x)
- h = self.quant_conv(h)
- return h
-
- def decode(self, quant):
- quant = self.post_quant_conv(quant)
- dec = self.decoder(quant)
- return dec
-
- def decode_code(self, code_b):
- quant_b = self.quantize.embed_code(code_b)
- dec = self.decode(quant_b)
- return dec
-
- def forward(self, input, return_pred_indices=False):
- quant, diff, (_,_,ind) = self.encode(input)
- dec = self.decode(quant)
- if return_pred_indices:
- return dec, diff, ind
- return dec, diff
-
- def get_input(self, batch, k):
- x = batch[k]
- if len(x.shape) == 3:
- x = x[..., None]
- x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()
- if self.batch_resize_range is not None:
- lower_size = self.batch_resize_range[0]
- upper_size = self.batch_resize_range[1]
- if self.global_step <= 4:
- # do the first few batches with max size to avoid later oom
- new_resize = upper_size
- else:
- new_resize = np.random.choice(np.arange(lower_size, upper_size+16, 16))
- if new_resize != x.shape[2]:
- x = F.interpolate(x, size=new_resize, mode="bicubic")
- x = x.detach()
- return x
-
- def training_step(self, batch, batch_idx, optimizer_idx):
- # https://github.com/pytorch/pytorch/issues/37142
- # try not to fool the heuristics
- x = self.get_input(batch, self.image_key)
- xrec, qloss, ind = self(x, return_pred_indices=True)
-
- if optimizer_idx == 0:
- # autoencode
- aeloss, log_dict_ae = self.loss(qloss, x, xrec, optimizer_idx, self.global_step,
- last_layer=self.get_last_layer(), split="train",
- predicted_indices=ind)
-
- self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=True)
- return aeloss
-
- if optimizer_idx == 1:
- # discriminator
- discloss, log_dict_disc = self.loss(qloss, x, xrec, optimizer_idx, self.global_step,
- last_layer=self.get_last_layer(), split="train")
- self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=True)
- return discloss
-
- def validation_step(self, batch, batch_idx):
- log_dict = self._validation_step(batch, batch_idx)
- with self.ema_scope():
- log_dict_ema = self._validation_step(batch, batch_idx, suffix="_ema")
- return log_dict
-
- def _validation_step(self, batch, batch_idx, suffix=""):
- x = self.get_input(batch, self.image_key)
- xrec, qloss, ind = self(x, return_pred_indices=True)
- aeloss, log_dict_ae = self.loss(qloss, x, xrec, 0,
- self.global_step,
- last_layer=self.get_last_layer(),
- split="val"+suffix,
- predicted_indices=ind
- )
-
- discloss, log_dict_disc = self.loss(qloss, x, xrec, 1,
- self.global_step,
- last_layer=self.get_last_layer(),
- split="val"+suffix,
- predicted_indices=ind
- )
- rec_loss = log_dict_ae[f"val{suffix}/rec_loss"]
- self.log(f"val{suffix}/rec_loss", rec_loss,
- prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True)
- self.log(f"val{suffix}/aeloss", aeloss,
- prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True)
- # if version.parse(pl.__version__) >= version.parse('1.4.0'):
- # del log_dict_ae[f"val{suffix}/rec_loss"]
- self.log_dict(log_dict_ae)
- self.log_dict(log_dict_disc)
- return self.log_dict
-
- def configure_optimizers(self):
- lr_d = self.learning_rate
- lr_g = self.lr_g_factor*self.learning_rate
- print("lr_d", lr_d)
- print("lr_g", lr_g)
- opt_ae = torch.optim.Adam(list(self.encoder.parameters())+
- list(self.decoder.parameters())+
- list(self.quantize.parameters())+
- list(self.quant_conv.parameters())+
- list(self.post_quant_conv.parameters()),
- lr=lr_g, betas=(0.5, 0.9))
- opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),
- lr=lr_d, betas=(0.5, 0.9))
-
- if self.scheduler_config is not None:
- scheduler = instantiate_from_config(self.scheduler_config)
-
- print("Setting up LambdaLR scheduler...")
- scheduler = [
- {
- 'scheduler': LambdaLR(opt_ae, lr_lambda=scheduler.schedule),
- 'interval': 'step',
- 'frequency': 1
- },
- {
- 'scheduler': LambdaLR(opt_disc, lr_lambda=scheduler.schedule),
- 'interval': 'step',
- 'frequency': 1
- },
- ]
- return [opt_ae, opt_disc], scheduler
- return [opt_ae, opt_disc], []
-
- def get_last_layer(self):
- return self.decoder.conv_out.weight
-
- def log_images(self, batch, only_inputs=False, plot_ema=False, **kwargs):
- log = dict()
- x = self.get_input(batch, self.image_key)
- x = x.to(batch.device)
- if only_inputs:
- log["inputs"] = x
- return log
- xrec, _ = self(x)
- if x.shape[1] > 3:
- # colorize with random projection
- assert xrec.shape[1] > 3
- x = self.to_rgb(x)
- xrec = self.to_rgb(xrec)
- log["inputs"] = x
- log["reconstructions"] = xrec
- if plot_ema:
- with self.ema_scope():
- xrec_ema, _ = self(x)
- if x.shape[1] > 3: xrec_ema = self.to_rgb(xrec_ema)
- log["reconstructions_ema"] = xrec_ema
- return log
-
- def to_rgb(self, x):
- assert self.image_key == "segmentation"
- if not hasattr(self, "colorize"):
- self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x))
- x = F.conv2d(x, weight=self.colorize)
- x = 2.*(x-x.min())/(x.max()-x.min()) - 1.
- return x
-
-
-class VQModelInterface(VQModel):
- def __init__(self, embed_dim, *args, **kwargs):
- super().__init__(embed_dim=embed_dim, *args, **kwargs)
- self.embed_dim = embed_dim
-
- def encode(self, x):
- h = self.encoder(x)
- h = self.quant_conv(h)
- return h
-
- def decode(self, h, force_not_quantize=False):
- # also go through quantization layer
- if not force_not_quantize:
- quant, emb_loss, info = self.quantize(h)
- else:
- quant = h
- quant = self.post_quant_conv(quant)
- dec = self.decoder(quant)
- return dec
-
-
-class AutoencoderKL(nn.Module):
- def __init__(self,
- ddconfig,
- lossconfig,
- embed_dim,
- ckpt_path=None,
- ignore_keys=[],
- image_key="image",
- colorize_nlabels=None,
- monitor=None,
- ):
- super().__init__()
- self.image_key = image_key
- self.encoder = Encoder(**ddconfig)
- self.decoder = Decoder(**ddconfig)
- self.loss = instantiate_from_config(lossconfig)
- assert ddconfig["double_z"]
- self.quant_conv = torch.nn.Conv2d(2*ddconfig["z_channels"], 2*embed_dim, 1)
- self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1)
- self.embed_dim = embed_dim
- if colorize_nlabels is not None:
- assert type(colorize_nlabels)==int
- self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1))
- if monitor is not None:
- self.monitor = monitor
- if ckpt_path is not None:
- self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)
-
- def init_from_ckpt(self, path, ignore_keys=list()):
- sd = torch.load(path, map_location="cpu")["state_dict"]
- keys = list(sd.keys())
- for k in keys:
- for ik in ignore_keys:
- if k.startswith(ik):
- print("Deleting key {} from state_dict.".format(k))
- del sd[k]
- self.load_state_dict(sd, strict=False)
- print(f"Restored from {path}")
-
- def encode(self, x):
- h = self.encoder(x)
- moments = self.quant_conv(h)
- posterior = DiagonalGaussianDistribution(moments)
- return posterior
-
- def decode(self, z):
- z = self.post_quant_conv(z.type(self.post_quant_conv.weight.dtype))
- dec = self.decoder(z)
- return dec
-
- def forward(self, input, sample_posterior=True):
- posterior = self.encode(input)
- if sample_posterior:
- z = posterior.sample()
- else:
- z = posterior.mode()
- dec = self.decode(z)
- return dec, posterior
-
- def get_input(self, batch, k):
- x = batch[k]
- if len(x.shape) == 3:
- x = x[..., None]
- x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()
- return x
-
- def training_step(self, batch, batch_idx, optimizer_idx):
- inputs = self.get_input(batch, self.image_key)
- reconstructions, posterior = self(inputs)
-
- if optimizer_idx == 0:
- # train encoder+decoder+logvar
- aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,
- last_layer=self.get_last_layer(), split="train")
- self.log("aeloss", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
- self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False)
- return aeloss
-
- if optimizer_idx == 1:
- # train the discriminator
- discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,
- last_layer=self.get_last_layer(), split="train")
-
- self.log("discloss", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
- self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False)
- return discloss
-
- def validation_step(self, batch, batch_idx):
- inputs = self.get_input(batch, self.image_key)
- reconstructions, posterior = self(inputs)
- aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step,
- last_layer=self.get_last_layer(), split="val")
-
- discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step,
- last_layer=self.get_last_layer(), split="val")
-
- self.log("val/rec_loss", log_dict_ae["val/rec_loss"])
- self.log_dict(log_dict_ae)
- self.log_dict(log_dict_disc)
- return self.log_dict
-
- def configure_optimizers(self):
- lr = self.learning_rate
- opt_ae = torch.optim.Adam(list(self.encoder.parameters())+
- list(self.decoder.parameters())+
- list(self.quant_conv.parameters())+
- list(self.post_quant_conv.parameters()),
- lr=lr, betas=(0.5, 0.9))
- opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),
- lr=lr, betas=(0.5, 0.9))
- return [opt_ae, opt_disc], []
-
- def get_last_layer(self):
- return self.decoder.conv_out.weight
-
- @torch.no_grad()
- def log_images(self, batch, only_inputs=False, **kwargs):
- log = dict()
- x = self.get_input(batch, self.image_key)
- x = x.to(batch.device)
- if not only_inputs:
- xrec, posterior = self(x)
- if x.shape[1] > 3:
- # colorize with random projection
- assert xrec.shape[1] > 3
- x = self.to_rgb(x)
- xrec = self.to_rgb(xrec)
- log["samples"] = self.decode(torch.randn_like(posterior.sample()))
- log["reconstructions"] = xrec
- log["inputs"] = x
- return log
-
- def to_rgb(self, x):
- assert self.image_key == "segmentation"
- if not hasattr(self, "colorize"):
- self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x))
- x = F.conv2d(x, weight=self.colorize)
- x = 2.*(x-x.min())/(x.max()-x.min()) - 1.
- return x
-
-
-class IdentityFirstStage(torch.nn.Module):
- def __init__(self, *args, vq_interface=False, **kwargs):
- self.vq_interface = vq_interface # TODO: Should be true by default but check to not break older stuff
- super().__init__()
-
- def encode(self, x, *args, **kwargs):
- return x
-
- def decode(self, x, *args, **kwargs):
- return x
-
- def quantize(self, x, *args, **kwargs):
- if self.vq_interface:
- return x, None, [None, None, None]
- return x
-
- def forward(self, x, *args, **kwargs):
- return x
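
A shape-level sketch of the KL autoencoder above. The ddconfig values are borrowed from the standard KL-f8 first-stage configuration and the loss is stubbed out with torch.nn.Identity, since this module is only used for encoding and decoding at inference time; treat the exact config as an assumption rather than this repository's setting:

    import torch

    ddconfig = dict(  # KL-f8 style config; an assumption for this sketch
        double_z=True, z_channels=4, resolution=256, in_channels=3, out_ch=3,
        ch=128, ch_mult=[1, 2, 4, 4], num_res_blocks=2, attn_resolutions=[], dropout=0.0,
    )
    ae = AutoencoderKL(ddconfig=ddconfig, lossconfig={"target": "torch.nn.Identity"}, embed_dim=4)

    x = torch.randn(1, 3, 64, 64)           # NCHW image scaled to [-1, 1]
    posterior = ae.encode(x)                # a DiagonalGaussianDistribution over the latent
    z = posterior.sample()                  # 8x spatial downsampling: (1, 4, 8, 8)
    assert z.shape == (1, 4, 8, 8)
    rec, _ = ae(x, sample_posterior=False)  # decode the posterior mode instead of a sample
    assert rec.shape == x.shape
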
diff --git a/spaces/Kayson/InstructDiffusion/stable_diffusion/scripts/img2img.py b/spaces/Kayson/InstructDiffusion/stable_diffusion/scripts/img2img.py
deleted file mode 100644
index 421e2151d9e9de75a142f5d5f532333645a36287..0000000000000000000000000000000000000000
--- a/spaces/Kayson/InstructDiffusion/stable_diffusion/scripts/img2img.py
+++ /dev/null
@@ -1,293 +0,0 @@
-"""make variations of input image"""
-
-import argparse, os, sys, glob
-import PIL
-import torch
-import numpy as np
-from omegaconf import OmegaConf
-from PIL import Image
-from tqdm import tqdm, trange
-from itertools import islice
-from einops import rearrange, repeat
-from torchvision.utils import make_grid
-from torch import autocast
-from contextlib import nullcontext
-import time
-from pytorch_lightning import seed_everything
-
-from ldm.util import instantiate_from_config
-from ldm.models.diffusion.ddim import DDIMSampler
-from ldm.models.diffusion.plms import PLMSSampler
-
-
-def chunk(it, size):
- it = iter(it)
- return iter(lambda: tuple(islice(it, size)), ())
-
-
-def load_model_from_config(config, ckpt, verbose=False):
- print(f"Loading model from {ckpt}")
- pl_sd = torch.load(ckpt, map_location="cpu")
- if "global_step" in pl_sd:
- print(f"Global Step: {pl_sd['global_step']}")
- sd = pl_sd["state_dict"]
- model = instantiate_from_config(config.model)
- m, u = model.load_state_dict(sd, strict=False)
- if len(m) > 0 and verbose:
- print("missing keys:")
- print(m)
- if len(u) > 0 and verbose:
- print("unexpected keys:")
- print(u)
-
- model.cuda()
- model.eval()
- return model
-
-
-def load_img(path):
- image = Image.open(path).convert("RGB")
- w, h = image.size
- print(f"loaded input image of size ({w}, {h}) from {path}")
- w, h = map(lambda x: x - x % 32, (w, h)) # resize to integer multiple of 32
- image = image.resize((w, h), resample=PIL.Image.LANCZOS)
- image = np.array(image).astype(np.float32) / 255.0
- image = image[None].transpose(0, 3, 1, 2)
- image = torch.from_numpy(image)
- return 2.*image - 1.
-
-
-def main():
- parser = argparse.ArgumentParser()
-
- parser.add_argument(
- "--prompt",
- type=str,
- nargs="?",
- default="a painting of a virus monster playing guitar",
- help="the prompt to render"
- )
-
- parser.add_argument(
- "--init-img",
- type=str,
- nargs="?",
- help="path to the input image"
- )
-
- parser.add_argument(
- "--outdir",
- type=str,
- nargs="?",
- help="dir to write results to",
- default="outputs/img2img-samples"
- )
-
- parser.add_argument(
- "--skip_grid",
- action='store_true',
- help="do not save a grid, only individual samples. Helpful when evaluating lots of samples",
- )
-
- parser.add_argument(
- "--skip_save",
- action='store_true',
- help="do not save individual samples. For speed measurements.",
- )
-
- parser.add_argument(
- "--ddim_steps",
- type=int,
- default=50,
- help="number of ddim sampling steps",
- )
-
- parser.add_argument(
- "--plms",
- action='store_true',
- help="use plms sampling",
- )
- parser.add_argument(
- "--fixed_code",
- action='store_true',
- help="if enabled, uses the same starting code across all samples ",
- )
-
- parser.add_argument(
- "--ddim_eta",
- type=float,
- default=0.0,
- help="ddim eta (eta=0.0 corresponds to deterministic sampling)",
- )
- parser.add_argument(
- "--n_iter",
- type=int,
- default=1,
- help="sample this often",
- )
- parser.add_argument(
- "--C",
- type=int,
- default=4,
- help="latent channels",
- )
- parser.add_argument(
- "--f",
- type=int,
- default=8,
- help="downsampling factor, most often 8 or 16",
- )
- parser.add_argument(
- "--n_samples",
- type=int,
- default=2,
- help="how many samples to produce for each given prompt. A.k.a batch size",
- )
- parser.add_argument(
- "--n_rows",
- type=int,
- default=0,
- help="rows in the grid (default: n_samples)",
- )
- parser.add_argument(
- "--scale",
- type=float,
- default=5.0,
- help="unconditional guidance scale: eps = eps(x, empty) + scale * (eps(x, cond) - eps(x, empty))",
- )
-
- parser.add_argument(
- "--strength",
- type=float,
- default=0.75,
- help="strength for noising/unnoising. 1.0 corresponds to full destruction of information in init image",
- )
- parser.add_argument(
- "--from-file",
- type=str,
- help="if specified, load prompts from this file",
- )
- parser.add_argument(
- "--config",
- type=str,
- default="configs/stable-diffusion/v1-inference.yaml",
- help="path to config which constructs model",
- )
- parser.add_argument(
- "--ckpt",
- type=str,
- default="models/ldm/stable-diffusion-v1/model.ckpt",
- help="path to checkpoint of model",
- )
- parser.add_argument(
- "--seed",
- type=int,
- default=42,
- help="the seed (for reproducible sampling)",
- )
- parser.add_argument(
- "--precision",
- type=str,
- help="evaluate at this precision",
- choices=["full", "autocast"],
- default="autocast"
- )
-
- opt = parser.parse_args()
- seed_everything(opt.seed)
-
- config = OmegaConf.load(f"{opt.config}")
- model = load_model_from_config(config, f"{opt.ckpt}")
-
- device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
- model = model.to(device)
-
- if opt.plms:
- raise NotImplementedError("PLMS sampler not (yet) supported")
- sampler = PLMSSampler(model)
- else:
- sampler = DDIMSampler(model)
-
- os.makedirs(opt.outdir, exist_ok=True)
- outpath = opt.outdir
-
- batch_size = opt.n_samples
- n_rows = opt.n_rows if opt.n_rows > 0 else batch_size
- if not opt.from_file:
- prompt = opt.prompt
- assert prompt is not None
- data = [batch_size * [prompt]]
-
- else:
- print(f"reading prompts from {opt.from_file}")
- with open(opt.from_file, "r") as f:
- data = f.read().splitlines()
- data = list(chunk(data, batch_size))
-
- sample_path = os.path.join(outpath, "samples")
- os.makedirs(sample_path, exist_ok=True)
- base_count = len(os.listdir(sample_path))
- grid_count = len(os.listdir(outpath)) - 1
-
- assert os.path.isfile(opt.init_img)
- init_image = load_img(opt.init_img).to(device)
- init_image = repeat(init_image, '1 ... -> b ...', b=batch_size)
- init_latent = model.get_first_stage_encoding(model.encode_first_stage(init_image)) # move to latent space
-
- sampler.make_schedule(ddim_num_steps=opt.ddim_steps, ddim_eta=opt.ddim_eta, verbose=False)
-
- assert 0. <= opt.strength <= 1., 'can only work with strength in [0.0, 1.0]'
- t_enc = int(opt.strength * opt.ddim_steps)
- print(f"target t_enc is {t_enc} steps")
-
- precision_scope = autocast if opt.precision == "autocast" else nullcontext
- with torch.no_grad():
- with precision_scope("cuda"):
- with model.ema_scope():
- tic = time.time()
- all_samples = list()
- for n in trange(opt.n_iter, desc="Sampling"):
- for prompts in tqdm(data, desc="data"):
- uc = None
- if opt.scale != 1.0:
- uc = model.get_learned_conditioning(batch_size * [""])
- if isinstance(prompts, tuple):
- prompts = list(prompts)
- c = model.get_learned_conditioning(prompts)
-
- # encode (scaled latent)
- z_enc = sampler.stochastic_encode(init_latent, torch.tensor([t_enc]*batch_size).to(device))
- # decode it
- samples = sampler.decode(z_enc, c, t_enc, unconditional_guidance_scale=opt.scale,
- unconditional_conditioning=uc,)
-
- x_samples = model.decode_first_stage(samples)
- x_samples = torch.clamp((x_samples + 1.0) / 2.0, min=0.0, max=1.0)
-
- if not opt.skip_save:
- for x_sample in x_samples:
- x_sample = 255. * rearrange(x_sample.cpu().numpy(), 'c h w -> h w c')
- Image.fromarray(x_sample.astype(np.uint8)).save(
- os.path.join(sample_path, f"{base_count:05}.png"))
- base_count += 1
- all_samples.append(x_samples)
-
- if not opt.skip_grid:
- # additionally, save as grid
- grid = torch.stack(all_samples, 0)
- grid = rearrange(grid, 'n b c h w -> (n b) c h w')
- grid = make_grid(grid, nrow=n_rows)
-
- # to image
- grid = 255. * rearrange(grid, 'c h w -> h w c').cpu().numpy()
- Image.fromarray(grid.astype(np.uint8)).save(os.path.join(outpath, f'grid-{grid_count:04}.png'))
- grid_count += 1
-
- toc = time.time()
-
- print(f"Your samples are ready and waiting for you here: \n{outpath} \n"
- f" \nEnjoy.")
-
-
-if __name__ == "__main__":
- main()
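
One detail of the script above worth spelling out: --strength does not change the DDIM schedule itself, it only chooses how far along that schedule the init image is noised before being decoded back. Purely illustrative arithmetic:

    ddim_steps = 50
    for strength in (0.25, 0.5, 0.75, 1.0):
        t_enc = int(strength * ddim_steps)
        print(f"strength={strength}: stochastic_encode to step {t_enc}, then decode {t_enc} DDIM steps")
    # strength=0.75 (the default) keeps the rough layout of the init image, while
    # strength=1.0 encodes all the way to (almost) pure noise and largely ignores it.
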
diff --git a/spaces/KyanChen/RSPrompter/mmdet/models/backbones/hrnet.py b/spaces/KyanChen/RSPrompter/mmdet/models/backbones/hrnet.py
deleted file mode 100644
index 77bd3cc7125bb7ba03cd201ab3a55174b01dde50..0000000000000000000000000000000000000000
--- a/spaces/KyanChen/RSPrompter/mmdet/models/backbones/hrnet.py
+++ /dev/null
@@ -1,589 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import warnings
-
-import torch.nn as nn
-from mmcv.cnn import build_conv_layer, build_norm_layer
-from mmengine.model import BaseModule, ModuleList, Sequential
-from torch.nn.modules.batchnorm import _BatchNorm
-
-from mmdet.registry import MODELS
-from .resnet import BasicBlock, Bottleneck
-
-
-class HRModule(BaseModule):
- """High-Resolution Module for HRNet.
-
- In this module, every branch has 4 BasicBlocks/Bottlenecks. Fusion/Exchange
- is in this module.
- """
-
- def __init__(self,
- num_branches,
- blocks,
- num_blocks,
- in_channels,
- num_channels,
- multiscale_output=True,
- with_cp=False,
- conv_cfg=None,
- norm_cfg=dict(type='BN'),
- block_init_cfg=None,
- init_cfg=None):
- super(HRModule, self).__init__(init_cfg)
- self.block_init_cfg = block_init_cfg
- self._check_branches(num_branches, num_blocks, in_channels,
- num_channels)
-
- self.in_channels = in_channels
- self.num_branches = num_branches
-
- self.multiscale_output = multiscale_output
- self.norm_cfg = norm_cfg
- self.conv_cfg = conv_cfg
- self.with_cp = with_cp
- self.branches = self._make_branches(num_branches, blocks, num_blocks,
- num_channels)
- self.fuse_layers = self._make_fuse_layers()
- self.relu = nn.ReLU(inplace=False)
-
- def _check_branches(self, num_branches, num_blocks, in_channels,
- num_channels):
- if num_branches != len(num_blocks):
- error_msg = f'NUM_BRANCHES({num_branches}) ' \
- f'!= NUM_BLOCKS({len(num_blocks)})'
- raise ValueError(error_msg)
-
- if num_branches != len(num_channels):
- error_msg = f'NUM_BRANCHES({num_branches}) ' \
- f'!= NUM_CHANNELS({len(num_channels)})'
- raise ValueError(error_msg)
-
- if num_branches != len(in_channels):
- error_msg = f'NUM_BRANCHES({num_branches}) ' \
- f'!= NUM_INCHANNELS({len(in_channels)})'
- raise ValueError(error_msg)
-
- def _make_one_branch(self,
- branch_index,
- block,
- num_blocks,
- num_channels,
- stride=1):
- downsample = None
- if stride != 1 or \
- self.in_channels[branch_index] != \
- num_channels[branch_index] * block.expansion:
- downsample = nn.Sequential(
- build_conv_layer(
- self.conv_cfg,
- self.in_channels[branch_index],
- num_channels[branch_index] * block.expansion,
- kernel_size=1,
- stride=stride,
- bias=False),
- build_norm_layer(self.norm_cfg, num_channels[branch_index] *
- block.expansion)[1])
-
- layers = []
- layers.append(
- block(
- self.in_channels[branch_index],
- num_channels[branch_index],
- stride,
- downsample=downsample,
- with_cp=self.with_cp,
- norm_cfg=self.norm_cfg,
- conv_cfg=self.conv_cfg,
- init_cfg=self.block_init_cfg))
- self.in_channels[branch_index] = \
- num_channels[branch_index] * block.expansion
- for i in range(1, num_blocks[branch_index]):
- layers.append(
- block(
- self.in_channels[branch_index],
- num_channels[branch_index],
- with_cp=self.with_cp,
- norm_cfg=self.norm_cfg,
- conv_cfg=self.conv_cfg,
- init_cfg=self.block_init_cfg))
-
- return Sequential(*layers)
-
- def _make_branches(self, num_branches, block, num_blocks, num_channels):
- branches = []
-
- for i in range(num_branches):
- branches.append(
- self._make_one_branch(i, block, num_blocks, num_channels))
-
- return ModuleList(branches)
-
- def _make_fuse_layers(self):
- if self.num_branches == 1:
- return None
-
- num_branches = self.num_branches
- in_channels = self.in_channels
- fuse_layers = []
- num_out_branches = num_branches if self.multiscale_output else 1
- for i in range(num_out_branches):
- fuse_layer = []
- for j in range(num_branches):
- if j > i:
- fuse_layer.append(
- nn.Sequential(
- build_conv_layer(
- self.conv_cfg,
- in_channels[j],
- in_channels[i],
- kernel_size=1,
- stride=1,
- padding=0,
- bias=False),
- build_norm_layer(self.norm_cfg, in_channels[i])[1],
- nn.Upsample(
- scale_factor=2**(j - i), mode='nearest')))
- elif j == i:
- fuse_layer.append(None)
- else:
- conv_downsamples = []
- for k in range(i - j):
- if k == i - j - 1:
- conv_downsamples.append(
- nn.Sequential(
- build_conv_layer(
- self.conv_cfg,
- in_channels[j],
- in_channels[i],
- kernel_size=3,
- stride=2,
- padding=1,
- bias=False),
- build_norm_layer(self.norm_cfg,
- in_channels[i])[1]))
- else:
- conv_downsamples.append(
- nn.Sequential(
- build_conv_layer(
- self.conv_cfg,
- in_channels[j],
- in_channels[j],
- kernel_size=3,
- stride=2,
- padding=1,
- bias=False),
- build_norm_layer(self.norm_cfg,
- in_channels[j])[1],
- nn.ReLU(inplace=False)))
- fuse_layer.append(nn.Sequential(*conv_downsamples))
- fuse_layers.append(nn.ModuleList(fuse_layer))
-
- return nn.ModuleList(fuse_layers)
-
- def forward(self, x):
- """Forward function."""
- if self.num_branches == 1:
- return [self.branches[0](x[0])]
-
- for i in range(self.num_branches):
- x[i] = self.branches[i](x[i])
-
- x_fuse = []
- for i in range(len(self.fuse_layers)):
- y = 0
- for j in range(self.num_branches):
- if i == j:
- y += x[j]
- else:
- y += self.fuse_layers[i][j](x[j])
- x_fuse.append(self.relu(y))
- return x_fuse
-
-
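The fuse-layer construction above is easier to follow with a concrete case. The sketch below is a standalone illustration in plain torch for two branches with channels (32, 64) at spatial sizes (8x8, 4x4); it substitutes nn.Conv2d/nn.BatchNorm2d for mmcv's build_conv_layer/build_norm_layer, so the layer names and the choice of BatchNorm are assumptions for illustration, not taken from the module.

    import torch
    import torch.nn as nn

    x = [torch.randn(1, 32, 8, 8), torch.randn(1, 64, 4, 4)]

    # j > i: 1x1 conv to match channels, then nearest upsample by 2**(j - i)
    to_branch0_from1 = nn.Sequential(
        nn.Conv2d(64, 32, kernel_size=1, bias=False),
        nn.BatchNorm2d(32),
        nn.Upsample(scale_factor=2, mode='nearest'))

    # j < i: one 3x3 stride-2 conv per resolution gap (a single step here)
    to_branch1_from0 = nn.Sequential(
        nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=1, bias=False),
        nn.BatchNorm2d(64))

    y0 = torch.relu(x[0] + to_branch0_from1(x[1]))   # (1, 32, 8, 8)
    y1 = torch.relu(to_branch1_from0(x[0]) + x[1])   # (1, 64, 4, 4)
    print(y0.shape, y1.shape)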
-@MODELS.register_module()
-class HRNet(BaseModule):
- """HRNet backbone.
-
- `High-Resolution Representations for Labeling Pixels and Regions
- arXiv: `_.
-
- Args:
- extra (dict): Detailed configuration for each stage of HRNet.
- There must be 4 stages, the configuration for each stage must have
- 5 keys:
-
- - num_modules(int): The number of HRModule in this stage.
- - num_branches(int): The number of branches in the HRModule.
- - block(str): The type of convolution block.
- - num_blocks(tuple): The number of blocks in each branch.
- The length must be equal to num_branches.
- - num_channels(tuple): The number of channels in each branch.
- The length must be equal to num_branches.
- in_channels (int): Number of input image channels. Default: 3.
- conv_cfg (dict): Dictionary to construct and config conv layer.
- norm_cfg (dict): Dictionary to construct and config norm layer.
- norm_eval (bool): Whether to set norm layers to eval mode, namely,
- freeze running stats (mean and var). Note: Effect on Batch Norm
- and its variants only. Default: True.
- with_cp (bool): Use checkpoint or not. Using checkpoint will save some
- memory while slowing down the training speed. Default: False.
- zero_init_residual (bool): Whether to use zero init for last norm layer
- in resblocks to let them behave as identity. Default: False.
- multiscale_output (bool): Whether to output multi-level features
- produced by multiple branches. If False, only the first level
- feature will be output. Default: True.
- pretrained (str, optional): Model pretrained path. Default: None.
- init_cfg (dict or list[dict], optional): Initialization config dict.
- Default: None.
-
- Example:
- >>> from mmdet.models import HRNet
- >>> import torch
- >>> extra = dict(
- >>> stage1=dict(
- >>> num_modules=1,
- >>> num_branches=1,
- >>> block='BOTTLENECK',
- >>> num_blocks=(4, ),
- >>> num_channels=(64, )),
- >>> stage2=dict(
- >>> num_modules=1,
- >>> num_branches=2,
- >>> block='BASIC',
- >>> num_blocks=(4, 4),
- >>> num_channels=(32, 64)),
- >>> stage3=dict(
- >>> num_modules=4,
- >>> num_branches=3,
- >>> block='BASIC',
- >>> num_blocks=(4, 4, 4),
- >>> num_channels=(32, 64, 128)),
- >>> stage4=dict(
- >>> num_modules=3,
- >>> num_branches=4,
- >>> block='BASIC',
- >>> num_blocks=(4, 4, 4, 4),
- >>> num_channels=(32, 64, 128, 256)))
- >>> self = HRNet(extra, in_channels=1)
- >>> self.eval()
- >>> inputs = torch.rand(1, 1, 32, 32)
- >>> level_outputs = self.forward(inputs)
- >>> for level_out in level_outputs:
- ... print(tuple(level_out.shape))
- (1, 32, 8, 8)
- (1, 64, 4, 4)
- (1, 128, 2, 2)
- (1, 256, 1, 1)
- """
-
- blocks_dict = {'BASIC': BasicBlock, 'BOTTLENECK': Bottleneck}
-
- def __init__(self,
- extra,
- in_channels=3,
- conv_cfg=None,
- norm_cfg=dict(type='BN'),
- norm_eval=True,
- with_cp=False,
- zero_init_residual=False,
- multiscale_output=True,
- pretrained=None,
- init_cfg=None):
- super(HRNet, self).__init__(init_cfg)
-
- self.pretrained = pretrained
- assert not (init_cfg and pretrained), \
- 'init_cfg and pretrained cannot be specified at the same time'
- if isinstance(pretrained, str):
- warnings.warn('DeprecationWarning: pretrained is deprecated, '
- 'please use "init_cfg" instead')
- self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
- elif pretrained is None:
- if init_cfg is None:
- self.init_cfg = [
- dict(type='Kaiming', layer='Conv2d'),
- dict(
- type='Constant',
- val=1,
- layer=['_BatchNorm', 'GroupNorm'])
- ]
- else:
- raise TypeError('pretrained must be a str or None')
-
- # Assert configurations of 4 stages are in extra
- assert 'stage1' in extra and 'stage2' in extra \
- and 'stage3' in extra and 'stage4' in extra
- # Assert whether the length of `num_blocks` and `num_channels` are
- # equal to `num_branches`
- for i in range(4):
- cfg = extra[f'stage{i + 1}']
- assert len(cfg['num_blocks']) == cfg['num_branches'] and \
- len(cfg['num_channels']) == cfg['num_branches']
-
- self.extra = extra
- self.conv_cfg = conv_cfg
- self.norm_cfg = norm_cfg
- self.norm_eval = norm_eval
- self.with_cp = with_cp
- self.zero_init_residual = zero_init_residual
-
- # stem net
- self.norm1_name, norm1 = build_norm_layer(self.norm_cfg, 64, postfix=1)
- self.norm2_name, norm2 = build_norm_layer(self.norm_cfg, 64, postfix=2)
-
- self.conv1 = build_conv_layer(
- self.conv_cfg,
- in_channels,
- 64,
- kernel_size=3,
- stride=2,
- padding=1,
- bias=False)
-
- self.add_module(self.norm1_name, norm1)
- self.conv2 = build_conv_layer(
- self.conv_cfg,
- 64,
- 64,
- kernel_size=3,
- stride=2,
- padding=1,
- bias=False)
-
- self.add_module(self.norm2_name, norm2)
- self.relu = nn.ReLU(inplace=True)
-
- # stage 1
- self.stage1_cfg = self.extra['stage1']
- num_channels = self.stage1_cfg['num_channels'][0]
- block_type = self.stage1_cfg['block']
- num_blocks = self.stage1_cfg['num_blocks'][0]
-
- block = self.blocks_dict[block_type]
- stage1_out_channels = num_channels * block.expansion
- self.layer1 = self._make_layer(block, 64, num_channels, num_blocks)
-
- # stage 2
- self.stage2_cfg = self.extra['stage2']
- num_channels = self.stage2_cfg['num_channels']
- block_type = self.stage2_cfg['block']
-
- block = self.blocks_dict[block_type]
- num_channels = [channel * block.expansion for channel in num_channels]
- self.transition1 = self._make_transition_layer([stage1_out_channels],
- num_channels)
- self.stage2, pre_stage_channels = self._make_stage(
- self.stage2_cfg, num_channels)
-
- # stage 3
- self.stage3_cfg = self.extra['stage3']
- num_channels = self.stage3_cfg['num_channels']
- block_type = self.stage3_cfg['block']
-
- block = self.blocks_dict[block_type]
- num_channels = [channel * block.expansion for channel in num_channels]
- self.transition2 = self._make_transition_layer(pre_stage_channels,
- num_channels)
- self.stage3, pre_stage_channels = self._make_stage(
- self.stage3_cfg, num_channels)
-
- # stage 4
- self.stage4_cfg = self.extra['stage4']
- num_channels = self.stage4_cfg['num_channels']
- block_type = self.stage4_cfg['block']
-
- block = self.blocks_dict[block_type]
- num_channels = [channel * block.expansion for channel in num_channels]
- self.transition3 = self._make_transition_layer(pre_stage_channels,
- num_channels)
- self.stage4, pre_stage_channels = self._make_stage(
- self.stage4_cfg, num_channels, multiscale_output=multiscale_output)
-
- @property
- def norm1(self):
- """nn.Module: the normalization layer named "norm1" """
- return getattr(self, self.norm1_name)
-
- @property
- def norm2(self):
- """nn.Module: the normalization layer named "norm2" """
- return getattr(self, self.norm2_name)
-
- def _make_transition_layer(self, num_channels_pre_layer,
- num_channels_cur_layer):
- num_branches_cur = len(num_channels_cur_layer)
- num_branches_pre = len(num_channels_pre_layer)
-
- transition_layers = []
- for i in range(num_branches_cur):
- if i < num_branches_pre:
- if num_channels_cur_layer[i] != num_channels_pre_layer[i]:
- transition_layers.append(
- nn.Sequential(
- build_conv_layer(
- self.conv_cfg,
- num_channels_pre_layer[i],
- num_channels_cur_layer[i],
- kernel_size=3,
- stride=1,
- padding=1,
- bias=False),
- build_norm_layer(self.norm_cfg,
- num_channels_cur_layer[i])[1],
- nn.ReLU(inplace=True)))
- else:
- transition_layers.append(None)
- else:
- conv_downsamples = []
- for j in range(i + 1 - num_branches_pre):
- in_channels = num_channels_pre_layer[-1]
- out_channels = num_channels_cur_layer[i] \
- if j == i - num_branches_pre else in_channels
- conv_downsamples.append(
- nn.Sequential(
- build_conv_layer(
- self.conv_cfg,
- in_channels,
- out_channels,
- kernel_size=3,
- stride=2,
- padding=1,
- bias=False),
- build_norm_layer(self.norm_cfg, out_channels)[1],
- nn.ReLU(inplace=True)))
- transition_layers.append(nn.Sequential(*conv_downsamples))
-
- return nn.ModuleList(transition_layers)
-
- def _make_layer(self, block, inplanes, planes, blocks, stride=1):
- downsample = None
- if stride != 1 or inplanes != planes * block.expansion:
- downsample = nn.Sequential(
- build_conv_layer(
- self.conv_cfg,
- inplanes,
- planes * block.expansion,
- kernel_size=1,
- stride=stride,
- bias=False),
- build_norm_layer(self.norm_cfg, planes * block.expansion)[1])
-
- layers = []
- block_init_cfg = None
- if self.pretrained is None and not hasattr(
- self, 'init_cfg') and self.zero_init_residual:
- if block is BasicBlock:
- block_init_cfg = dict(
- type='Constant', val=0, override=dict(name='norm2'))
- elif block is Bottleneck:
- block_init_cfg = dict(
- type='Constant', val=0, override=dict(name='norm3'))
- layers.append(
- block(
- inplanes,
- planes,
- stride,
- downsample=downsample,
- with_cp=self.with_cp,
- norm_cfg=self.norm_cfg,
- conv_cfg=self.conv_cfg,
- init_cfg=block_init_cfg,
- ))
- inplanes = planes * block.expansion
- for i in range(1, blocks):
- layers.append(
- block(
- inplanes,
- planes,
- with_cp=self.with_cp,
- norm_cfg=self.norm_cfg,
- conv_cfg=self.conv_cfg,
- init_cfg=block_init_cfg))
-
- return Sequential(*layers)
-
- def _make_stage(self, layer_config, in_channels, multiscale_output=True):
- num_modules = layer_config['num_modules']
- num_branches = layer_config['num_branches']
- num_blocks = layer_config['num_blocks']
- num_channels = layer_config['num_channels']
- block = self.blocks_dict[layer_config['block']]
-
- hr_modules = []
- block_init_cfg = None
- if self.pretrained is None and not hasattr(
- self, 'init_cfg') and self.zero_init_residual:
- if block is BasicBlock:
- block_init_cfg = dict(
- type='Constant', val=0, override=dict(name='norm2'))
- elif block is Bottleneck:
- block_init_cfg = dict(
- type='Constant', val=0, override=dict(name='norm3'))
-
- for i in range(num_modules):
-            # multiscale_output is only used for the last module
- if not multiscale_output and i == num_modules - 1:
- reset_multiscale_output = False
- else:
- reset_multiscale_output = True
-
- hr_modules.append(
- HRModule(
- num_branches,
- block,
- num_blocks,
- in_channels,
- num_channels,
- reset_multiscale_output,
- with_cp=self.with_cp,
- norm_cfg=self.norm_cfg,
- conv_cfg=self.conv_cfg,
- block_init_cfg=block_init_cfg))
-
- return Sequential(*hr_modules), in_channels
-
- def forward(self, x):
- """Forward function."""
- x = self.conv1(x)
- x = self.norm1(x)
- x = self.relu(x)
- x = self.conv2(x)
- x = self.norm2(x)
- x = self.relu(x)
- x = self.layer1(x)
-
- x_list = []
- for i in range(self.stage2_cfg['num_branches']):
- if self.transition1[i] is not None:
- x_list.append(self.transition1[i](x))
- else:
- x_list.append(x)
- y_list = self.stage2(x_list)
-
- x_list = []
- for i in range(self.stage3_cfg['num_branches']):
- if self.transition2[i] is not None:
- x_list.append(self.transition2[i](y_list[-1]))
- else:
- x_list.append(y_list[i])
- y_list = self.stage3(x_list)
-
- x_list = []
- for i in range(self.stage4_cfg['num_branches']):
- if self.transition3[i] is not None:
- x_list.append(self.transition3[i](y_list[-1]))
- else:
- x_list.append(y_list[i])
- y_list = self.stage4(x_list)
-
- return y_list
-
- def train(self, mode=True):
-        """Convert the model into training mode while keeping the normalization
-        layers frozen."""
- super(HRNet, self).train(mode)
- if mode and self.norm_eval:
- for m in self.modules():
- # trick: eval have effect on BatchNorm only
- if isinstance(m, _BatchNorm):
- m.eval()
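The train() override above implements the usual norm_eval trick: after the module is switched to training mode, every BatchNorm layer is put back into eval mode so its running statistics stay frozen. A minimal sketch of the same idea on a toy module (not HRNet itself):

    import torch.nn as nn
    from torch.nn.modules.batchnorm import _BatchNorm

    model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU())
    model.train()
    for m in model.modules():
        if isinstance(m, _BatchNorm):
            m.eval()                # running mean/var are no longer updated
    print([m.training for m in model.modules()])  # the BatchNorm entry is False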
diff --git a/spaces/KyanChen/RSPrompter/mmdet/models/losses/iou_loss.py b/spaces/KyanChen/RSPrompter/mmdet/models/losses/iou_loss.py
deleted file mode 100644
index cdffb3e0e3461010ed3d0119cf72b809387b4685..0000000000000000000000000000000000000000
--- a/spaces/KyanChen/RSPrompter/mmdet/models/losses/iou_loss.py
+++ /dev/null
@@ -1,744 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import math
-import warnings
-from typing import Optional
-
-import torch
-import torch.nn as nn
-from torch import Tensor
-
-from mmdet.registry import MODELS
-from mmdet.structures.bbox import bbox_overlaps
-from .utils import weighted_loss
-
-
-@weighted_loss
-def iou_loss(pred: Tensor,
- target: Tensor,
- linear: bool = False,
- mode: str = 'log',
- eps: float = 1e-6) -> Tensor:
- """IoU loss.
-
- Computing the IoU loss between a set of predicted bboxes and target bboxes.
-    In the default 'log' mode, the loss is the negative log of the IoU.
-
- Args:
- pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2),
- shape (n, 4).
- target (Tensor): Corresponding gt bboxes, shape (n, 4).
- linear (bool, optional): If True, use linear scale of loss instead of
- log scale. Default: False.
- mode (str): Loss scaling mode, including "linear", "square", and "log".
- Default: 'log'
- eps (float): Epsilon to avoid log(0).
-
- Return:
- Tensor: Loss tensor.
- """
- assert mode in ['linear', 'square', 'log']
- if linear:
- mode = 'linear'
- warnings.warn('DeprecationWarning: Setting "linear=True" in '
- 'iou_loss is deprecated, please use "mode=`linear`" '
- 'instead.')
- ious = bbox_overlaps(pred, target, is_aligned=True).clamp(min=eps)
- if mode == 'linear':
- loss = 1 - ious
- elif mode == 'square':
- loss = 1 - ious**2
- elif mode == 'log':
- loss = -ious.log()
- else:
- raise NotImplementedError
- return loss
-
-
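A quick numeric sketch of the three scaling modes, using a hand-rolled aligned IoU in plain torch rather than mmdet's bbox_overlaps (an illustration, not the code path used above):

    import torch

    pred = torch.tensor([[0., 0., 10., 10.]])
    target = torch.tensor([[0., 0., 10., 20.]])

    lt = torch.max(pred[:, :2], target[:, :2])
    rb = torch.min(pred[:, 2:], target[:, 2:])
    wh = (rb - lt).clamp(min=0)
    overlap = wh[:, 0] * wh[:, 1]
    area_p = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1])
    area_t = (target[:, 2] - target[:, 0]) * (target[:, 3] - target[:, 1])
    ious = (overlap / (area_p + area_t - overlap)).clamp(min=1e-6)  # 0.5

    print(1 - ious)       # 'linear' mode: 0.50
    print(1 - ious**2)    # 'square' mode: 0.75
    print(-ious.log())    # 'log' mode:    ~0.69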
-@weighted_loss
-def bounded_iou_loss(pred: Tensor,
- target: Tensor,
- beta: float = 0.2,
- eps: float = 1e-3) -> Tensor:
- """BIoULoss.
-
- This is an implementation of paper
- `Improving Object Localization with Fitness NMS and Bounded IoU Loss.
- `_.
-
- Args:
- pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2),
- shape (n, 4).
- target (Tensor): Corresponding gt bboxes, shape (n, 4).
- beta (float, optional): Beta parameter in smoothl1.
- eps (float, optional): Epsilon to avoid NaN values.
-
- Return:
- Tensor: Loss tensor.
- """
- pred_ctrx = (pred[:, 0] + pred[:, 2]) * 0.5
- pred_ctry = (pred[:, 1] + pred[:, 3]) * 0.5
- pred_w = pred[:, 2] - pred[:, 0]
- pred_h = pred[:, 3] - pred[:, 1]
- with torch.no_grad():
- target_ctrx = (target[:, 0] + target[:, 2]) * 0.5
- target_ctry = (target[:, 1] + target[:, 3]) * 0.5
- target_w = target[:, 2] - target[:, 0]
- target_h = target[:, 3] - target[:, 1]
-
- dx = target_ctrx - pred_ctrx
- dy = target_ctry - pred_ctry
-
- loss_dx = 1 - torch.max(
- (target_w - 2 * dx.abs()) /
- (target_w + 2 * dx.abs() + eps), torch.zeros_like(dx))
- loss_dy = 1 - torch.max(
- (target_h - 2 * dy.abs()) /
- (target_h + 2 * dy.abs() + eps), torch.zeros_like(dy))
- loss_dw = 1 - torch.min(target_w / (pred_w + eps), pred_w /
- (target_w + eps))
- loss_dh = 1 - torch.min(target_h / (pred_h + eps), pred_h /
- (target_h + eps))
- # view(..., -1) does not work for empty tensor
- loss_comb = torch.stack([loss_dx, loss_dy, loss_dw, loss_dh],
- dim=-1).flatten(1)
-
- loss = torch.where(loss_comb < beta, 0.5 * loss_comb * loss_comb / beta,
- loss_comb - 0.5 * beta)
- return loss
-
-
-@weighted_loss
-def giou_loss(pred: Tensor, target: Tensor, eps: float = 1e-7) -> Tensor:
- r"""`Generalized Intersection over Union: A Metric and A Loss for Bounding
- Box Regression `_.
-
- Args:
- pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2),
- shape (n, 4).
- target (Tensor): Corresponding gt bboxes, shape (n, 4).
- eps (float): Epsilon to avoid log(0).
-
- Return:
- Tensor: Loss tensor.
- """
- gious = bbox_overlaps(pred, target, mode='giou', is_aligned=True, eps=eps)
- loss = 1 - gious
- return loss
-
-
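For disjoint boxes the enclosing-box penalty keeps the loss informative, and the GIoU loss can exceed 1. A standalone sketch of the quantity that bbox_overlaps(mode='giou') returns for one aligned pair (plain torch, illustration only):

    import torch

    pred = torch.tensor([[0., 0., 2., 2.]])
    target = torch.tensor([[4., 0., 6., 2.]])

    wh = (torch.min(pred[:, 2:], target[:, 2:]) -
          torch.max(pred[:, :2], target[:, :2])).clamp(min=0)
    inter = wh[:, 0] * wh[:, 1]                    # 0, the boxes do not overlap
    union = 4. + 4. - inter                        # both boxes have area 4
    enclose_wh = (torch.max(pred[:, 2:], target[:, 2:]) -
                  torch.min(pred[:, :2], target[:, :2])).clamp(min=0)
    enclose = enclose_wh[:, 0] * enclose_wh[:, 1]  # 6 x 2 = 12
    giou = inter / union - (enclose - union) / enclose
    print(1 - giou)   # ~1.33; the linear IoU loss would saturate at 1 here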
-@weighted_loss
-def diou_loss(pred: Tensor, target: Tensor, eps: float = 1e-7) -> Tensor:
- r"""Implementation of `Distance-IoU Loss: Faster and Better
-    Learning for Bounding Box Regression <https://arxiv.org/abs/1911.08287>`_.
-
- Code is modified from https://github.com/Zzh-tju/DIoU.
-
- Args:
- pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2),
- shape (n, 4).
- target (Tensor): Corresponding gt bboxes, shape (n, 4).
- eps (float): Epsilon to avoid log(0).
-
- Return:
- Tensor: Loss tensor.
- """
- # overlap
- lt = torch.max(pred[:, :2], target[:, :2])
- rb = torch.min(pred[:, 2:], target[:, 2:])
- wh = (rb - lt).clamp(min=0)
- overlap = wh[:, 0] * wh[:, 1]
-
- # union
- ap = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1])
- ag = (target[:, 2] - target[:, 0]) * (target[:, 3] - target[:, 1])
- union = ap + ag - overlap + eps
-
- # IoU
- ious = overlap / union
-
- # enclose area
- enclose_x1y1 = torch.min(pred[:, :2], target[:, :2])
- enclose_x2y2 = torch.max(pred[:, 2:], target[:, 2:])
- enclose_wh = (enclose_x2y2 - enclose_x1y1).clamp(min=0)
-
- cw = enclose_wh[:, 0]
- ch = enclose_wh[:, 1]
-
- c2 = cw**2 + ch**2 + eps
-
- b1_x1, b1_y1 = pred[:, 0], pred[:, 1]
- b1_x2, b1_y2 = pred[:, 2], pred[:, 3]
- b2_x1, b2_y1 = target[:, 0], target[:, 1]
- b2_x2, b2_y2 = target[:, 2], target[:, 3]
-
- left = ((b2_x1 + b2_x2) - (b1_x1 + b1_x2))**2 / 4
- right = ((b2_y1 + b2_y2) - (b1_y1 + b1_y2))**2 / 4
- rho2 = left + right
-
- # DIoU
- dious = ious - rho2 / c2
- loss = 1 - dious
- return loss
-
-
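The extra term rho2 / c2 above is the squared distance between the box centres, normalised by the squared diagonal of the smallest enclosing box. A single-pair re-implementation (the helper name is made up for this sketch) shows the penalty vanishing for a perfect match and kicking in once the centres separate:

    import torch

    def diou_loss_1to1(p, t, eps=1e-7):
        wh = (torch.min(p[2:], t[2:]) - torch.max(p[:2], t[:2])).clamp(min=0)
        inter = wh[0] * wh[1]
        union = ((p[2] - p[0]) * (p[3] - p[1]) +
                 (t[2] - t[0]) * (t[3] - t[1]) - inter + eps)
        iou = inter / union
        c_wh = (torch.max(p[2:], t[2:]) - torch.min(p[:2], t[:2])).clamp(min=0)
        c2 = c_wh[0] ** 2 + c_wh[1] ** 2 + eps
        rho2 = (((t[0] + t[2]) - (p[0] + p[2])) ** 2 +
                ((t[1] + t[3]) - (p[1] + p[3])) ** 2) / 4
        return 1 - (iou - rho2 / c2)

    box = torch.tensor([0., 0., 4., 4.])
    print(diou_loss_1to1(box, box))                             # ~0, perfect match
    print(diou_loss_1to1(box, torch.tensor([2., 0., 6., 4.])))  # > 1 - IoU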
-@weighted_loss
-def ciou_loss(pred: Tensor, target: Tensor, eps: float = 1e-7) -> Tensor:
- r"""`Implementation of paper `Enhancing Geometric Factors into
- Model Learning and Inference for Object Detection and Instance
- Segmentation `_.
-
- Code is modified from https://github.com/Zzh-tju/CIoU.
-
- Args:
- pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2),
- shape (n, 4).
- target (Tensor): Corresponding gt bboxes, shape (n, 4).
- eps (float): Epsilon to avoid log(0).
-
- Return:
- Tensor: Loss tensor.
- """
- # overlap
- lt = torch.max(pred[:, :2], target[:, :2])
- rb = torch.min(pred[:, 2:], target[:, 2:])
- wh = (rb - lt).clamp(min=0)
- overlap = wh[:, 0] * wh[:, 1]
-
- # union
- ap = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1])
- ag = (target[:, 2] - target[:, 0]) * (target[:, 3] - target[:, 1])
- union = ap + ag - overlap + eps
-
- # IoU
- ious = overlap / union
-
- # enclose area
- enclose_x1y1 = torch.min(pred[:, :2], target[:, :2])
- enclose_x2y2 = torch.max(pred[:, 2:], target[:, 2:])
- enclose_wh = (enclose_x2y2 - enclose_x1y1).clamp(min=0)
-
- cw = enclose_wh[:, 0]
- ch = enclose_wh[:, 1]
-
- c2 = cw**2 + ch**2 + eps
-
- b1_x1, b1_y1 = pred[:, 0], pred[:, 1]
- b1_x2, b1_y2 = pred[:, 2], pred[:, 3]
- b2_x1, b2_y1 = target[:, 0], target[:, 1]
- b2_x2, b2_y2 = target[:, 2], target[:, 3]
-
- w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps
- w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps
-
- left = ((b2_x1 + b2_x2) - (b1_x1 + b1_x2))**2 / 4
- right = ((b2_y1 + b2_y2) - (b1_y1 + b1_y2))**2 / 4
- rho2 = left + right
-
- factor = 4 / math.pi**2
- v = factor * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)
-
- with torch.no_grad():
- alpha = (ious > 0.5).float() * v / (1 - ious + v)
-
- # CIoU
- cious = ious - (rho2 / c2 + alpha * v)
- loss = 1 - cious.clamp(min=-1.0, max=1.0)
- return loss
-
-
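The trade-off weight alpha above only switches on the aspect-ratio term v once the IoU passes 0.5; below that threshold CIoU reduces to DIoU. A quick sketch of that gating with toy widths and heights (illustration only):

    import math
    import torch

    w1, h1 = torch.tensor(4.), torch.tensor(2.)   # predicted box aspect
    w2, h2 = torch.tensor(2.), torch.tensor(4.)   # target box aspect
    v = (4 / math.pi ** 2) * torch.pow(
        torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)

    for iou in (torch.tensor(0.3), torch.tensor(0.8)):
        alpha = (iou > 0.5).float() * v / (1 - iou + v)
        print(iou.item(), alpha.item())   # 0.3 -> 0.0, 0.8 -> ~0.46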
-@weighted_loss
-def eiou_loss(pred: Tensor,
- target: Tensor,
- smooth_point: float = 0.1,
- eps: float = 1e-7) -> Tensor:
- r"""Implementation of paper `Extended-IoU Loss: A Systematic
- IoU-Related Method: Beyond Simplified Regression for Better
- Localization `_
-
- Code is modified from https://github.com//ShiqiYu/libfacedetection.train.
-
- Args:
- pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2),
- shape (n, 4).
- target (Tensor): Corresponding gt bboxes, shape (n, 4).
- smooth_point (float): hyperparameter, default is 0.1.
- eps (float): Epsilon to avoid log(0).
-
- Return:
- Tensor: Loss tensor.
- """
- px1, py1, px2, py2 = pred[:, 0], pred[:, 1], pred[:, 2], pred[:, 3]
- tx1, ty1, tx2, ty2 = target[:, 0], target[:, 1], target[:, 2], target[:, 3]
-
- # extent top left
- ex1 = torch.min(px1, tx1)
- ey1 = torch.min(py1, ty1)
-
- # intersection coordinates
- ix1 = torch.max(px1, tx1)
- iy1 = torch.max(py1, ty1)
- ix2 = torch.min(px2, tx2)
- iy2 = torch.min(py2, ty2)
-
- # extra
- xmin = torch.min(ix1, ix2)
- ymin = torch.min(iy1, iy2)
- xmax = torch.max(ix1, ix2)
- ymax = torch.max(iy1, iy2)
-
- # Intersection
- intersection = (ix2 - ex1) * (iy2 - ey1) + (xmin - ex1) * (ymin - ey1) - (
- ix1 - ex1) * (ymax - ey1) - (xmax - ex1) * (
- iy1 - ey1)
- # Union
- union = (px2 - px1) * (py2 - py1) + (tx2 - tx1) * (
- ty2 - ty1) - intersection + eps
-    # IoU complement: despite its name, `ious` stores 1 - IoU here
- ious = 1 - (intersection / union)
-
- # Smooth-EIoU
- smooth_sign = (ious < smooth_point).detach().float()
- loss = 0.5 * smooth_sign * (ious**2) / smooth_point + (1 - smooth_sign) * (
- ious - 0.5 * smooth_point)
- return loss
-
-
-@MODELS.register_module()
-class IoULoss(nn.Module):
- """IoULoss.
-
- Computing the IoU loss between a set of predicted bboxes and target bboxes.
-
- Args:
- linear (bool): If True, use linear scale of loss else determined
- by mode. Default: False.
- eps (float): Epsilon to avoid log(0).
- reduction (str): Options are "none", "mean" and "sum".
- loss_weight (float): Weight of loss.
- mode (str): Loss scaling mode, including "linear", "square", and "log".
- Default: 'log'
- """
-
- def __init__(self,
- linear: bool = False,
- eps: float = 1e-6,
- reduction: str = 'mean',
- loss_weight: float = 1.0,
- mode: str = 'log') -> None:
- super().__init__()
- assert mode in ['linear', 'square', 'log']
- if linear:
- mode = 'linear'
- warnings.warn('DeprecationWarning: Setting "linear=True" in '
- 'IOULoss is deprecated, please use "mode=`linear`" '
- 'instead.')
- self.mode = mode
- self.linear = linear
- self.eps = eps
- self.reduction = reduction
- self.loss_weight = loss_weight
-
- def forward(self,
- pred: Tensor,
- target: Tensor,
- weight: Optional[Tensor] = None,
- avg_factor: Optional[int] = None,
- reduction_override: Optional[str] = None,
- **kwargs) -> Tensor:
- """Forward function.
-
- Args:
- pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2),
- shape (n, 4).
- target (Tensor): The learning target of the prediction,
- shape (n, 4).
- weight (Tensor, optional): The weight of loss for each
- prediction. Defaults to None.
- avg_factor (int, optional): Average factor that is used to average
- the loss. Defaults to None.
- reduction_override (str, optional): The reduction method used to
- override the original reduction method of the loss.
- Defaults to None. Options are "none", "mean" and "sum".
-
- Return:
- Tensor: Loss tensor.
- """
- assert reduction_override in (None, 'none', 'mean', 'sum')
- reduction = (
- reduction_override if reduction_override else self.reduction)
- if (weight is not None) and (not torch.any(weight > 0)) and (
- reduction != 'none'):
- if pred.dim() == weight.dim() + 1:
- weight = weight.unsqueeze(1)
- return (pred * weight).sum() # 0
- if weight is not None and weight.dim() > 1:
- # TODO: remove this in the future
- # reduce the weight of shape (n, 4) to (n,) to match the
- # iou_loss of shape (n,)
- assert weight.shape == pred.shape
- weight = weight.mean(-1)
- loss = self.loss_weight * iou_loss(
- pred,
- target,
- weight,
- mode=self.mode,
- eps=self.eps,
- reduction=reduction,
- avg_factor=avg_factor,
- **kwargs)
- return loss
-
-
-@MODELS.register_module()
-class BoundedIoULoss(nn.Module):
- """BIoULoss.
-
- This is an implementation of paper
- `Improving Object Localization with Fitness NMS and Bounded IoU Loss.
- `_.
-
- Args:
- beta (float, optional): Beta parameter in smoothl1.
- eps (float, optional): Epsilon to avoid NaN values.
- reduction (str): Options are "none", "mean" and "sum".
- loss_weight (float): Weight of loss.
- """
-
- def __init__(self,
- beta: float = 0.2,
- eps: float = 1e-3,
- reduction: str = 'mean',
- loss_weight: float = 1.0) -> None:
- super().__init__()
- self.beta = beta
- self.eps = eps
- self.reduction = reduction
- self.loss_weight = loss_weight
-
- def forward(self,
- pred: Tensor,
- target: Tensor,
- weight: Optional[Tensor] = None,
- avg_factor: Optional[int] = None,
- reduction_override: Optional[str] = None,
- **kwargs) -> Tensor:
- """Forward function.
-
- Args:
- pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2),
- shape (n, 4).
- target (Tensor): The learning target of the prediction,
- shape (n, 4).
- weight (Optional[Tensor], optional): The weight of loss for each
- prediction. Defaults to None.
- avg_factor (Optional[int], optional): Average factor that is used
- to average the loss. Defaults to None.
- reduction_override (Optional[str], optional): The reduction method
- used to override the original reduction method of the loss.
- Defaults to None. Options are "none", "mean" and "sum".
-
- Returns:
- Tensor: Loss tensor.
- """
- if weight is not None and not torch.any(weight > 0):
- if pred.dim() == weight.dim() + 1:
- weight = weight.unsqueeze(1)
- return (pred * weight).sum() # 0
- assert reduction_override in (None, 'none', 'mean', 'sum')
- reduction = (
- reduction_override if reduction_override else self.reduction)
- loss = self.loss_weight * bounded_iou_loss(
- pred,
- target,
- weight,
- beta=self.beta,
- eps=self.eps,
- reduction=reduction,
- avg_factor=avg_factor,
- **kwargs)
- return loss
-
-
-@MODELS.register_module()
-class GIoULoss(nn.Module):
- r"""`Generalized Intersection over Union: A Metric and A Loss for Bounding
- Box Regression `_.
-
- Args:
- eps (float): Epsilon to avoid log(0).
- reduction (str): Options are "none", "mean" and "sum".
- loss_weight (float): Weight of loss.
- """
-
- def __init__(self,
- eps: float = 1e-6,
- reduction: str = 'mean',
- loss_weight: float = 1.0) -> None:
- super().__init__()
- self.eps = eps
- self.reduction = reduction
- self.loss_weight = loss_weight
-
- def forward(self,
- pred: Tensor,
- target: Tensor,
- weight: Optional[Tensor] = None,
- avg_factor: Optional[int] = None,
- reduction_override: Optional[str] = None,
- **kwargs) -> Tensor:
- """Forward function.
-
- Args:
- pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2),
- shape (n, 4).
- target (Tensor): The learning target of the prediction,
- shape (n, 4).
- weight (Optional[Tensor], optional): The weight of loss for each
- prediction. Defaults to None.
- avg_factor (Optional[int], optional): Average factor that is used
- to average the loss. Defaults to None.
- reduction_override (Optional[str], optional): The reduction method
- used to override the original reduction method of the loss.
- Defaults to None. Options are "none", "mean" and "sum".
-
- Returns:
- Tensor: Loss tensor.
- """
- if weight is not None and not torch.any(weight > 0):
- if pred.dim() == weight.dim() + 1:
- weight = weight.unsqueeze(1)
- return (pred * weight).sum() # 0
- assert reduction_override in (None, 'none', 'mean', 'sum')
- reduction = (
- reduction_override if reduction_override else self.reduction)
- if weight is not None and weight.dim() > 1:
- # TODO: remove this in the future
- # reduce the weight of shape (n, 4) to (n,) to match the
- # giou_loss of shape (n,)
- assert weight.shape == pred.shape
- weight = weight.mean(-1)
- loss = self.loss_weight * giou_loss(
- pred,
- target,
- weight,
- eps=self.eps,
- reduction=reduction,
- avg_factor=avg_factor,
- **kwargs)
- return loss
-
-
-@MODELS.register_module()
-class DIoULoss(nn.Module):
- r"""Implementation of `Distance-IoU Loss: Faster and Better
-    Learning for Bounding Box Regression <https://arxiv.org/abs/1911.08287>`_.
-
- Code is modified from https://github.com/Zzh-tju/DIoU.
-
- Args:
- eps (float): Epsilon to avoid log(0).
- reduction (str): Options are "none", "mean" and "sum".
- loss_weight (float): Weight of loss.
- """
-
- def __init__(self,
- eps: float = 1e-6,
- reduction: str = 'mean',
- loss_weight: float = 1.0) -> None:
- super().__init__()
- self.eps = eps
- self.reduction = reduction
- self.loss_weight = loss_weight
-
- def forward(self,
- pred: Tensor,
- target: Tensor,
- weight: Optional[Tensor] = None,
- avg_factor: Optional[int] = None,
- reduction_override: Optional[str] = None,
- **kwargs) -> Tensor:
- """Forward function.
-
- Args:
- pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2),
- shape (n, 4).
- target (Tensor): The learning target of the prediction,
- shape (n, 4).
- weight (Optional[Tensor], optional): The weight of loss for each
- prediction. Defaults to None.
- avg_factor (Optional[int], optional): Average factor that is used
- to average the loss. Defaults to None.
- reduction_override (Optional[str], optional): The reduction method
- used to override the original reduction method of the loss.
- Defaults to None. Options are "none", "mean" and "sum".
-
- Returns:
- Tensor: Loss tensor.
- """
- if weight is not None and not torch.any(weight > 0):
- if pred.dim() == weight.dim() + 1:
- weight = weight.unsqueeze(1)
- return (pred * weight).sum() # 0
- assert reduction_override in (None, 'none', 'mean', 'sum')
- reduction = (
- reduction_override if reduction_override else self.reduction)
- if weight is not None and weight.dim() > 1:
- # TODO: remove this in the future
- # reduce the weight of shape (n, 4) to (n,) to match the
-            # diou_loss of shape (n,)
- assert weight.shape == pred.shape
- weight = weight.mean(-1)
- loss = self.loss_weight * diou_loss(
- pred,
- target,
- weight,
- eps=self.eps,
- reduction=reduction,
- avg_factor=avg_factor,
- **kwargs)
- return loss
-
-
-@MODELS.register_module()
-class CIoULoss(nn.Module):
- r"""`Implementation of paper `Enhancing Geometric Factors into
- Model Learning and Inference for Object Detection and Instance
- Segmentation `_.
-
- Code is modified from https://github.com/Zzh-tju/CIoU.
-
- Args:
- eps (float): Epsilon to avoid log(0).
- reduction (str): Options are "none", "mean" and "sum".
- loss_weight (float): Weight of loss.
- """
-
- def __init__(self,
- eps: float = 1e-6,
- reduction: str = 'mean',
- loss_weight: float = 1.0) -> None:
- super().__init__()
- self.eps = eps
- self.reduction = reduction
- self.loss_weight = loss_weight
-
- def forward(self,
- pred: Tensor,
- target: Tensor,
- weight: Optional[Tensor] = None,
- avg_factor: Optional[int] = None,
- reduction_override: Optional[str] = None,
- **kwargs) -> Tensor:
- """Forward function.
-
- Args:
- pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2),
- shape (n, 4).
- target (Tensor): The learning target of the prediction,
- shape (n, 4).
- weight (Optional[Tensor], optional): The weight of loss for each
- prediction. Defaults to None.
- avg_factor (Optional[int], optional): Average factor that is used
- to average the loss. Defaults to None.
- reduction_override (Optional[str], optional): The reduction method
- used to override the original reduction method of the loss.
- Defaults to None. Options are "none", "mean" and "sum".
-
- Returns:
- Tensor: Loss tensor.
- """
- if weight is not None and not torch.any(weight > 0):
- if pred.dim() == weight.dim() + 1:
- weight = weight.unsqueeze(1)
- return (pred * weight).sum() # 0
- assert reduction_override in (None, 'none', 'mean', 'sum')
- reduction = (
- reduction_override if reduction_override else self.reduction)
- if weight is not None and weight.dim() > 1:
- # TODO: remove this in the future
- # reduce the weight of shape (n, 4) to (n,) to match the
-            # ciou_loss of shape (n,)
- assert weight.shape == pred.shape
- weight = weight.mean(-1)
- loss = self.loss_weight * ciou_loss(
- pred,
- target,
- weight,
- eps=self.eps,
- reduction=reduction,
- avg_factor=avg_factor,
- **kwargs)
- return loss
-
-
-@MODELS.register_module()
-class EIoULoss(nn.Module):
- r"""Implementation of paper `Extended-IoU Loss: A Systematic
- IoU-Related Method: Beyond Simplified Regression for Better
- Localization `_
-
- Code is modified from https://github.com//ShiqiYu/libfacedetection.train.
-
- Args:
- eps (float): Epsilon to avoid log(0).
- reduction (str): Options are "none", "mean" and "sum".
- loss_weight (float): Weight of loss.
- smooth_point (float): hyperparameter, default is 0.1.
- """
-
- def __init__(self,
- eps: float = 1e-6,
- reduction: str = 'mean',
- loss_weight: float = 1.0,
- smooth_point: float = 0.1) -> None:
- super().__init__()
- self.eps = eps
- self.reduction = reduction
- self.loss_weight = loss_weight
- self.smooth_point = smooth_point
-
- def forward(self,
- pred: Tensor,
- target: Tensor,
- weight: Optional[Tensor] = None,
- avg_factor: Optional[int] = None,
- reduction_override: Optional[str] = None,
- **kwargs) -> Tensor:
- """Forward function.
-
- Args:
- pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2),
- shape (n, 4).
- target (Tensor): The learning target of the prediction,
- shape (n, 4).
- weight (Optional[Tensor], optional): The weight of loss for each
- prediction. Defaults to None.
- avg_factor (Optional[int], optional): Average factor that is used
- to average the loss. Defaults to None.
- reduction_override (Optional[str], optional): The reduction method
- used to override the original reduction method of the loss.
- Defaults to None. Options are "none", "mean" and "sum".
-
- Returns:
- Tensor: Loss tensor.
- """
- if weight is not None and not torch.any(weight > 0):
- if pred.dim() == weight.dim() + 1:
- weight = weight.unsqueeze(1)
- return (pred * weight).sum() # 0
- assert reduction_override in (None, 'none', 'mean', 'sum')
- reduction = (
- reduction_override if reduction_override else self.reduction)
- if weight is not None and weight.dim() > 1:
- assert weight.shape == pred.shape
- weight = weight.mean(-1)
- loss = self.loss_weight * eiou_loss(
- pred,
- target,
- weight,
- smooth_point=self.smooth_point,
- eps=self.eps,
- reduction=reduction,
- avg_factor=avg_factor,
- **kwargs)
- return loss
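A hedged usage sketch for this file, assuming an environment where mmdet (which provides this module) is installed: every class follows the forward(pred, target, weight=None, avg_factor=None, reduction_override=None) contract shown above, and configs normally build them through the MODELS registry by type name.

    import torch
    from mmdet.models.losses.iou_loss import GIoULoss

    # in a config this would be dict(type='GIoULoss', loss_weight=2.0)
    loss_fn = GIoULoss(loss_weight=2.0)
    pred = torch.tensor([[0., 0., 10., 10.]], requires_grad=True)
    target = torch.tensor([[1., 1., 11., 11.]])
    loss = loss_fn(pred, target)       # mean-reduced scalar, scaled by loss_weight
    loss.backward()
    print(loss, pred.grad.shape)       # tensor(...), torch.Size([1, 4])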
diff --git a/spaces/KyanChen/RSPrompter/mmdet/models/roi_heads/bbox_heads/sabl_head.py b/spaces/KyanChen/RSPrompter/mmdet/models/roi_heads/bbox_heads/sabl_head.py
deleted file mode 100644
index 9a9ee6aba9669514ec8ce7218e8c97e026830f6c..0000000000000000000000000000000000000000
--- a/spaces/KyanChen/RSPrompter/mmdet/models/roi_heads/bbox_heads/sabl_head.py
+++ /dev/null
@@ -1,684 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-from typing import List, Optional, Sequence, Tuple
-
-import numpy as np
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from mmcv.cnn import ConvModule
-from mmengine.config import ConfigDict
-from mmengine.structures import InstanceData
-from torch import Tensor
-
-from mmdet.models.layers import multiclass_nms
-from mmdet.models.losses import accuracy
-from mmdet.models.task_modules import SamplingResult
-from mmdet.models.utils import multi_apply
-from mmdet.registry import MODELS, TASK_UTILS
-from mmdet.utils import ConfigType, InstanceList, OptConfigType, OptMultiConfig
-from .bbox_head import BBoxHead
-
-
-@MODELS.register_module()
-class SABLHead(BBoxHead):
- """Side-Aware Boundary Localization (SABL) for RoI-Head.
-
- Side-Aware features are extracted by conv layers
- with an attention mechanism.
- Boundary Localization with Bucketing and Bucketing Guided Rescoring
- are implemented in BucketingBBoxCoder.
-
- Please refer to https://arxiv.org/abs/1912.04260 for more details.
-
- Args:
- cls_in_channels (int): Input channels of cls RoI feature. \
- Defaults to 256.
- reg_in_channels (int): Input channels of reg RoI feature. \
- Defaults to 256.
- roi_feat_size (int): Size of RoI features. Defaults to 7.
- reg_feat_up_ratio (int): Upsample ratio of reg features. \
- Defaults to 2.
- reg_pre_kernel (int): Kernel of 2D conv layers before \
- attention pooling. Defaults to 3.
- reg_post_kernel (int): Kernel of 1D conv layers after \
- attention pooling. Defaults to 3.
- reg_pre_num (int): Number of pre convs. Defaults to 2.
- reg_post_num (int): Number of post convs. Defaults to 1.
- num_classes (int): Number of classes in dataset. Defaults to 80.
- cls_out_channels (int): Hidden channels in cls fcs. Defaults to 1024.
- reg_offset_out_channels (int): Hidden and output channel \
- of reg offset branch. Defaults to 256.
- reg_cls_out_channels (int): Hidden and output channel \
- of reg cls branch. Defaults to 256.
- num_cls_fcs (int): Number of fcs for cls branch. Defaults to 1.
-        num_reg_fcs (int): Number of fcs for reg branch. Defaults to 0.
- reg_class_agnostic (bool): Class agnostic regression or not. \
- Defaults to True.
- norm_cfg (dict): Config of norm layers. Defaults to None.
-        bbox_coder (dict): Config of bbox coder. Defaults to 'BucketingBBoxCoder'.
- loss_cls (dict): Config of classification loss.
- loss_bbox_cls (dict): Config of classification loss for bbox branch.
- loss_bbox_reg (dict): Config of regression loss for bbox branch.
- init_cfg (dict or list[dict], optional): Initialization config dict.
- Defaults to None.
- """
-
- def __init__(self,
- num_classes: int,
- cls_in_channels: int = 256,
- reg_in_channels: int = 256,
- roi_feat_size: int = 7,
- reg_feat_up_ratio: int = 2,
- reg_pre_kernel: int = 3,
- reg_post_kernel: int = 3,
- reg_pre_num: int = 2,
- reg_post_num: int = 1,
- cls_out_channels: int = 1024,
- reg_offset_out_channels: int = 256,
- reg_cls_out_channels: int = 256,
- num_cls_fcs: int = 1,
- num_reg_fcs: int = 0,
- reg_class_agnostic: bool = True,
- norm_cfg: OptConfigType = None,
- bbox_coder: ConfigType = dict(
- type='BucketingBBoxCoder',
- num_buckets=14,
- scale_factor=1.7),
- loss_cls: ConfigType = dict(
- type='CrossEntropyLoss',
- use_sigmoid=False,
- loss_weight=1.0),
- loss_bbox_cls: ConfigType = dict(
- type='CrossEntropyLoss',
- use_sigmoid=True,
- loss_weight=1.0),
- loss_bbox_reg: ConfigType = dict(
- type='SmoothL1Loss', beta=0.1, loss_weight=1.0),
- init_cfg: OptMultiConfig = None) -> None:
- super(BBoxHead, self).__init__(init_cfg=init_cfg)
- self.cls_in_channels = cls_in_channels
- self.reg_in_channels = reg_in_channels
- self.roi_feat_size = roi_feat_size
- self.reg_feat_up_ratio = int(reg_feat_up_ratio)
- self.num_buckets = bbox_coder['num_buckets']
- assert self.reg_feat_up_ratio // 2 >= 1
- self.up_reg_feat_size = roi_feat_size * self.reg_feat_up_ratio
- assert self.up_reg_feat_size == bbox_coder['num_buckets']
- self.reg_pre_kernel = reg_pre_kernel
- self.reg_post_kernel = reg_post_kernel
- self.reg_pre_num = reg_pre_num
- self.reg_post_num = reg_post_num
- self.num_classes = num_classes
- self.cls_out_channels = cls_out_channels
- self.reg_offset_out_channels = reg_offset_out_channels
- self.reg_cls_out_channels = reg_cls_out_channels
- self.num_cls_fcs = num_cls_fcs
- self.num_reg_fcs = num_reg_fcs
- self.reg_class_agnostic = reg_class_agnostic
- assert self.reg_class_agnostic
- self.norm_cfg = norm_cfg
-
- self.bbox_coder = TASK_UTILS.build(bbox_coder)
- self.loss_cls = MODELS.build(loss_cls)
- self.loss_bbox_cls = MODELS.build(loss_bbox_cls)
- self.loss_bbox_reg = MODELS.build(loss_bbox_reg)
-
- self.cls_fcs = self._add_fc_branch(self.num_cls_fcs,
- self.cls_in_channels,
- self.roi_feat_size,
- self.cls_out_channels)
-
- self.side_num = int(np.ceil(self.num_buckets / 2))
-
- if self.reg_feat_up_ratio > 1:
- self.upsample_x = nn.ConvTranspose1d(
- reg_in_channels,
- reg_in_channels,
- self.reg_feat_up_ratio,
- stride=self.reg_feat_up_ratio)
- self.upsample_y = nn.ConvTranspose1d(
- reg_in_channels,
- reg_in_channels,
- self.reg_feat_up_ratio,
- stride=self.reg_feat_up_ratio)
-
- self.reg_pre_convs = nn.ModuleList()
- for i in range(self.reg_pre_num):
- reg_pre_conv = ConvModule(
- reg_in_channels,
- reg_in_channels,
- kernel_size=reg_pre_kernel,
- padding=reg_pre_kernel // 2,
- norm_cfg=norm_cfg,
- act_cfg=dict(type='ReLU'))
- self.reg_pre_convs.append(reg_pre_conv)
-
- self.reg_post_conv_xs = nn.ModuleList()
- for i in range(self.reg_post_num):
- reg_post_conv_x = ConvModule(
- reg_in_channels,
- reg_in_channels,
- kernel_size=(1, reg_post_kernel),
- padding=(0, reg_post_kernel // 2),
- norm_cfg=norm_cfg,
- act_cfg=dict(type='ReLU'))
- self.reg_post_conv_xs.append(reg_post_conv_x)
- self.reg_post_conv_ys = nn.ModuleList()
- for i in range(self.reg_post_num):
- reg_post_conv_y = ConvModule(
- reg_in_channels,
- reg_in_channels,
- kernel_size=(reg_post_kernel, 1),
- padding=(reg_post_kernel // 2, 0),
- norm_cfg=norm_cfg,
- act_cfg=dict(type='ReLU'))
- self.reg_post_conv_ys.append(reg_post_conv_y)
-
- self.reg_conv_att_x = nn.Conv2d(reg_in_channels, 1, 1)
- self.reg_conv_att_y = nn.Conv2d(reg_in_channels, 1, 1)
-
- self.fc_cls = nn.Linear(self.cls_out_channels, self.num_classes + 1)
- self.relu = nn.ReLU(inplace=True)
-
- self.reg_cls_fcs = self._add_fc_branch(self.num_reg_fcs,
- self.reg_in_channels, 1,
- self.reg_cls_out_channels)
- self.reg_offset_fcs = self._add_fc_branch(self.num_reg_fcs,
- self.reg_in_channels, 1,
- self.reg_offset_out_channels)
- self.fc_reg_cls = nn.Linear(self.reg_cls_out_channels, 1)
- self.fc_reg_offset = nn.Linear(self.reg_offset_out_channels, 1)
-
- if init_cfg is None:
- self.init_cfg = [
- dict(
- type='Xavier',
- layer='Linear',
- distribution='uniform',
- override=[
- dict(type='Normal', name='reg_conv_att_x', std=0.01),
- dict(type='Normal', name='reg_conv_att_y', std=0.01),
- dict(type='Normal', name='fc_reg_cls', std=0.01),
- dict(type='Normal', name='fc_cls', std=0.01),
- dict(type='Normal', name='fc_reg_offset', std=0.001)
- ])
- ]
- if self.reg_feat_up_ratio > 1:
- self.init_cfg += [
- dict(
- type='Kaiming',
- distribution='normal',
- override=[
- dict(name='upsample_x'),
- dict(name='upsample_y')
- ])
- ]
-
- def _add_fc_branch(self, num_branch_fcs: int, in_channels: int,
- roi_feat_size: int,
- fc_out_channels: int) -> nn.ModuleList:
- """build fc layers."""
- in_channels = in_channels * roi_feat_size * roi_feat_size
- branch_fcs = nn.ModuleList()
- for i in range(num_branch_fcs):
- fc_in_channels = (in_channels if i == 0 else fc_out_channels)
- branch_fcs.append(nn.Linear(fc_in_channels, fc_out_channels))
- return branch_fcs
-
- def cls_forward(self, cls_x: Tensor) -> Tensor:
- """forward of classification fc layers."""
- cls_x = cls_x.view(cls_x.size(0), -1)
- for fc in self.cls_fcs:
- cls_x = self.relu(fc(cls_x))
- cls_score = self.fc_cls(cls_x)
- return cls_score
-
- def attention_pool(self, reg_x: Tensor) -> tuple:
-        """Extract direction-specific features fx and fy with an attention
-        mechanism."""
- reg_fx = reg_x
- reg_fy = reg_x
- reg_fx_att = self.reg_conv_att_x(reg_fx).sigmoid()
- reg_fy_att = self.reg_conv_att_y(reg_fy).sigmoid()
- reg_fx_att = reg_fx_att / reg_fx_att.sum(dim=2).unsqueeze(2)
- reg_fy_att = reg_fy_att / reg_fy_att.sum(dim=3).unsqueeze(3)
- reg_fx = (reg_fx * reg_fx_att).sum(dim=2)
- reg_fy = (reg_fy * reg_fy_att).sum(dim=3)
- return reg_fx, reg_fy
-
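Shape-wise, attention_pool turns an (N, C, H, W) RoI feature into row features of shape (N, C, W) and column features of shape (N, C, H) via attention-weighted sums over one spatial axis. A standalone sketch of the x-direction half with freshly initialised weights (illustration only):

    import torch
    import torch.nn as nn

    reg_x = torch.randn(2, 256, 7, 7)
    att_x = nn.Conv2d(256, 1, 1)(reg_x).sigmoid()
    att_x = att_x / att_x.sum(dim=2, keepdim=True)  # normalise over H
    reg_fx = (reg_x * att_x).sum(dim=2)             # one vector per column
    print(reg_fx.shape)                             # torch.Size([2, 256, 7])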
- def side_aware_feature_extractor(self, reg_x: Tensor) -> tuple:
-        """Refine and extract side-aware features without splitting them."""
- for reg_pre_conv in self.reg_pre_convs:
- reg_x = reg_pre_conv(reg_x)
- reg_fx, reg_fy = self.attention_pool(reg_x)
-
- if self.reg_post_num > 0:
- reg_fx = reg_fx.unsqueeze(2)
- reg_fy = reg_fy.unsqueeze(3)
- for i in range(self.reg_post_num):
- reg_fx = self.reg_post_conv_xs[i](reg_fx)
- reg_fy = self.reg_post_conv_ys[i](reg_fy)
- reg_fx = reg_fx.squeeze(2)
- reg_fy = reg_fy.squeeze(3)
- if self.reg_feat_up_ratio > 1:
- reg_fx = self.relu(self.upsample_x(reg_fx))
- reg_fy = self.relu(self.upsample_y(reg_fy))
- reg_fx = torch.transpose(reg_fx, 1, 2)
- reg_fy = torch.transpose(reg_fy, 1, 2)
- return reg_fx.contiguous(), reg_fy.contiguous()
-
- def reg_pred(self, x: Tensor, offset_fcs: nn.ModuleList,
- cls_fcs: nn.ModuleList) -> tuple:
-        """Predict bucketing estimation (cls_pred) and fine regression
-        (offset_pred) with side-aware features."""
- x_offset = x.view(-1, self.reg_in_channels)
- x_cls = x.view(-1, self.reg_in_channels)
-
- for fc in offset_fcs:
- x_offset = self.relu(fc(x_offset))
- for fc in cls_fcs:
- x_cls = self.relu(fc(x_cls))
- offset_pred = self.fc_reg_offset(x_offset)
- cls_pred = self.fc_reg_cls(x_cls)
-
- offset_pred = offset_pred.view(x.size(0), -1)
- cls_pred = cls_pred.view(x.size(0), -1)
-
- return offset_pred, cls_pred
-
- def side_aware_split(self, feat: Tensor) -> Tensor:
- """Split side-aware features aligned with orders of bucketing
- targets."""
- l_end = int(np.ceil(self.up_reg_feat_size / 2))
- r_start = int(np.floor(self.up_reg_feat_size / 2))
- feat_fl = feat[:, :l_end]
- feat_fr = feat[:, r_start:].flip(dims=(1, ))
- feat_fl = feat_fl.contiguous()
- feat_fr = feat_fr.contiguous()
- feat = torch.cat([feat_fl, feat_fr], dim=-1)
- return feat
-
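A toy trace of side_aware_split for up_reg_feat_size = 6, with integer arithmetic standing in for the np.ceil/np.floor calls above: the right half is flipped so that both halves are ordered from the outer edge inwards.

    import torch

    up_reg_feat_size = 6
    feat = torch.arange(6.).unsqueeze(0)        # [[0, 1, 2, 3, 4, 5]]
    l_end = (up_reg_feat_size + 1) // 2         # ceil(6 / 2) = 3
    r_start = up_reg_feat_size // 2             # floor(6 / 2) = 3
    left = feat[:, :l_end]                      # [[0, 1, 2]]
    right = feat[:, r_start:].flip(dims=(1,))   # [[5, 4, 3]]
    print(torch.cat([left, right], dim=-1))     # [[0, 1, 2, 5, 4, 3]]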
- def bbox_pred_split(self, bbox_pred: tuple,
- num_proposals_per_img: Sequence[int]) -> tuple:
- """Split batch bbox prediction back to each image."""
- bucket_cls_preds, bucket_offset_preds = bbox_pred
- bucket_cls_preds = bucket_cls_preds.split(num_proposals_per_img, 0)
- bucket_offset_preds = bucket_offset_preds.split(
- num_proposals_per_img, 0)
- bbox_pred = tuple(zip(bucket_cls_preds, bucket_offset_preds))
- return bbox_pred
-
- def reg_forward(self, reg_x: Tensor) -> tuple:
- """forward of regression branch."""
- outs = self.side_aware_feature_extractor(reg_x)
- edge_offset_preds = []
- edge_cls_preds = []
- reg_fx = outs[0]
- reg_fy = outs[1]
- offset_pred_x, cls_pred_x = self.reg_pred(reg_fx, self.reg_offset_fcs,
- self.reg_cls_fcs)
- offset_pred_y, cls_pred_y = self.reg_pred(reg_fy, self.reg_offset_fcs,
- self.reg_cls_fcs)
- offset_pred_x = self.side_aware_split(offset_pred_x)
- offset_pred_y = self.side_aware_split(offset_pred_y)
- cls_pred_x = self.side_aware_split(cls_pred_x)
- cls_pred_y = self.side_aware_split(cls_pred_y)
- edge_offset_preds = torch.cat([offset_pred_x, offset_pred_y], dim=-1)
- edge_cls_preds = torch.cat([cls_pred_x, cls_pred_y], dim=-1)
-
- return edge_cls_preds, edge_offset_preds
-
- def forward(self, x: Tensor) -> tuple:
- """Forward features from the upstream network."""
- bbox_pred = self.reg_forward(x)
- cls_score = self.cls_forward(x)
-
- return cls_score, bbox_pred
-
- def get_targets(self,
- sampling_results: List[SamplingResult],
- rcnn_train_cfg: ConfigDict,
- concat: bool = True) -> tuple:
- """Calculate the ground truth for all samples in a batch according to
- the sampling_results."""
- pos_proposals = [res.pos_bboxes for res in sampling_results]
- neg_proposals = [res.neg_bboxes for res in sampling_results]
- pos_gt_bboxes = [res.pos_gt_bboxes for res in sampling_results]
- pos_gt_labels = [res.pos_gt_labels for res in sampling_results]
- cls_reg_targets = self.bucket_target(
- pos_proposals,
- neg_proposals,
- pos_gt_bboxes,
- pos_gt_labels,
- rcnn_train_cfg,
- concat=concat)
- (labels, label_weights, bucket_cls_targets, bucket_cls_weights,
- bucket_offset_targets, bucket_offset_weights) = cls_reg_targets
- return (labels, label_weights, (bucket_cls_targets,
- bucket_offset_targets),
- (bucket_cls_weights, bucket_offset_weights))
-
- def bucket_target(self,
- pos_proposals_list: list,
- neg_proposals_list: list,
- pos_gt_bboxes_list: list,
- pos_gt_labels_list: list,
- rcnn_train_cfg: ConfigDict,
- concat: bool = True) -> tuple:
- """Compute bucketing estimation targets and fine regression targets for
- a batch of images."""
- (labels, label_weights, bucket_cls_targets, bucket_cls_weights,
- bucket_offset_targets, bucket_offset_weights) = multi_apply(
- self._bucket_target_single,
- pos_proposals_list,
- neg_proposals_list,
- pos_gt_bboxes_list,
- pos_gt_labels_list,
- cfg=rcnn_train_cfg)
-
- if concat:
- labels = torch.cat(labels, 0)
- label_weights = torch.cat(label_weights, 0)
- bucket_cls_targets = torch.cat(bucket_cls_targets, 0)
- bucket_cls_weights = torch.cat(bucket_cls_weights, 0)
- bucket_offset_targets = torch.cat(bucket_offset_targets, 0)
- bucket_offset_weights = torch.cat(bucket_offset_weights, 0)
- return (labels, label_weights, bucket_cls_targets, bucket_cls_weights,
- bucket_offset_targets, bucket_offset_weights)
-
- def _bucket_target_single(self, pos_proposals: Tensor,
- neg_proposals: Tensor, pos_gt_bboxes: Tensor,
- pos_gt_labels: Tensor, cfg: ConfigDict) -> tuple:
- """Compute bucketing estimation targets and fine regression targets for
- a single image.
-
- Args:
- pos_proposals (Tensor): positive proposals of a single image,
- Shape (n_pos, 4)
- neg_proposals (Tensor): negative proposals of a single image,
- Shape (n_neg, 4).
- pos_gt_bboxes (Tensor): gt bboxes assigned to positive proposals
- of a single image, Shape (n_pos, 4).
- pos_gt_labels (Tensor): gt labels assigned to positive proposals
- of a single image, Shape (n_pos, ).
- cfg (dict): Config of calculating targets
-
- Returns:
- tuple:
-
- - labels (Tensor): Labels in a single image. Shape (n,).
- - label_weights (Tensor): Label weights in a single image.
- Shape (n,)
- - bucket_cls_targets (Tensor): Bucket cls targets in
- a single image. Shape (n, num_buckets*2).
- - bucket_cls_weights (Tensor): Bucket cls weights in
- a single image. Shape (n, num_buckets*2).
- - bucket_offset_targets (Tensor): Bucket offset targets
- in a single image. Shape (n, num_buckets*2).
-            - bucket_offset_weights (Tensor): Bucket offset weights
- in a single image. Shape (n, num_buckets*2).
- """
- num_pos = pos_proposals.size(0)
- num_neg = neg_proposals.size(0)
- num_samples = num_pos + num_neg
- labels = pos_gt_bboxes.new_full((num_samples, ),
- self.num_classes,
- dtype=torch.long)
- label_weights = pos_proposals.new_zeros(num_samples)
- bucket_cls_targets = pos_proposals.new_zeros(num_samples,
- 4 * self.side_num)
- bucket_cls_weights = pos_proposals.new_zeros(num_samples,
- 4 * self.side_num)
- bucket_offset_targets = pos_proposals.new_zeros(
- num_samples, 4 * self.side_num)
- bucket_offset_weights = pos_proposals.new_zeros(
- num_samples, 4 * self.side_num)
- if num_pos > 0:
- labels[:num_pos] = pos_gt_labels
- label_weights[:num_pos] = 1.0
- (pos_bucket_offset_targets, pos_bucket_offset_weights,
- pos_bucket_cls_targets,
- pos_bucket_cls_weights) = self.bbox_coder.encode(
- pos_proposals, pos_gt_bboxes)
- bucket_cls_targets[:num_pos, :] = pos_bucket_cls_targets
- bucket_cls_weights[:num_pos, :] = pos_bucket_cls_weights
- bucket_offset_targets[:num_pos, :] = pos_bucket_offset_targets
- bucket_offset_weights[:num_pos, :] = pos_bucket_offset_weights
- if num_neg > 0:
- label_weights[-num_neg:] = 1.0
- return (labels, label_weights, bucket_cls_targets, bucket_cls_weights,
- bucket_offset_targets, bucket_offset_weights)
-
- def loss(self,
- cls_score: Tensor,
- bbox_pred: Tuple[Tensor, Tensor],
- rois: Tensor,
- labels: Tensor,
- label_weights: Tensor,
- bbox_targets: Tuple[Tensor, Tensor],
- bbox_weights: Tuple[Tensor, Tensor],
- reduction_override: Optional[str] = None) -> dict:
- """Calculate the loss based on the network predictions and targets.
-
- Args:
- cls_score (Tensor): Classification prediction
- results of all class, has shape
- (batch_size * num_proposals_single_image, num_classes)
-            bbox_pred (Tuple[Tensor, Tensor]): A tuple of regression
-                prediction results containing `bucket_cls_preds` and
-                `bucket_offset_preds`.
- rois (Tensor): RoIs with the shape
- (batch_size * num_proposals_single_image, 5) where the first
- column indicates batch id of each RoI.
- labels (Tensor): Gt_labels for all proposals in a batch, has
- shape (batch_size * num_proposals_single_image, ).
- label_weights (Tensor): Labels_weights for all proposals in a
- batch, has shape (batch_size * num_proposals_single_image, ).
-            bbox_targets (Tuple[Tensor, Tensor]): A tuple of regression
-                targets containing `bucket_cls_targets` and
-                `bucket_offset_targets`.
- bbox_weights (Tuple[Tensor, Tensor]): A tuple of regression
- weights containing `bucket_cls_weights` and
- `bucket_offset_weights`.
- reduction_override (str, optional): The reduction
- method used to override the original reduction
- method of the loss. Options are "none",
-                "mean" and "sum". Defaults to None.
-
- Returns:
- dict: A dictionary of loss.
- """
- losses = dict()
- if cls_score is not None:
- avg_factor = max(torch.sum(label_weights > 0).float().item(), 1.)
- losses['loss_cls'] = self.loss_cls(
- cls_score,
- labels,
- label_weights,
- avg_factor=avg_factor,
- reduction_override=reduction_override)
- losses['acc'] = accuracy(cls_score, labels)
-
- if bbox_pred is not None:
- bucket_cls_preds, bucket_offset_preds = bbox_pred
- bucket_cls_targets, bucket_offset_targets = bbox_targets
- bucket_cls_weights, bucket_offset_weights = bbox_weights
- # edge cls
- bucket_cls_preds = bucket_cls_preds.view(-1, self.side_num)
- bucket_cls_targets = bucket_cls_targets.view(-1, self.side_num)
- bucket_cls_weights = bucket_cls_weights.view(-1, self.side_num)
- losses['loss_bbox_cls'] = self.loss_bbox_cls(
- bucket_cls_preds,
- bucket_cls_targets,
- bucket_cls_weights,
- avg_factor=bucket_cls_targets.size(0),
- reduction_override=reduction_override)
-
- losses['loss_bbox_reg'] = self.loss_bbox_reg(
- bucket_offset_preds,
- bucket_offset_targets,
- bucket_offset_weights,
- avg_factor=bucket_offset_targets.size(0),
- reduction_override=reduction_override)
-
- return losses
-
- def _predict_by_feat_single(
- self,
- roi: Tensor,
- cls_score: Tensor,
- bbox_pred: Tuple[Tensor, Tensor],
- img_meta: dict,
- rescale: bool = False,
- rcnn_test_cfg: Optional[ConfigDict] = None) -> InstanceData:
- """Transform a single image's features extracted from the head into
- bbox results.
-
- Args:
- roi (Tensor): Boxes to be transformed. Has shape (num_boxes, 5).
- last dimension 5 arrange as (batch_index, x1, y1, x2, y2).
- cls_score (Tensor): Box scores, has shape
- (num_boxes, num_classes + 1).
- bbox_pred (Tuple[Tensor, Tensor]): Box cls preds and offset preds.
- img_meta (dict): image information.
- rescale (bool): If True, return boxes in original image space.
- Defaults to False.
- rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of Bbox Head.
- Defaults to None
-
- Returns:
- :obj:`InstanceData`: Detection results of each image
- Each item usually contains following keys.
-
- - scores (Tensor): Classification scores, has a shape
- (num_instance, )
- - labels (Tensor): Labels of bboxes, has a shape
- (num_instances, ).
- - bboxes (Tensor): Has a shape (num_instances, 4),
- the last dimension 4 arrange as (x1, y1, x2, y2).
- """
- results = InstanceData()
- if isinstance(cls_score, list):
- cls_score = sum(cls_score) / float(len(cls_score))
- scores = F.softmax(cls_score, dim=1) if cls_score is not None else None
- img_shape = img_meta['img_shape']
- if bbox_pred is not None:
- bboxes, confidences = self.bbox_coder.decode(
- roi[:, 1:], bbox_pred, img_shape)
- else:
- bboxes = roi[:, 1:].clone()
- confidences = None
- if img_shape is not None:
- bboxes[:, [0, 2]].clamp_(min=0, max=img_shape[1] - 1)
- bboxes[:, [1, 3]].clamp_(min=0, max=img_shape[0] - 1)
-
- if rescale and bboxes.size(0) > 0:
- assert img_meta.get('scale_factor') is not None
- scale_factor = bboxes.new_tensor(img_meta['scale_factor']).repeat(
- (1, 2))
- bboxes = (bboxes.view(bboxes.size(0), -1, 4) / scale_factor).view(
- bboxes.size()[0], -1)
-
- if rcnn_test_cfg is None:
- results.bboxes = bboxes
- results.scores = scores
- else:
- det_bboxes, det_labels = multiclass_nms(
- bboxes,
- scores,
- rcnn_test_cfg.score_thr,
- rcnn_test_cfg.nms,
- rcnn_test_cfg.max_per_img,
- score_factors=confidences)
- results.bboxes = det_bboxes[:, :4]
- results.scores = det_bboxes[:, -1]
- results.labels = det_labels
- return results
-
- def refine_bboxes(self, sampling_results: List[SamplingResult],
- bbox_results: dict,
- batch_img_metas: List[dict]) -> InstanceList:
- """Refine bboxes during training.
-
- Args:
- sampling_results (List[:obj:`SamplingResult`]): Sampling results.
- bbox_results (dict): Usually is a dictionary with keys:
-
- - `cls_score` (Tensor): Classification scores.
- - `bbox_pred` (Tensor): Box energies / deltas.
- - `rois` (Tensor): RoIs with the shape (n, 5) where the first
- column indicates batch id of each RoI.
- - `bbox_targets` (tuple): Ground truth for proposals in a
- single image. Containing the following list of Tensors:
- (labels, label_weights, bbox_targets, bbox_weights)
- batch_img_metas (List[dict]): List of image information.
-
- Returns:
- list[:obj:`InstanceData`]: Refined bboxes of each image.
- """
- pos_is_gts = [res.pos_is_gt for res in sampling_results]
- # bbox_targets is a tuple
- labels = bbox_results['bbox_targets'][0]
- cls_scores = bbox_results['cls_score']
- rois = bbox_results['rois']
- bbox_preds = bbox_results['bbox_pred']
-
- if cls_scores.numel() == 0:
- return None
-
- labels = torch.where(labels == self.num_classes,
- cls_scores[:, :-1].argmax(1), labels)
-
- img_ids = rois[:, 0].long().unique(sorted=True)
- assert img_ids.numel() <= len(batch_img_metas)
-
- results_list = []
- for i in range(len(batch_img_metas)):
- inds = torch.nonzero(
- rois[:, 0] == i, as_tuple=False).squeeze(dim=1)
- num_rois = inds.numel()
-
- bboxes_ = rois[inds, 1:]
- label_ = labels[inds]
- edge_cls_preds, edge_offset_preds = bbox_preds
- edge_cls_preds_ = edge_cls_preds[inds]
- edge_offset_preds_ = edge_offset_preds[inds]
- bbox_pred_ = (edge_cls_preds_, edge_offset_preds_)
- img_meta_ = batch_img_metas[i]
- pos_is_gts_ = pos_is_gts[i]
-
- bboxes = self.regress_by_class(bboxes_, label_, bbox_pred_,
- img_meta_)
- # filter gt bboxes
- pos_keep = 1 - pos_is_gts_
- keep_inds = pos_is_gts_.new_ones(num_rois)
- keep_inds[:len(pos_is_gts_)] = pos_keep
- results = InstanceData(bboxes=bboxes[keep_inds.type(torch.bool)])
- results_list.append(results)
-
- return results_list
-
- def regress_by_class(self, rois: Tensor, label: Tensor, bbox_pred: tuple,
- img_meta: dict) -> Tensor:
- """Regress the bbox for the predicted class. Used in Cascade R-CNN.
-
- Args:
- rois (Tensor): shape (n, 4) or (n, 5)
- label (Tensor): shape (n, )
- bbox_pred (Tuple[Tensor]): shape [(n, num_buckets *2), \
- (n, num_buckets *2)]
- img_meta (dict): Image meta info.
-
- Returns:
- Tensor: Regressed bboxes, the same shape as input rois.
- """
- assert rois.size(1) == 4 or rois.size(1) == 5
-
- if rois.size(1) == 4:
- new_rois, _ = self.bbox_coder.decode(rois, bbox_pred,
- img_meta['img_shape'])
- else:
- bboxes, _ = self.bbox_coder.decode(rois[:, 1:], bbox_pred,
- img_meta['img_shape'])
- new_rois = torch.cat((rois[:, [0]], bboxes), dim=1)
-
- return new_rois
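As a side note on the `refine_bboxes` loop above, the keep-mask construction is easy to misread: sampled RoIs list the positives first, and positives that were injected ground-truth boxes are dropped before refinement. A minimal sketch of just that masking step (the counts below are made up for illustration):

import torch

# Hypothetical sampling outcome for one image: 3 positives (2 of them injected
# ground-truth boxes) followed by 2 negatives.
pos_is_gt = torch.tensor([1, 0, 1], dtype=torch.uint8)
num_rois = 5

pos_keep = 1 - pos_is_gt                  # keep only positives that are not GT boxes
keep_inds = pos_is_gt.new_ones(num_rois)  # negatives are always kept
keep_inds[:len(pos_is_gt)] = pos_keep
print(keep_inds.type(torch.bool))         # tensor([False,  True, False,  True,  True])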
diff --git a/spaces/KyanChen/RSPrompter/mmdet/models/utils/gaussian_target.py b/spaces/KyanChen/RSPrompter/mmdet/models/utils/gaussian_target.py
deleted file mode 100644
index 5bf4d558ce05c4f953e1c3fcf75016e5874afce1..0000000000000000000000000000000000000000
--- a/spaces/KyanChen/RSPrompter/mmdet/models/utils/gaussian_target.py
+++ /dev/null
@@ -1,268 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-from math import sqrt
-
-import torch
-import torch.nn.functional as F
-
-
-def gaussian2D(radius, sigma=1, dtype=torch.float32, device='cpu'):
- """Generate 2D gaussian kernel.
-
- Args:
- radius (int): Radius of gaussian kernel.
- sigma (int): Sigma of gaussian function. Default: 1.
- dtype (torch.dtype): Dtype of gaussian tensor. Default: torch.float32.
- device (str): Device of gaussian tensor. Default: 'cpu'.
-
- Returns:
- h (Tensor): Gaussian kernel with a
- ``(2 * radius + 1) * (2 * radius + 1)`` shape.
- """
- x = torch.arange(
- -radius, radius + 1, dtype=dtype, device=device).view(1, -1)
- y = torch.arange(
- -radius, radius + 1, dtype=dtype, device=device).view(-1, 1)
-
- h = (-(x * x + y * y) / (2 * sigma * sigma)).exp()
-
- h[h < torch.finfo(h.dtype).eps * h.max()] = 0
- return h
-
-
-def gen_gaussian_target(heatmap, center, radius, k=1):
- """Generate 2D gaussian heatmap.
-
- Args:
- heatmap (Tensor): Input heatmap; the gaussian kernel will be overlaid
- on it, keeping the element-wise maximum value.
- center (list[int]): Coord of gaussian kernel's center.
- radius (int): Radius of gaussian kernel.
- k (int): Coefficient of gaussian kernel. Default: 1.
-
- Returns:
- out_heatmap (Tensor): Updated heatmap covered by gaussian kernel.
- """
- diameter = 2 * radius + 1
- gaussian_kernel = gaussian2D(
- radius, sigma=diameter / 6, dtype=heatmap.dtype, device=heatmap.device)
-
- x, y = center
-
- height, width = heatmap.shape[:2]
-
- left, right = min(x, radius), min(width - x, radius + 1)
- top, bottom = min(y, radius), min(height - y, radius + 1)
-
- masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right]
- masked_gaussian = gaussian_kernel[radius - top:radius + bottom,
- radius - left:radius + right]
- out_heatmap = heatmap
- torch.max(
- masked_heatmap,
- masked_gaussian * k,
- out=out_heatmap[y - top:y + bottom, x - left:x + right])
-
- return out_heatmap
-
-
-def gaussian_radius(det_size, min_overlap):
- r"""Generate 2D gaussian radius.
-
- This function is modified from the `official github repo
- `_.
-
- Given ``min_overlap``, the radius can be computed from a quadratic equation
- according to Vieta's formulas.
-
- There are 3 cases for computing the gaussian radius; the details are as follows:
-
- - Explanation of figure: ``lt`` and ``br`` indicate the left-top and
- bottom-right corners of the ground truth box. ``x`` indicates the
- generated corner at the limited position when ``radius=r``.
-
- - Case1: one corner is inside the gt box and the other is outside.
-
- .. code:: text
-
- |< width >|
-
- lt-+----------+ -
- | | | ^
- +--x----------+--+
- | | | |
- | | | | height
- | | overlap | |
- | | | |
- | | | | v
- +--+---------br--+ -
- | | |
- +----------+--x
-
- To ensure IoU of generated box and gt box is larger than ``min_overlap``:
-
- .. math::
- \cfrac{(w-r)*(h-r)}{w*h+(w+h)r-r^2} \ge {iou} \quad\Rightarrow\quad
- {r^2-(w+h)r+\cfrac{1-iou}{1+iou}*w*h} \ge 0 \\
- {a} = 1,\quad{b} = {-(w+h)},\quad{c} = {\cfrac{1-iou}{1+iou}*w*h}
- {r} \le \cfrac{-b-\sqrt{b^2-4*a*c}}{2*a}
-
- - Case2: both two corners are inside the gt box.
-
- .. code:: text
-
- |< width >|
-
- lt-+----------+ -
- | | | ^
- +--x-------+ |
- | | | |
- | |overlap| | height
- | | | |
- | +-------x--+
- | | | v
- +----------+-br -
-
- To ensure IoU of generated box and gt box is larger than ``min_overlap``:
-
- .. math::
- \cfrac{(w-2*r)*(h-2*r)}{w*h} \ge {iou} \quad\Rightarrow\quad
- {4r^2-2(w+h)r+(1-iou)*w*h} \ge 0 \\
- {a} = 4,\quad {b} = {-2(w+h)},\quad {c} = {(1-iou)*w*h}
- {r} \le \cfrac{-b-\sqrt{b^2-4*a*c}}{2*a}
-
- - Case3: both two corners are outside the gt box.
-
- .. code:: text
-
- |< width >|
-
- x--+----------------+
- | | |
- +-lt-------------+ | -
- | | | | ^
- | | | |
- | | overlap | | height
- | | | |
- | | | | v
- | +------------br--+ -
- | | |
- +----------------+--x
-
- To ensure IoU of generated box and gt box is larger than ``min_overlap``:
-
- .. math::
- \cfrac{w*h}{(w+2*r)*(h+2*r)} \ge {iou} \quad\Rightarrow\quad
- {4*iou*r^2+2*iou*(w+h)r+(iou-1)*w*h} \le 0 \\
- {a} = {4*iou},\quad {b} = {2*iou*(w+h)},\quad {c} = {(iou-1)*w*h} \\
- {r} \le \cfrac{-b+\sqrt{b^2-4*a*c}}{2*a}
-
- Args:
- det_size (list[int]): Shape of object.
- min_overlap (float): Min IoU with ground truth for boxes generated by
- keypoints inside the gaussian kernel.
-
- Returns:
- radius (float): Radius of gaussian kernel.
- """
- height, width = det_size
-
- a1 = 1
- b1 = (height + width)
- c1 = width * height * (1 - min_overlap) / (1 + min_overlap)
- sq1 = sqrt(b1**2 - 4 * a1 * c1)
- r1 = (b1 - sq1) / (2 * a1)
-
- a2 = 4
- b2 = 2 * (height + width)
- c2 = (1 - min_overlap) * width * height
- sq2 = sqrt(b2**2 - 4 * a2 * c2)
- r2 = (b2 - sq2) / (2 * a2)
-
- a3 = 4 * min_overlap
- b3 = -2 * min_overlap * (height + width)
- c3 = (min_overlap - 1) * width * height
- sq3 = sqrt(b3**2 - 4 * a3 * c3)
- r3 = (b3 + sq3) / (2 * a3)
- return min(r1, r2, r3)
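To make the Case-2 algebra above concrete, here is a small numerical check (box size and `min_overlap` chosen arbitrarily). Since the function returns the smallest of the three case radii, shrinking both corners inward by that radius still satisfies the IoU bound:

def case2_iou(w, h, r):
    # Case 2: both corners move inside the gt box by r in x and y.
    return (w - 2 * r) * (h - 2 * r) / (w * h)

h, w, min_overlap = 64, 96, 0.7
r = gaussian_radius((h, w), min_overlap)   # det_size is (height, width); uses the function above
print(round(r, 2))                         # ~6.25, the smallest of r1, r2, r3
print(round(case2_iou(w, h, r), 3))        # ~0.7, i.e. still >= min_overlap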
-
-
-def get_local_maximum(heat, kernel=3):
- """Extract local maximum pixel with given kernel.
-
- Args:
- heat (Tensor): Target heatmap.
- kernel (int): Kernel size of max pooling. Default: 3.
-
- Returns:
- heat (Tensor): A heatmap where local-maximum pixels keep their
- own values and all other positions are set to 0.
- """
- pad = (kernel - 1) // 2
- hmax = F.max_pool2d(heat, kernel, stride=1, padding=pad)
- keep = (hmax == heat).float()
- return heat * keep
-
-
-def get_topk_from_heatmap(scores, k=20):
- """Get top k positions from heatmap.
-
- Args:
- scores (Tensor): Target heatmap with shape
- [batch, num_classes, height, width].
- k (int): Target number. Default: 20.
-
- Returns:
- tuple[torch.Tensor]: Scores, indexes, categories and coords of the
- top-k keypoints, containing the following Tensors:
-
- - topk_scores (Tensor): Max scores of each topk keypoint.
- - topk_inds (Tensor): Indexes of each topk keypoint.
- - topk_clses (Tensor): Categories of each topk keypoint.
- - topk_ys (Tensor): Y-coord of each topk keypoint.
- - topk_xs (Tensor): X-coord of each topk keypoint.
- """
- batch, _, height, width = scores.size()
- topk_scores, topk_inds = torch.topk(scores.view(batch, -1), k)
- topk_clses = topk_inds // (height * width)
- topk_inds = topk_inds % (height * width)
- topk_ys = topk_inds // width
- topk_xs = (topk_inds % width).int().float()
- return topk_scores, topk_inds, topk_clses, topk_ys, topk_xs
-
-
-def gather_feat(feat, ind, mask=None):
- """Gather feature according to index.
-
- Args:
- feat (Tensor): Target feature map.
- ind (Tensor): Target coord index.
- mask (Tensor | None): Mask of feature map. Default: None.
-
- Returns:
- feat (Tensor): Gathered feature.
- """
- dim = feat.size(2)
- ind = ind.unsqueeze(2).repeat(1, 1, dim)
- feat = feat.gather(1, ind)
- if mask is not None:
- mask = mask.unsqueeze(2).expand_as(feat)
- feat = feat[mask]
- feat = feat.view(-1, dim)
- return feat
-
-
-def transpose_and_gather_feat(feat, ind):
- """Transpose and gather feature according to index.
-
- Args:
- feat (Tensor): Target feature map.
- ind (Tensor): Target coord index.
-
- Returns:
- feat (Tensor): Transposed and gathered feature.
- """
- feat = feat.permute(0, 2, 3, 1).contiguous()
- feat = feat.view(feat.size(0), -1, feat.size(3))
- feat = gather_feat(feat, ind)
- return feat
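Taken together, these helpers form the usual CenterNet-style decoding step: suppress non-peak pixels with a max-pool, pick the top-k peaks, then gather the per-pixel regressions at those peaks. A rough sketch with random tensors and hypothetical shapes, assuming the functions above are in scope:

import torch

batch, num_classes, height, width = 2, 3, 32, 32
heatmap = torch.rand(batch, num_classes, height, width)    # predicted centre heatmap
wh_pred = torch.rand(batch, 2, height, width)              # per-pixel width/height regression

peaks = get_local_maximum(heatmap, kernel=3)               # zero out non-peak pixels
scores, inds, clses, ys, xs = get_topk_from_heatmap(peaks, k=5)
wh = transpose_and_gather_feat(wh_pred, inds)              # (batch, k, 2) box sizes at the peaks
print(scores.shape, wh.shape)                              # torch.Size([2, 5]) torch.Size([2, 5, 2])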
diff --git a/spaces/LCaligari/deepsynthbody-deepfake_ecg/app.py b/spaces/LCaligari/deepsynthbody-deepfake_ecg/app.py
deleted file mode 100644
index ee9f7ab55dc0efc6756d4efabf8914277b944a30..0000000000000000000000000000000000000000
--- a/spaces/LCaligari/deepsynthbody-deepfake_ecg/app.py
+++ /dev/null
@@ -1,3 +0,0 @@
-import gradio as gr
-
-gr.Interface.load("models/deepsynthbody/deepfake_ecg").launch()
\ No newline at end of file
diff --git a/spaces/Lamai/LAMAIGPT/tests/milvus_memory_test.py b/spaces/Lamai/LAMAIGPT/tests/milvus_memory_test.py
deleted file mode 100644
index 84fd6e6d5006e781fa5e1065f949b2160537d913..0000000000000000000000000000000000000000
--- a/spaces/Lamai/LAMAIGPT/tests/milvus_memory_test.py
+++ /dev/null
@@ -1,72 +0,0 @@
-# sourcery skip: snake-case-functions
-"""Tests for the MilvusMemory class."""
-import os
-import sys
-import unittest
-
-try:
- from autogpt.memory.milvus import MilvusMemory
-
- def mock_config() -> dict:
- """Mock the Config class"""
- return type(
- "MockConfig",
- (object,),
- {
- "debug_mode": False,
- "continuous_mode": False,
- "speak_mode": False,
- "milvus_collection": "autogpt",
- "milvus_addr": "localhost:19530",
- },
- )
-
- class TestMilvusMemory(unittest.TestCase):
- """Tests for the MilvusMemory class."""
-
- def setUp(self) -> None:
- """Set up the test environment"""
- self.cfg = mock_config()
- self.memory = MilvusMemory(self.cfg)
-
- def test_add(self) -> None:
- """Test adding a text to the cache"""
- text = "Sample text"
- self.memory.clear()
- self.memory.add(text)
- result = self.memory.get(text)
- self.assertEqual([text], result)
-
- def test_clear(self) -> None:
- """Test clearing the cache"""
- self.memory.clear()
- self.assertEqual(self.memory.collection.num_entities, 0)
-
- def test_get(self) -> None:
- """Test getting a text from the cache"""
- text = "Sample text"
- self.memory.clear()
- self.memory.add(text)
- result = self.memory.get(text)
- self.assertEqual(result, [text])
-
- def test_get_relevant(self) -> None:
- """Test getting relevant texts from the cache"""
- text1 = "Sample text 1"
- text2 = "Sample text 2"
- self.memory.clear()
- self.memory.add(text1)
- self.memory.add(text2)
- result = self.memory.get_relevant(text1, 1)
- self.assertEqual(result, [text1])
-
- def test_get_stats(self) -> None:
- """Test getting the cache stats"""
- text = "Sample text"
- self.memory.clear()
- self.memory.add(text)
- stats = self.memory.get_stats()
- self.assertEqual(15, len(stats))
-
-except ImportError:
- print("Milvus not installed, skipping tests")
diff --git a/spaces/Loren/Streamlit_OCR_comparator/configs/textrecog/nrtr/nrtr_modality_transform_toy_dataset.py b/spaces/Loren/Streamlit_OCR_comparator/configs/textrecog/nrtr/nrtr_modality_transform_toy_dataset.py
deleted file mode 100644
index 1bb350fc3f49418f2841df2d65f183c34e08db0e..0000000000000000000000000000000000000000
--- a/spaces/Loren/Streamlit_OCR_comparator/configs/textrecog/nrtr/nrtr_modality_transform_toy_dataset.py
+++ /dev/null
@@ -1,31 +0,0 @@
-_base_ = [
- '../../_base_/default_runtime.py',
- '../../_base_/recog_models/nrtr_modality_transform.py',
- '../../_base_/schedules/schedule_adam_step_6e.py',
- '../../_base_/recog_datasets/toy_data.py',
- '../../_base_/recog_pipelines/nrtr_pipeline.py'
-]
-
-train_list = {{_base_.train_list}}
-test_list = {{_base_.test_list}}
-
-train_pipeline = {{_base_.train_pipeline}}
-test_pipeline = {{_base_.test_pipeline}}
-
-data = dict(
- samples_per_gpu=16,
- workers_per_gpu=2,
- train=dict(
- type='UniformConcatDataset',
- datasets=train_list,
- pipeline=train_pipeline),
- val=dict(
- type='UniformConcatDataset',
- datasets=test_list,
- pipeline=test_pipeline),
- test=dict(
- type='UniformConcatDataset',
- datasets=test_list,
- pipeline=test_pipeline))
-
-evaluation = dict(interval=1, metric='acc')
diff --git a/spaces/MAGAer13/mPLUG-Owl2/mplug_owl2/model/__init__.py b/spaces/MAGAer13/mPLUG-Owl2/mplug_owl2/model/__init__.py
deleted file mode 100644
index 6d6f0775a0abb2c3e220343a4feb05c70c2c7779..0000000000000000000000000000000000000000
--- a/spaces/MAGAer13/mPLUG-Owl2/mplug_owl2/model/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-from .modeling_mplug_owl2 import MPLUGOwl2LlamaForCausalLM
-from .configuration_mplug_owl2 import MPLUGOwl2Config
\ No newline at end of file
diff --git a/spaces/MAli7319/Comment_Analysis/comment_analyzer.py b/spaces/MAli7319/Comment_Analysis/comment_analyzer.py
deleted file mode 100644
index 8a2d27a85e6290d456ea82a5b47fad3ceef75280..0000000000000000000000000000000000000000
--- a/spaces/MAli7319/Comment_Analysis/comment_analyzer.py
+++ /dev/null
@@ -1,82 +0,0 @@
-import gradio as gr
-import pandas as pd
-from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
-from sklearn.model_selection import train_test_split
-from sklearn.svm import SVR
-
-
-data = pd.read_csv("modeled_data.csv")
-analyzer = SentimentIntensityAnalyzer()
-
-
-def sample_model(df, regressor):
- X = df.drop("rate",axis=1)
- y = df["rate"]
-
- X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state=1)
-
- model = regressor
- model.fit(X_train, y_train)
-
- return model
-
-
-def calculate_sentiments(comment, model):
-
- negative_score = analyzer.polarity_scores(comment)["neg"]
- neutral_score = analyzer.polarity_scores(comment)["neu"]
- positive_score = analyzer.polarity_scores(comment)["pos"]
- compound_score = analyzer.polarity_scores(comment)["compound"]
- rate_pred = model.predict([[negative_score, neutral_score, positive_score, compound_score]])
-
- return round(negative_score,2), round(neutral_score,2), round(positive_score,2), round(compound_score,2), round(rate_pred[0],2)
-
-
-def take_input(comment):
-
- cons_tuned_svr = sample_model(data, SVR(C=3, kernel="rbf", tol=0.001))
- return calculate_sentiments(comment, cons_tuned_svr)
-
-
-with gr.Blocks() as demo:
- gr.Markdown("# AIN311 Project P05 - MOOC Recommendation")
- gr.Markdown("## Generating a Rating from User Comment")
- with gr.Column():
- gr.Markdown("""
- ##### Thanks for your interest and for taking the time.
- ##### Tell us about your personal experience enrolling in this course. Was it the right match for you?
- """)
- input_comment = gr.Textbox(placeholder="Write your comment here...", show_label = False, lines=2)
- button = gr.Button("What is the Rating I Have Given? Click me to Learn", variant="secondary").style(full_width=True)
- with gr.Row():
- with gr.Column():
- gr.Markdown("#### Generated Rating from Your Comment")
- rating = gr.Number().style(show_label=False)
- with gr.Column():
- gr.Markdown("#### Sentiment Scores of Your Comment")
- with gr.Row():
- negscore = gr.Number(label="Negativity Score")
- neuscore = gr.Number(label="Neutrality Score")
- posscore = gr.Number(label="Positivity Score")
- compscore = gr.Number(label="Compound Score")
-
- gr.Markdown("Example comments has taken from https://www.udemy.com/course/statistics-for-data-science-and-business-analysis/")
- gr.Examples(
- [["Not really a basic course. Goes too fast and sometimes the explanations aren't clear. The solutions to the exercises are not explained, and should be. Not much better than the other course I started and didn't finish.(3)"],
- ["Presentation style was bad. Too much detail on the simpler topics, and glossed over some of the more complicated ones.(2)"],
- ["This course is the worst Course i have ever watched (1)"],
- ["The course is really great! The didatic in explain all concepts and pratical examples are amazing. Better than brazilian universities.(4)"],
- ["The best!!!!!!!!!!(5)"],
- ["Excelent description and view. The exercises were prepared very carefully. I suggest it.(5)"],
- ["Yes, This course is a very good way to update/ refresh statistical knowledge (2)"],
- ["explaination is good but practical examples are not so good (2.5)"],
- ["Thanks for content. Good to know and understand the things easily. (3.5)"],
- ["good (3)"],
- ["The course navigation is very bad ..It is very tedious to navigate the course.(5)"]],
- [input_comment],
- [[negscore, neuscore, posscore, compscore, rating]],
- fn=take_input
- )
- button.click(fn=take_input, inputs=input_comment, outputs=[negscore, neuscore, posscore, compscore, rating])
-
-demo.launch()
\ No newline at end of file
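The essence of the app above is mapping VADER's four sentiment scores to a star rating with a small SVR. A stripped-down sketch of that pipeline, with a tiny hand-made training set standing in for modeled_data.csv (the comments and ratings below are invented):

from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
from sklearn.svm import SVR

analyzer = SentimentIntensityAnalyzer()

def features(comment):
    scores = analyzer.polarity_scores(comment)
    return [scores["neg"], scores["neu"], scores["pos"], scores["compound"]]

# Invented (comment, rating) pairs, only to make the sketch runnable.
train = [("Terrible course, waste of time", 1.0),
         ("It was okay, nothing special", 3.0),
         ("Amazing lectures, loved every minute", 5.0)]

model = SVR(C=3, kernel="rbf", tol=0.001)
model.fit([features(c) for c, _ in train], [r for _, r in train])
print(round(model.predict([features("Really clear and helpful course")])[0], 2))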
diff --git a/spaces/Makiing/coolb-in-gtest/src/lib/hooks/use-copy-to-clipboard.tsx b/spaces/Makiing/coolb-in-gtest/src/lib/hooks/use-copy-to-clipboard.tsx
deleted file mode 100644
index 62f7156dca246c46b213151af003a3a177977ccf..0000000000000000000000000000000000000000
--- a/spaces/Makiing/coolb-in-gtest/src/lib/hooks/use-copy-to-clipboard.tsx
+++ /dev/null
@@ -1,33 +0,0 @@
-'use client'
-
-import * as React from 'react'
-
-export interface useCopyToClipboardProps {
- timeout?: number
-}
-
-export function useCopyToClipboard({
- timeout = 2000
-}: useCopyToClipboardProps) {
- const [isCopied, setIsCopied] = React.useState(false)
-
- const copyToClipboard = (value: string) => {
- if (typeof window === 'undefined' || !navigator.clipboard?.writeText) {
- return
- }
-
- if (!value) {
- return
- }
-
- navigator.clipboard.writeText(value).then(() => {
- setIsCopied(true)
-
- setTimeout(() => {
- setIsCopied(false)
- }, timeout)
- })
- }
-
- return { isCopied, copyToClipboard }
-}
diff --git "a/spaces/MaxReimann/Whitebox-Style-Transfer-Editing/pages/2_\360\237\226\214\357\270\217_Local_edits.py" "b/spaces/MaxReimann/Whitebox-Style-Transfer-Editing/pages/2_\360\237\226\214\357\270\217_Local_edits.py"
deleted file mode 100644
index 2b67c3c4142876fd45c82a8637d6fe7678bcd99e..0000000000000000000000000000000000000000
--- "a/spaces/MaxReimann/Whitebox-Style-Transfer-Editing/pages/2_\360\237\226\214\357\270\217_Local_edits.py"
+++ /dev/null
@@ -1,244 +0,0 @@
-import os
-import sys
-
-import torch.nn.functional as F
-import torch
-import numpy as np
-import matplotlib
-from matplotlib import pyplot as plt
-import matplotlib.cm
-from PIL import Image
-
-import streamlit as st
-from streamlit_drawable_canvas import st_canvas
-
-
-PACKAGE_PARENT = '..'
-WISE_DIR = '../wise/'
-SCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))
-sys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, PACKAGE_PARENT)))
-sys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, WISE_DIR)))
-
-
-
-from effects.gauss2d_xy_separated import Gauss2DEffect
-from effects.minimal_pipeline import MinimalPipelineEffect
-from helpers import torch_to_np, np_to_torch
-from effects import get_default_settings
-from demo_config import HUGGING_FACE
-
-st.set_page_config(page_title="Editing Demo", layout="wide")
-
-# @st.cache(hash_funcs={OilPaintEffect: id})
-@st.cache(hash_funcs={MinimalPipelineEffect: id})
-def local_edits_create_effect():
- effect, preset, param_set = get_default_settings("minimal_pipeline")
- effect.enable_checkpoints()
- effect.cuda()
- return effect, param_set
-
-
-effect, param_set = local_edits_create_effect()
-
-@st.experimental_memo
-def gen_param_strength_fig():
- cmap = matplotlib.cm.get_cmap('plasma')
- # cmap show
- gradient = np.linspace(0, 1, 256)
- gradient = np.vstack((gradient, gradient))
- fig, ax = plt.subplots(figsize=(3, 0.1))
- fig.patch.set_alpha(0.0)
- ax.set_title("parameter strength", fontsize=6.5, loc="left")
- ax.imshow(gradient, aspect='auto', cmap=cmap)
- ax.set_axis_off()
- return fig, cmap
-
-cmap_fig, cmap = gen_param_strength_fig()
-
-st.session_state["canvas_key"] = "canvas"
-try:
- vp = st.session_state["result_vp"]
- org_cuda = st.session_state["effect_input"]
-except KeyError as e:
- print("init run, certain keys not found. If this happens once its ok.")
-
-if st.session_state["action"] != "switch_page_from_local_edits":
- st.session_state.local_edit_action = "init"
-
-st.session_state["action"] = "switch_page_from_local_edits" # on switchback, remember effect input
-
-if "mask_edit_counter" not in st.session_state:
- st.session_state["mask_edit_counter"] = 1
-if "initial_drawing" not in st.session_state:
- st.session_state["initial_drawing"] = {"random": st.session_state["mask_edit_counter"], "background": "#eee"}
-
-def on_slider_change():
- if st.session_state.local_edit_action == "init":
- st.stop()
- st.session_state.local_edit_action = "slider"
-
-def on_param_change():
- st.session_state.local_edit_action = "param_change"
-
-active_param = st.sidebar.selectbox("active parameter: ", param_set + ["smooth"], index=2, on_change=on_param_change)
-
-st.sidebar.text("Drawing options")
-if active_param != "smooth":
- plus_or_minus = st.sidebar.slider("Increase or decrease param map: ", -1.0, 1.0, 0.8, 0.05,
- on_change=on_slider_change)
-else:
- sigma = st.sidebar.slider("Sigma: ", 0.1, 10.0, 0.5, 0.1, on_change=on_slider_change)
-
-stroke_width = st.sidebar.slider("Stroke width: ", 1, 50, 20, on_change=on_slider_change)
-drawing_mode = st.sidebar.selectbox(
- "Drawing tool:", ("freedraw", "line", "rect", "circle", "transform"), on_change=on_slider_change,
-)
-
-st.sidebar.text("Viewing options")
-if active_param != "smooth":
- overlay = st.sidebar.slider("show parameter overlay: ", 0.0, 1.0, 0.8, 0.02, on_change=on_slider_change)
- st.sidebar.pyplot(cmap_fig, bbox_inches='tight', pad_inches=0)
-
-st.sidebar.text("Update:")
-realtime_update = st.sidebar.checkbox("Update in realtime", True)
-clear_after_draw = st.sidebar.checkbox("Clear Canvas after each Stroke", False)
-invert_selection = st.sidebar.checkbox("Invert Selection", False)
-
-
-@st.experimental_memo
-def greyscale_org(_org_cuda, content_id): #content_id is used for hashing
- if HUGGING_FACE:
- wsize = 450
- img_org_height, img_org_width = _org_cuda.shape[-2:]
- wpercent = (wsize / float(img_org_width))
- hsize = int((float(img_org_height) * float(wpercent)))
- else:
- longest_edge = 670
- img_org_height, img_org_width = _org_cuda.shape[-2:]
- max_width_height = max(img_org_width, img_org_height)
- hsize = int((float(longest_edge) * float(float(img_org_height) / max_width_height)))
- wsize = int((float(longest_edge) * float(float(img_org_width) / max_width_height)))
-
- org_img = F.interpolate(_org_cuda, (hsize, wsize), mode="bilinear")
- org_img = torch.mean(org_img, dim=1, keepdim=True) / 2.0
- org_img = torch_to_np(org_img)[..., np.newaxis].repeat(3, axis=2)
- return org_img, hsize, wsize
-
-def generate_param_mask(vp):
- greyscale_img, hsize, wsize = greyscale_org(org_cuda, st.session_state["Content_id"])
- if active_param != "smooth":
- scaled_vp = F.interpolate(vp, (hsize, wsize))[:, effect.vpd.name2idx[active_param]]
- param_cmapped = cmap((scaled_vp + 0.5).cpu().numpy())[...,:3][0]
- greyscale_img = greyscale_img * (1 - overlay) + param_cmapped * overlay
- return Image.fromarray((greyscale_img * 255).astype(np.uint8))
-
-def compute_results(_vp):
- if "cached_canvas" in st.session_state and st.session_state["cached_canvas"].image_data is not None:
- canvas_result = st.session_state["cached_canvas"]
- abc = np_to_torch(canvas_result.image_data.astype(np.float32)).sum(dim=1, keepdim=True).cuda()
-
- if invert_selection:
- abc = abc * (- 1.0) + 1.0
-
- img_org_width = org_cuda.shape[-1]
- img_org_height = org_cuda.shape[-2]
- res_data = F.interpolate(abc, (img_org_height, img_org_width)).squeeze(1)
-
- if active_param != "smooth":
- _vp[:, effect.vpd.name2idx[active_param]] += plus_or_minus * res_data
- _vp.clamp_(-0.5, 0.5)
- else:
- gauss2dx = Gauss2DEffect(dxdy=[1.0, 0.0], dim_kernsize=5)
- gauss2dy = Gauss2DEffect(dxdy=[0.0, 1.0], dim_kernsize=5)
-
- vp_smoothed = gauss2dx(_vp, torch.tensor(sigma).cuda())
- vp_smoothed = gauss2dy(vp_smoothed, torch.tensor(sigma).cuda())
-
- print(res_data.shape)
- print(_vp.shape)
- print(vp_smoothed.shape)
- _vp = torch.lerp(_vp, vp_smoothed, res_data.unsqueeze(1))
-
- with torch.no_grad():
- result_cuda = effect(org_cuda, _vp)
-
- _, hsize, wsize = greyscale_org(org_cuda, st.session_state["Content_id"])
- result_cuda = F.interpolate(result_cuda, (hsize, wsize), mode="bilinear")
-
- return Image.fromarray((torch_to_np(result_cuda) * 255.0).astype(np.uint8)), _vp
-
-coll1, coll2 = st.columns(2)
-coll1.header("Draw Mask:")
-coll2.header("Live Result")
-
-# there is no way of removing the canvas history/state without rerunning the whole program.
- # therefore, giving the canvas an initial_drawing that differs from the canvas state will clear the background
-def mark_canvas_for_redraw():
- print("mark for redraw")
- st.session_state["mask_edit_counter"] += 1 # change state of initial drawing
- initial_drawing = {"random": st.session_state["mask_edit_counter"], "background": "#eee"}
- st.session_state["initial_drawing"] = initial_drawing
-
-
-with coll1:
- print("edit action", st.session_state.local_edit_action)
- if clear_after_draw and st.session_state.local_edit_action not in ("slider", "param_change", "init"):
- if st.session_state.local_edit_action == "redraw":
- st.session_state.local_edit_action = "draw"
- mark_canvas_for_redraw()
- else:
- st.session_state.local_edit_action = "redraw"
-
- mask = generate_param_mask(st.session_state["result_vp"])
- st.session_state["last_mask"] = mask
-
- # Create a canvas component
- canvas_result = st_canvas(
- fill_color="rgba(0, 0, 0, 1)",
- stroke_width=stroke_width,
- background_image=mask,
- update_streamlit=realtime_update,
- width=mask.width,
- height=mask.height,
- initial_drawing=st.session_state["initial_drawing"],
- drawing_mode=drawing_mode,
- key=st.session_state.canvas_key,
- )
-
- if canvas_result.json_data is None:
- print("stops")
- st.stop()
-
- st.session_state["cached_canvas"] = canvas_result
-
- print("compute result")
- img_res, vp = compute_results(vp)
- st.session_state["last_result"] = img_res
- st.session_state["result_vp"] = vp
-
- st.markdown("### Mask: " + active_param)
-
-if st.session_state.local_edit_action in ("slider", "param_change", "init"):
- print("set redraw")
- st.session_state.local_edit_action = "redraw"
-
-if "objects" in canvas_result.json_data and canvas_result.json_data["objects"] != []:
- print(st.session_state["user"], " edited local param canvas")
-
-print("plot masks")
-texts = []
-preview_masks = []
-img = st.session_state["last_mask"]
-for i, p in enumerate(param_set):
- idx = effect.vpd.name2idx[p]
- iii = F.interpolate(vp[:, idx:idx + 1] + 0.5, (int(img.height * 0.2), int(img.width * 0.2)))
- texts.append(p[:15])
- preview_masks.append(torch_to_np(iii))
-
-coll2.image(img_res) # , use_column_width="auto")
-ppp = st.columns(len(param_set))
-for i, (txt, im) in enumerate(zip(texts, preview_masks)):
- ppp[i].text(txt)
- ppp[i].image(im, clamp=True)
-
-print("....")
diff --git a/spaces/MesutUnutur/germanToEnglishTextToImage/app.py b/spaces/MesutUnutur/germanToEnglishTextToImage/app.py
deleted file mode 100644
index c035fb80ea8dbc96ba57098bbac2d46cc9350fc0..0000000000000000000000000000000000000000
--- a/spaces/MesutUnutur/germanToEnglishTextToImage/app.py
+++ /dev/null
@@ -1,21 +0,0 @@
-import gradio as gr
-deToEng = gr.Interface.load("huggingface/Helsinki-NLP/opus-mt-de-en")
-text_to_image = gr.Interface.load("huggingface/runwayml/stable-diffusion-v1-5")
-
-with gr.Blocks() as demo:
- title = "Translate from German to English and English text to image"
-
- de_text = gr.Textbox(placeholder="German sentence")
- translate_btn = gr.Button("translate from german to english")
-
- eng_text = gr.Textbox(label="English")
- translate_btn.click(deToEng, de_text, eng_text)
-
- text_to_image_btn = gr.Button("text to image")
-
- out = gr.Image()
-
- text_to_image_btn.click(text_to_image, eng_text, out)
-
-demo.launch()
-
\ No newline at end of file
diff --git a/spaces/Monelmo/Testing/Dockerfile b/spaces/Monelmo/Testing/Dockerfile
deleted file mode 100644
index 6c01c09373883afcb4ea34ae2d316cd596e1737b..0000000000000000000000000000000000000000
--- a/spaces/Monelmo/Testing/Dockerfile
+++ /dev/null
@@ -1,21 +0,0 @@
-FROM node:18-bullseye-slim
-
-RUN apt-get update && \
-
-apt-get install -y git
-
-RUN git clone https://gitgud.io/khanon/oai-reverse-proxy.git /app
-
-WORKDIR /app
-
-RUN npm install
-
-COPY Dockerfile greeting.md* .env* ./
-
-RUN npm run build
-
-EXPOSE 7860
-
-ENV NODE_ENV=production
-
-CMD [ "npm", "start" ]
\ No newline at end of file
diff --git a/spaces/Monster/GPT4ALL/README.md b/spaces/Monster/GPT4ALL/README.md
deleted file mode 100644
index 5f72e0ae0db51efd550f9997fa6615d9313fd2f3..0000000000000000000000000000000000000000
--- a/spaces/Monster/GPT4ALL/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Gpt4all
-emoji: 🦀
-colorFrom: gray
-colorTo: pink
-sdk: gradio
-sdk_version: 3.24.1
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/MoonQiu/LongerCrafter/scripts/run_text2video_freenoise_1024.sh b/spaces/MoonQiu/LongerCrafter/scripts/run_text2video_freenoise_1024.sh
deleted file mode 100644
index 5069fe833ab96fb3826e3535b5c92302dbe158cb..0000000000000000000000000000000000000000
--- a/spaces/MoonQiu/LongerCrafter/scripts/run_text2video_freenoise_1024.sh
+++ /dev/null
@@ -1,24 +0,0 @@
-name="base_1024_test"
-
-ckpt='checkpoints/base_1024_v1/model.ckpt'
-config='configs/inference_t2v_1024_v1.0_freenoise.yaml'
-
-prompt_file="prompts/single_prompts.txt"
-res_dir="results_freenoise_single_1024"
-
-python3 scripts/evaluation/inference_freenoise.py \
---seed 123 \
---mode 'base' \
---ckpt_path $ckpt \
---config $config \
---savedir $res_dir/$name \
---n_samples 3 \
---bs 1 --height 576 --width 1024 \
---unconditional_guidance_scale 12.0 \
---ddim_steps 50 \
---ddim_eta 0.0 \
---prompt_file $prompt_file \
---fps 28 \
---frames 64 \
---window_size 16 \
---window_stride 4
diff --git a/spaces/Mountchicken/MAERec-Gradio/mmocr/datasets/preparers/parsers/mjsynth_parser.py b/spaces/Mountchicken/MAERec-Gradio/mmocr/datasets/preparers/parsers/mjsynth_parser.py
deleted file mode 100644
index 3eee6e29a373bfb9689de1845f7a22587750816c..0000000000000000000000000000000000000000
--- a/spaces/Mountchicken/MAERec-Gradio/mmocr/datasets/preparers/parsers/mjsynth_parser.py
+++ /dev/null
@@ -1,50 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import os.path as osp
-from typing import List
-
-from mmocr.registry import DATA_PARSERS
-from .icdar_txt_parser import ICDARTxtTextRecogAnnParser
-
-
-@DATA_PARSERS.register_module()
-class MJSynthAnnParser(ICDARTxtTextRecogAnnParser):
- """MJSynth Text Recognition Annotation Parser.
-
- The original annotation format of this dataset is stored in txt files,
- which is formed as the following format:
- img_path, transcription
-
- Args:
- separator (str): The separator between each element in a line. Defaults
- to ','.
- ignore (str): The text to be ignored. Defaults to '#'.
- format (str): The format of the annotation. Defaults to 'img, text'.
- encoding (str): The encoding of the annotation file. Defaults to
- 'utf-8-sig'.
- nproc (int): The number of processes to parse the annotation. Defaults
- to 1.
- base_name (bool): Whether to use the basename of the image path as the
- image name. Defaults to False.
- remove_strs (List[str], Optional): Used to remove redundant strings in
- the transcription. Defaults to ['"'].
- """
-
- def parse_files(self, img_dir: str, ann_path: str) -> List:
- """Parse annotations."""
- assert isinstance(ann_path, str)
- samples = list()
- for anno in self.loader(
- file_path=ann_path,
- format=self.format,
- encoding=self.encoding,
- separator=self.sep):
- text = osp.basename(anno['img']).split('_')[1]
- if self.remove_strs is not None:
- for strs in self.remove_strs:
- text = text.replace(strs, '')
- if text == self.ignore:
- continue
- img_name = anno['img']
- samples.append((osp.join(img_dir, img_name), text))
-
- return samples
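The label is read straight from the MJSynth file name, which encodes the transcription between underscores; a one-off sketch of that step with a made-up path:

import os.path as osp

img_path = './2697/6/466_MONIKER_49537.jpg'      # hypothetical MJSynth-style path
text = osp.basename(img_path).split('_')[1]
print(text)                                      # MONIKER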
diff --git a/spaces/Mountchicken/MAERec-Gradio/mmocr/models/common/losses/ce_loss.py b/spaces/Mountchicken/MAERec-Gradio/mmocr/models/common/losses/ce_loss.py
deleted file mode 100644
index 9ff498723d9cbae1d71808ab028cd870da86b3b1..0000000000000000000000000000000000000000
--- a/spaces/Mountchicken/MAERec-Gradio/mmocr/models/common/losses/ce_loss.py
+++ /dev/null
@@ -1,9 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import torch.nn as nn
-
-from mmocr.registry import MODELS
-
-
-@MODELS.register_module()
-class CrossEntropyLoss(nn.CrossEntropyLoss):
- """Cross entropy loss."""
diff --git a/spaces/Mountchicken/MAERec-Gradio/mmocr/models/textrecog/recognizers/sar.py b/spaces/Mountchicken/MAERec-Gradio/mmocr/models/textrecog/recognizers/sar.py
deleted file mode 100644
index 8ba8306232b2598416c0149c8baf786338b07ab4..0000000000000000000000000000000000000000
--- a/spaces/Mountchicken/MAERec-Gradio/mmocr/models/textrecog/recognizers/sar.py
+++ /dev/null
@@ -1,8 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-from mmocr.registry import MODELS
-from .encoder_decoder_recognizer import EncoderDecoderRecognizer
-
-
-@MODELS.register_module()
-class SARNet(EncoderDecoderRecognizer):
- """Implementation of `SAR `_"""
diff --git a/spaces/NATSpeech/PortaSpeech/modules/tts/fs.py b/spaces/NATSpeech/PortaSpeech/modules/tts/fs.py
deleted file mode 100644
index b15b4348c1abf58a476c12115b5b088dc7b46979..0000000000000000000000000000000000000000
--- a/spaces/NATSpeech/PortaSpeech/modules/tts/fs.py
+++ /dev/null
@@ -1,172 +0,0 @@
-from copy import deepcopy
-
-import torch
-from torch import nn
-import torch.nn.functional as F
-from modules.commons.conv import TextConvEncoder, ConvBlocks
-from modules.commons.layers import Embedding
-from modules.commons.nar_tts_modules import PitchPredictor, DurationPredictor, LengthRegulator
-from modules.commons.rel_transformer import RelTransformerEncoder
-from modules.commons.rnn import TacotronEncoder, RNNEncoder, DecoderRNN
-from modules.commons.transformer import FastSpeechEncoder, FastSpeechDecoder
-from modules.commons.wavenet import WN
-from modules.tts.commons.align_ops import clip_mel2token_to_multiple, expand_states
-from utils.audio.pitch.utils import denorm_f0, f0_to_coarse
-
-FS_ENCODERS = {
- 'fft': lambda hp, dict_size: FastSpeechEncoder(
- dict_size, hp['hidden_size'], hp['enc_layers'], hp['enc_ffn_kernel_size'],
- num_heads=hp['num_heads']),
- 'tacotron': lambda hp, dict_size: TacotronEncoder(
- hp['hidden_size'], dict_size, hp['hidden_size'],
- K=hp['encoder_K'], num_highways=4, dropout=hp['dropout']),
- 'tacotron2': lambda hp, dict_size: RNNEncoder(dict_size, hp['hidden_size']),
- 'conv': lambda hp, dict_size: TextConvEncoder(dict_size, hp['hidden_size'], hp['hidden_size'],
- hp['enc_dilations'], hp['enc_kernel_size'],
- layers_in_block=hp['layers_in_block'],
- norm_type=hp['enc_dec_norm'],
- post_net_kernel=hp.get('enc_post_net_kernel', 3)),
- 'rel_fft': lambda hp, dict_size: RelTransformerEncoder(
- dict_size, hp['hidden_size'], hp['hidden_size'],
- hp['ffn_hidden_size'], hp['num_heads'], hp['enc_layers'],
- hp['enc_ffn_kernel_size'], hp['dropout'], prenet=hp['enc_prenet'], pre_ln=hp['enc_pre_ln']),
-}
-
-FS_DECODERS = {
- 'fft': lambda hp: FastSpeechDecoder(
- hp['hidden_size'], hp['dec_layers'], hp['dec_ffn_kernel_size'], hp['num_heads']),
- 'rnn': lambda hp: DecoderRNN(hp['hidden_size'], hp['decoder_rnn_dim'], hp['dropout']),
- 'conv': lambda hp: ConvBlocks(hp['hidden_size'], hp['hidden_size'], hp['dec_dilations'],
- hp['dec_kernel_size'], layers_in_block=hp['layers_in_block'],
- norm_type=hp['enc_dec_norm'], dropout=hp['dropout'],
- post_net_kernel=hp.get('dec_post_net_kernel', 3)),
- 'wn': lambda hp: WN(hp['hidden_size'], kernel_size=5, dilation_rate=1, n_layers=hp['dec_layers'],
- is_BTC=True),
-}
-
-
-class FastSpeech(nn.Module):
- def __init__(self, dict_size, hparams, out_dims=None):
- super().__init__()
- self.hparams = deepcopy(hparams)
- self.enc_layers = hparams['enc_layers']
- self.dec_layers = hparams['dec_layers']
- self.hidden_size = hparams['hidden_size']
- self.encoder = FS_ENCODERS[hparams['encoder_type']](hparams, dict_size)
- self.decoder = FS_DECODERS[hparams['decoder_type']](hparams)
- self.out_dims = hparams['audio_num_mel_bins'] if out_dims is None else out_dims
- self.mel_out = nn.Linear(self.hidden_size, self.out_dims, bias=True)
- if hparams['use_spk_id']:
- self.spk_id_proj = Embedding(hparams['num_spk'], self.hidden_size)
- if hparams['use_spk_embed']:
- self.spk_embed_proj = nn.Linear(256, self.hidden_size, bias=True)
- predictor_hidden = hparams['predictor_hidden'] if hparams['predictor_hidden'] > 0 else self.hidden_size
- self.dur_predictor = DurationPredictor(
- self.hidden_size,
- n_chans=predictor_hidden,
- n_layers=hparams['dur_predictor_layers'],
- dropout_rate=hparams['predictor_dropout'],
- kernel_size=hparams['dur_predictor_kernel'])
- self.length_regulator = LengthRegulator()
- if hparams['use_pitch_embed']:
- self.pitch_embed = Embedding(300, self.hidden_size, 0)
- self.pitch_predictor = PitchPredictor(
- self.hidden_size, n_chans=predictor_hidden,
- n_layers=5, dropout_rate=0.1, odim=2,
- kernel_size=hparams['predictor_kernel'])
- if hparams['dec_inp_add_noise']:
- self.z_channels = hparams['z_channels']
- self.dec_inp_noise_proj = nn.Linear(self.hidden_size + self.z_channels, self.hidden_size)
-
- def forward(self, txt_tokens, mel2ph=None, spk_embed=None, spk_id=None,
- f0=None, uv=None, infer=False, **kwargs):
- ret = {}
- encoder_out = self.encoder(txt_tokens) # [B, T, C]
- src_nonpadding = (txt_tokens > 0).float()[:, :, None]
- style_embed = self.forward_style_embed(spk_embed, spk_id)
-
- # add dur
- dur_inp = (encoder_out + style_embed) * src_nonpadding
- mel2ph = self.forward_dur(dur_inp, mel2ph, txt_tokens, ret)
- tgt_nonpadding = (mel2ph > 0).float()[:, :, None]
- decoder_inp = expand_states(encoder_out, mel2ph)
-
- # add pitch embed
- if self.hparams['use_pitch_embed']:
- pitch_inp = (decoder_inp + style_embed) * tgt_nonpadding
- decoder_inp = decoder_inp + self.forward_pitch(pitch_inp, f0, uv, mel2ph, ret, encoder_out)
-
- # decoder input
- ret['decoder_inp'] = decoder_inp = (decoder_inp + style_embed) * tgt_nonpadding
- if self.hparams['dec_inp_add_noise']:
- B, T, _ = decoder_inp.shape
- z = kwargs.get('adv_z', torch.randn([B, T, self.z_channels])).to(decoder_inp.device)
- ret['adv_z'] = z
- decoder_inp = torch.cat([decoder_inp, z], -1)
- decoder_inp = self.dec_inp_noise_proj(decoder_inp) * tgt_nonpadding
- ret['mel_out'] = self.forward_decoder(decoder_inp, tgt_nonpadding, ret, infer=infer, **kwargs)
- return ret
-
- def forward_style_embed(self, spk_embed=None, spk_id=None):
- # add spk embed
- style_embed = 0
- if self.hparams['use_spk_embed']:
- style_embed = style_embed + self.spk_embed_proj(spk_embed)[:, None, :]
- if self.hparams['use_spk_id']:
- style_embed = style_embed + self.spk_id_proj(spk_id)[:, None, :]
- return style_embed
-
- def forward_dur(self, dur_input, mel2ph, txt_tokens, ret):
- """
-
- :param dur_input: [B, T_txt, H]
- :param mel2ph: [B, T_mel]
- :param txt_tokens: [B, T_txt]
- :param ret:
- :return:
- """
- src_padding = txt_tokens == 0
- if self.hparams['predictor_grad'] != 1:
- dur_input = dur_input.detach() + self.hparams['predictor_grad'] * (dur_input - dur_input.detach())
- dur = self.dur_predictor(dur_input, src_padding)
- ret['dur'] = dur
- if mel2ph is None:
- mel2ph = self.length_regulator(dur, src_padding).detach()
- ret['mel2ph'] = mel2ph = clip_mel2token_to_multiple(mel2ph, self.hparams['frames_multiple'])
- return mel2ph
-
- def forward_pitch(self, decoder_inp, f0, uv, mel2ph, ret, encoder_out=None):
- if self.hparams['pitch_type'] == 'frame':
- pitch_pred_inp = decoder_inp
- pitch_padding = mel2ph == 0
- else:
- pitch_pred_inp = encoder_out
- pitch_padding = encoder_out.abs().sum(-1) == 0
- uv = None
- if self.hparams['predictor_grad'] != 1:
- pitch_pred_inp = pitch_pred_inp.detach() + \
- self.hparams['predictor_grad'] * (pitch_pred_inp - pitch_pred_inp.detach())
- ret['pitch_pred'] = pitch_pred = self.pitch_predictor(pitch_pred_inp)
- use_uv = self.hparams['pitch_type'] == 'frame' and self.hparams['use_uv']
- if f0 is None:
- f0 = pitch_pred[:, :, 0]
- if use_uv:
- uv = pitch_pred[:, :, 1] > 0
- f0_denorm = denorm_f0(f0, uv if use_uv else None, pitch_padding=pitch_padding)
- pitch = f0_to_coarse(f0_denorm) # start from 0 [B, T_txt]
- ret['f0_denorm'] = f0_denorm
- ret['f0_denorm_pred'] = denorm_f0(
- pitch_pred[:, :, 0], (pitch_pred[:, :, 1] > 0) if use_uv else None,
- pitch_padding=pitch_padding)
- if self.hparams['pitch_type'] == 'ph':
- pitch = torch.gather(F.pad(pitch, [1, 0]), 1, mel2ph)
- ret['f0_denorm'] = torch.gather(F.pad(ret['f0_denorm'], [1, 0]), 1, mel2ph)
- ret['f0_denorm_pred'] = torch.gather(F.pad(ret['f0_denorm_pred'], [1, 0]), 1, mel2ph)
- pitch_embed = self.pitch_embed(pitch)
- return pitch_embed
-
- def forward_decoder(self, decoder_inp, tgt_nonpadding, ret, infer, **kwargs):
- x = decoder_inp # [B, T, H]
- x = self.decoder(x)
- x = self.mel_out(x)
- return x * tgt_nonpadding
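The length-regulation step above (expanding phoneme-level encoder states to frame level through `mel2ph`) is the part most worth seeing in isolation. A minimal sketch, assuming `mel2ph` uses 1-based phoneme indices with 0 marking padded frames, which is what `expand_states` appears to expect:

import torch
import torch.nn.functional as F

B, T_txt, H = 1, 3, 4
encoder_out = torch.randn(B, T_txt, H)              # one state per phoneme
mel2ph = torch.tensor([[1, 1, 2, 2, 2, 3, 0, 0]])   # frame -> phoneme index, 0 = padding

# Prepend a zero state so index 0 maps padded frames to zeros, then gather per frame.
padded = F.pad(encoder_out, [0, 0, 1, 0])           # (B, T_txt + 1, H)
decoder_inp = torch.gather(padded, 1, mel2ph[..., None].expand(-1, -1, H))
print(decoder_inp.shape)                            # torch.Size([1, 8, 4])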
diff --git a/spaces/NCTCMumbai/NCTC/models/official/benchmark/models/shakespeare/shakespeare_main.py b/spaces/NCTCMumbai/NCTC/models/official/benchmark/models/shakespeare/shakespeare_main.py
deleted file mode 100644
index 6928dd1d61491acf84b969a52c7f0693617ac7f0..0000000000000000000000000000000000000000
--- a/spaces/NCTCMumbai/NCTC/models/official/benchmark/models/shakespeare/shakespeare_main.py
+++ /dev/null
@@ -1,313 +0,0 @@
-# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""Runs a character LSTM model trained on Shakespeare."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import functools
-import os
-
-# pylint: disable=wrong-import-order
-from absl import app
-from absl import flags
-import numpy as np
-import tensorflow as tf
-# pylint: enable=wrong-import-order
-
-from official.utils.flags import core as flags_core
-from official.utils.misc import distribution_utils
-from official.utils.misc import keras_utils
-
-EMBEDDING_DIM = 256
-RNN_UNITS = 1024
-SEQ_LENGTH = 100
-# Calculated by running batch_size=1
-BATCHES_PER_EPOCH = 11043
-
-
-def define_flags():
- """Define the flags for the Shakespeare character LSTM."""
- flags_core.define_base(data_dir=False,
- clean=False,
- train_epochs=True,
- epochs_between_evals=False,
- stop_threshold=False,
- num_gpu=True,
- export_dir=False,
- run_eagerly=True,
- distribution_strategy=True)
-
- flags_core.define_performance(num_parallel_calls=False,
- inter_op=False,
- intra_op=False,
- synthetic_data=False,
- max_train_steps=False,
- dtype=True,
- loss_scale=True,
- enable_xla=True)
-
- flags_core.set_defaults(train_epochs=43,
- batch_size=64)
-
- flags.DEFINE_boolean(name='enable_eager', default=True, help='Enable eager?')
- flags.DEFINE_boolean(
- name='train', default=True,
- help='If true trains the model.')
- flags.DEFINE_string(
- name='predict_context', default=None,
- help='If set, makes a prediction with the given context.')
- flags.DEFINE_integer(
- name='predict_length', default=1000,
- help='Length of the predicted text including the context.')
- flags.DEFINE_integer(name='train_steps', default=None,
- help='Overrides train_steps per epoch if not None.')
- flags.DEFINE_integer(
- name='log_steps', default=100,
- help='For every log_steps, we log the timing information such as '
- 'examples per second.')
- flags.DEFINE_string(
- name='training_data', default=None,
- help='Path to file containing the training data.')
- flags.DEFINE_boolean(name='cudnn', default=True, help='Use CuDNN LSTM.')
-
-
-def get_dataset(path_to_file, batch_size=None, seq_length=SEQ_LENGTH):
- """Creates a dataset from a given text file.
-
- Args:
- path_to_file: The path to the training data.
- batch_size: Batch size to use.
- seq_length: The length of the LSTM sequence.
-
- Returns:
- A tuple, consisting of the Dataset and the class to character mapping
- and character to class mapping.
- """
- with tf.io.gfile.GFile(path_to_file, 'rb') as train_data:
- text = train_data.read().decode(encoding='utf-8')
-
- # Create vocab
- vocab = sorted(set(text))
- char2idx = {u: i for i, u in enumerate(vocab)}
- idx2char = np.array(vocab)
-
- # Split text into sequence length + 1 chunks to create examples
- text_as_int = np.array([char2idx[c] for c in text])
- char_dataset = tf.data.Dataset.from_tensor_slices(text_as_int)
- sequences = char_dataset.batch(seq_length+1, drop_remainder=True)
-
- def split_input_target(chunk):
- input_text = chunk[:-1]
- target_text = chunk[1:]
- return input_text, tf.one_hot(target_text, len(vocab))
- dataset = sequences.map(split_input_target)
- dataset = dataset.shuffle(10000).repeat()
- dataset = dataset.batch(batch_size, drop_remainder=True)
-
- return dataset, idx2char, char2idx
-
-
-def build_model(vocab_size,
- embedding_dim=EMBEDDING_DIM,
- rnn_units=RNN_UNITS,
- batch_size=None,
- stateful=False,
- use_cudnn=True):
- """Builds the Shakespeare model.
-
- Args:
- vocab_size: The number of character classes in the input.
- embedding_dim: The dimension of the embedding space for each class.
- rnn_units: The number of RNN units in the layer.
- batch_size: When predicting, the batch size of the predictions.
- stateful: If true, the LSTM is stateful.
- use_cudnn: If true, keep the standard 'tanh' activation so the LSTM can
- dispatch to the CuDNN kernel; otherwise force the non-CuDNN path.
-
- Returns:
- A Keras Model.
- """
- LSTM = functools.partial(tf.keras.layers.LSTM, implementation=2)
-
- # By indirecting the activation through a lambda layer, the logic to dispatch
- # to CuDNN in V2 doesn't trigger and we force the LSTM to run in non-CuDNN
- # mode.
- lstm_activation = ('tanh' if use_cudnn else
- lambda x: tf.math.tanh(x))
-
- batch_shape = [batch_size if stateful else None, None]
- return tf.keras.Sequential([
- tf.keras.layers.Embedding(vocab_size, embedding_dim,
- batch_input_shape=batch_shape),
- LSTM(rnn_units,
- activation=lstm_activation,
- return_sequences=True,
- stateful=stateful,
- recurrent_initializer='glorot_uniform'),
- tf.keras.layers.Dense(vocab_size),
- tf.keras.layers.Softmax(dtype=tf.float32)])
-
-
-def train_model(flags_obj, dataset, vocab_size, strategy, checkpoint_dir=None):
- """Trains a Shakespeare model.
-
- Args:
- flags_obj: An object containing parsed flag values.
- dataset: the training data set.
- vocab_size: the number of unique character classes.
- strategy: distribution strategy to use.
- checkpoint_dir: if not None, the directory in which to make checkpoints.
-
- Returns:
- The training history and callbacks.
- """
- if flags_obj.train_steps:
- train_steps = flags_obj.train_steps
- else:
- train_steps = BATCHES_PER_EPOCH // flags_obj.batch_size
- strategy_scope = distribution_utils.get_strategy_scope(strategy)
-
- with strategy_scope:
- model = build_model(vocab_size=vocab_size, batch_size=flags_obj.batch_size,
- use_cudnn=flags_obj.cudnn)
-
- # When keras_use_ctl is False, Model.fit() automatically applies
- # loss scaling so we don't need to create a LossScaleOptimizer.
- model.compile(
- optimizer=tf.keras.optimizers.Adam(),
- loss=tf.keras.losses.CategoricalCrossentropy(),
- metrics=[tf.keras.metrics.Recall(top_k=1, name='RecallAt1'),
- tf.keras.metrics.Recall(top_k=5, name='RecallAt5')],
- run_eagerly=flags_obj.run_eagerly)
-
- callbacks = []
- if checkpoint_dir:
- checkpoint_prefix = os.path.join(checkpoint_dir, 'ckpt_{epoch}')
- checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
- filepath=checkpoint_prefix,
- save_weights_only=True)
- callbacks.append(checkpoint_callback)
- time_callback = keras_utils.TimeHistory(flags_obj.batch_size,
- flags_obj.log_steps)
- callbacks.append(time_callback)
- history = model.fit(dataset,
- epochs=flags_obj.train_epochs,
- steps_per_epoch=train_steps,
- callbacks=callbacks,
- verbose=2)
- return history, callbacks
-
-
-def make_prediction(checkpoint_dir, length, context, idx2char, char2idx):
- """Make predictions from a Shakespeare model.
-
- Args:
- checkpoint_dir: the directory from which to load checkpoints
- length: the total length of the generated text (including the context).
- context: the initial text with which the LSTM is primed.
- idx2char: the character class to character mapping.
- char2idx: the character to character class mapping.
-
- Returns:
- A generated string of text of the given length.
- """
- prediction_model = build_model(
- vocab_size=len(idx2char), batch_size=1, stateful=True)
- prediction_model.load_weights(tf.train.latest_checkpoint(checkpoint_dir))
- prediction_model.build(tf.TensorShape([1, None]))
-
- input_eval = [char2idx[s] for s in context]
- input_eval = tf.expand_dims(input_eval, 0)
-
- text_generated = []
-
- prediction_model.reset_states()
- for _ in range(length - len(context)):
- predictions = prediction_model(input_eval)
- predictions = tf.squeeze(predictions, 0)
-
- # We applied a softmax to the output of the model so that
- # tf.keras.metrics.Recall would work. We need logits for
- # tf.random.categorical, so we convert the probabilities back to log odds
- predictions = tf.math.log(predictions / (1 - predictions))
-
- random_output = tf.random.categorical(predictions, num_samples=1)
- selected_id = random_output[-1, 0].numpy()
- input_eval = tf.expand_dims([selected_id], 0)
- text_generated.append(idx2char[selected_id])
-
- return context + ''.join(text_generated)
-
-
-def run(flags_obj):
- """Run Shakespeare training and predict.
-
- Args:
- flags_obj: An object containing parsed flag values.
-
- Returns:
- Dictionary with status from the run.
- """
- if not flags_obj.training_data:
- raise ValueError(
- 'Must set the path to a training data file, e.g. download the following '
- 'https://storage.googleapis.com/download.tensorflow.org/data/'
- 'shakespeare.txt')
-
- if flags_obj.dtype == 'fp16':
- policy = tf.keras.mixed_precision.experimental.Policy(
- 'mixed_float16',
- loss_scale=flags_core.get_loss_scale(flags_obj,
- default_for_fp16='dynamic'))
- tf.keras.mixed_precision.experimental.set_policy(policy)
-
- keras_utils.set_session_config(
- enable_xla=flags_obj.enable_xla)
-
- strategy = distribution_utils.get_distribution_strategy(
- distribution_strategy=flags_obj.distribution_strategy,
- num_gpus=flags_obj.num_gpus)
-
- dataset, idx2char, char2idx = get_dataset(flags_obj.training_data,
- batch_size=flags_obj.batch_size)
- stats = {}
- if flags_obj.train:
- history, callbacks = train_model(flags_obj, dataset,
- len(idx2char), strategy,
- checkpoint_dir=flags_obj.model_dir)
-
- stats['history'] = history.history
- stats['callbacks'] = callbacks
-
- if flags_obj.predict_context:
- if not flags_obj.model_dir:
- raise ValueError('Must set model_dir to get predictions.')
- print(make_prediction(flags_obj.model_dir,
- flags_obj.predict_length,
- flags_obj.predict_context,
- idx2char,
- char2idx))
-
- return stats
-
-
-def main(_):
- flags_obj = flags.FLAGS
- run(flags_obj)
-
-
-if __name__ == '__main__':
- define_flags()
- app.run(main)
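The input pipeline above slices the corpus into (seq_length + 1)-character chunks and shifts each chunk by one character to form input/target pairs. A tiny dependency-free sketch of that windowing on a toy string:

text = "to be or not to be"
vocab = sorted(set(text))
char2idx = {c: i for i, c in enumerate(vocab)}

seq_length = 5
ids = [char2idx[c] for c in text]
chunks = [ids[i:i + seq_length + 1]
          for i in range(0, len(ids) - seq_length, seq_length + 1)]

for chunk in chunks:
    inputs, targets = chunk[:-1], chunk[1:]   # predict the next character at every step
    print(inputs, "->", targets)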
diff --git a/spaces/Nee001/bing0/src/components/header.tsx b/spaces/Nee001/bing0/src/components/header.tsx
deleted file mode 100644
index dc298b722154d1ac6d7a7e148204605562d6cc58..0000000000000000000000000000000000000000
--- a/spaces/Nee001/bing0/src/components/header.tsx
+++ /dev/null
@@ -1,12 +0,0 @@
-import * as React from 'react'
-import { UserMenu } from './user-menu'
-
-export async function Header() {
- return (
-
-
-
-
-
- )
-}
diff --git a/spaces/Nee001/bing0/src/components/ui/codeblock.tsx b/spaces/Nee001/bing0/src/components/ui/codeblock.tsx
deleted file mode 100644
index aabda4e3b59f4e36b6ab79feb19d8d18b70e881b..0000000000000000000000000000000000000000
--- a/spaces/Nee001/bing0/src/components/ui/codeblock.tsx
+++ /dev/null
@@ -1,142 +0,0 @@
-'use client'
-
-import { FC, memo } from 'react'
-import { Prism as SyntaxHighlighter } from 'react-syntax-highlighter'
-import { coldarkDark } from 'react-syntax-highlighter/dist/cjs/styles/prism'
-
-import { useCopyToClipboard } from '@/lib/hooks/use-copy-to-clipboard'
-import { IconCheck, IconCopy, IconDownload } from '@/components/ui/icons'
-import { Button } from '@/components/ui/button'
-
-interface Props {
- language: string
- value: string
-}
-
-interface languageMap {
- [key: string]: string | undefined
-}
-
-export const programmingLanguages: languageMap = {
- javascript: '.js',
- python: '.py',
- java: '.java',
- c: '.c',
- cpp: '.cpp',
- 'c++': '.cpp',
- 'c#': '.cs',
- ruby: '.rb',
- php: '.php',
- swift: '.swift',
- 'objective-c': '.m',
- kotlin: '.kt',
- typescript: '.ts',
- go: '.go',
- perl: '.pl',
- rust: '.rs',
- scala: '.scala',
- haskell: '.hs',
- lua: '.lua',
- shell: '.sh',
- sql: '.sql',
- html: '.html',
- css: '.css'
- // add more file extensions here, make sure the key is same as language prop in CodeBlock.tsx component
-}
-
-export const generateRandomString = (length: number, lowercase = false) => {
- const chars = 'ABCDEFGHJKLMNPQRSTUVWXY3456789' // excluding similar looking characters like Z, 2, I, 1, O, 0
- let result = ''
- for (let i = 0; i < length; i++) {
- result += chars.charAt(Math.floor(Math.random() * chars.length))
- }
- return lowercase ? result.toLowerCase() : result
-}
-
-const CodeBlock: FC<Props> = memo(({ language, value }) => {
- const { isCopied, copyToClipboard } = useCopyToClipboard({ timeout: 2000 })
-
- const downloadAsFile = () => {
- if (typeof window === 'undefined') {
- return
- }
- const fileExtension = programmingLanguages[language] || '.file'
- const suggestedFileName = `file-${generateRandomString(
- 3,
- true
- )}${fileExtension}`
- const fileName = window.prompt('Enter file name' || '', suggestedFileName)
-
- if (!fileName) {
- // User pressed cancel on prompt.
- return
- }
-
- const blob = new Blob([value], { type: 'text/plain' })
- const url = URL.createObjectURL(blob)
- const link = document.createElement('a')
- link.download = fileName
- link.href = url
- link.style.display = 'none'
- document.body.appendChild(link)
- link.click()
- document.body.removeChild(link)
- URL.revokeObjectURL(url)
- }
-
- const onCopy = () => {
- if (isCopied) return
- copyToClipboard(value)
- }
-
- return (
-
-
- {language}
-
-
-
-
-
-
- {value}
-
-
- )
-})
-CodeBlock.displayName = 'CodeBlock'
-
-export { CodeBlock }
diff --git a/spaces/Nightwing25/AICoverGen/src/infer_pack/commons.py b/spaces/Nightwing25/AICoverGen/src/infer_pack/commons.py
deleted file mode 100644
index 54470986f37825b35d90d7efa7437d1c26b87215..0000000000000000000000000000000000000000
--- a/spaces/Nightwing25/AICoverGen/src/infer_pack/commons.py
+++ /dev/null
@@ -1,166 +0,0 @@
-import math
-import numpy as np
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-
-def init_weights(m, mean=0.0, std=0.01):
- classname = m.__class__.__name__
- if classname.find("Conv") != -1:
- m.weight.data.normal_(mean, std)
-
-
-def get_padding(kernel_size, dilation=1):
- return int((kernel_size * dilation - dilation) / 2)
-
-
-def convert_pad_shape(pad_shape):
- l = pad_shape[::-1]
- pad_shape = [item for sublist in l for item in sublist]
- return pad_shape
-
-
-def kl_divergence(m_p, logs_p, m_q, logs_q):
- """KL(P||Q)"""
- kl = (logs_q - logs_p) - 0.5
- kl += (
- 0.5 * (torch.exp(2.0 * logs_p) + ((m_p - m_q) ** 2)) * torch.exp(-2.0 * logs_q)
- )
- return kl
-
-
-def rand_gumbel(shape):
- """Sample from the Gumbel distribution, protect from overflows."""
- uniform_samples = torch.rand(shape) * 0.99998 + 0.00001
- return -torch.log(-torch.log(uniform_samples))
-
-
-def rand_gumbel_like(x):
- g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device)
- return g
-
-
-def slice_segments(x, ids_str, segment_size=4):
- ret = torch.zeros_like(x[:, :, :segment_size])
- for i in range(x.size(0)):
- idx_str = ids_str[i]
- idx_end = idx_str + segment_size
- ret[i] = x[i, :, idx_str:idx_end]
- return ret
-
-
-def slice_segments2(x, ids_str, segment_size=4):
- ret = torch.zeros_like(x[:, :segment_size])
- for i in range(x.size(0)):
- idx_str = ids_str[i]
- idx_end = idx_str + segment_size
- ret[i] = x[i, idx_str:idx_end]
- return ret
-
-
-def rand_slice_segments(x, x_lengths=None, segment_size=4):
- b, d, t = x.size()
- if x_lengths is None:
- x_lengths = t
- ids_str_max = x_lengths - segment_size + 1
- ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
- ret = slice_segments(x, ids_str, segment_size)
- return ret, ids_str
-
-
-def get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4):
- position = torch.arange(length, dtype=torch.float)
- num_timescales = channels // 2
- log_timescale_increment = math.log(float(max_timescale) / float(min_timescale)) / (
- num_timescales - 1
- )
- inv_timescales = min_timescale * torch.exp(
- torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment
- )
- scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1)
- signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0)
- signal = F.pad(signal, [0, 0, 0, channels % 2])
- signal = signal.view(1, channels, length)
- return signal
-
-
-def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
- b, channels, length = x.size()
- signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
- return x + signal.to(dtype=x.dtype, device=x.device)
-
-
-def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):
- b, channels, length = x.size()
- signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
- return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis)
-
-
-def subsequent_mask(length):
- mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0)
- return mask
-
-
-@torch.jit.script
-def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
- n_channels_int = n_channels[0]
- in_act = input_a + input_b
- t_act = torch.tanh(in_act[:, :n_channels_int, :])
- s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
- acts = t_act * s_act
- return acts
-
-
-def convert_pad_shape(pad_shape):
- l = pad_shape[::-1]
- pad_shape = [item for sublist in l for item in sublist]
- return pad_shape
-
-
-def shift_1d(x):
- x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1]
- return x
-
-
-def sequence_mask(length, max_length=None):
- if max_length is None:
- max_length = length.max()
- x = torch.arange(max_length, dtype=length.dtype, device=length.device)
- return x.unsqueeze(0) < length.unsqueeze(1)
-
-
-def generate_path(duration, mask):
- """
- duration: [b, 1, t_x]
- mask: [b, 1, t_y, t_x]
- """
- device = duration.device
-
- b, _, t_y, t_x = mask.shape
- cum_duration = torch.cumsum(duration, -1)
-
- cum_duration_flat = cum_duration.view(b * t_x)
- path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
- path = path.view(b, t_x, t_y)
- path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
- path = path.unsqueeze(1).transpose(2, 3) * mask
- return path
-
-
-def clip_grad_value_(parameters, clip_value, norm_type=2):
- if isinstance(parameters, torch.Tensor):
- parameters = [parameters]
- parameters = list(filter(lambda p: p.grad is not None, parameters))
- norm_type = float(norm_type)
- if clip_value is not None:
- clip_value = float(clip_value)
-
- total_norm = 0
- for p in parameters:
- param_norm = p.grad.data.norm(norm_type)
- total_norm += param_norm.item() ** norm_type
- if clip_value is not None:
- p.grad.data.clamp_(min=-clip_value, max=clip_value)
- total_norm = total_norm ** (1.0 / norm_type)
- return total_norm
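A quick way to see what these helpers do is to run `sequence_mask` on a small batch of lengths. The sketch below re-declares the helper so it is self-contained; it assumes only that PyTorch is installed.

```python
import torch

def sequence_mask(length, max_length=None):
    # Same logic as the helper above: True wherever position < length.
    if max_length is None:
        max_length = length.max()
    x = torch.arange(max_length, dtype=length.dtype, device=length.device)
    return x.unsqueeze(0) < length.unsqueeze(1)

# Two sequences of lengths 2 and 4, padded to the batch maximum of 4.
lengths = torch.tensor([2, 4])
print(sequence_mask(lengths))
# tensor([[ True,  True, False, False],
#         [ True,  True,  True,  True]])
```

This per-position mask is what `generate_path` consumes when it turns cumulative durations into a monotonic alignment matrix.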
diff --git a/spaces/NomiWai/anime-collaborative-filtering-space/anime.py b/spaces/NomiWai/anime-collaborative-filtering-space/anime.py
deleted file mode 100644
index 86837d2862f8849b7ed36717a74f82b243f35e88..0000000000000000000000000000000000000000
--- a/spaces/NomiWai/anime-collaborative-filtering-space/anime.py
+++ /dev/null
@@ -1,31 +0,0 @@
-from surprise import dump
-import os
-import pandas as pd
-import gradio as gr
-
-model_lite_filename = "model_KNNWithMeans.pickle"
-file_name = os.path.expanduser(model_lite_filename)
-_, loaded_model = dump.load(file_name)
-print(f"{loaded_model.__class__.__name__} loaded.")
-
-def stringfy_with_new_lines(list_of_similar):
- return '\n'.join(list_of_similar)
-
-def get_similar_items(anime_name):
- print(anime_name)
- return stringfy_with_new_lines(extract_similar_items_from_model(loaded_model, anime_name)["Title"].tolist())
-
-
-def extract_similar_items_from_model(loaded_knn_model, anime_title, k=30):
- iid = loaded_knn_model.trainset.to_inner_iid(anime_title)
- neighbor_ids = loaded_knn_model.get_neighbors(iid, k=k)
- neighbors = (
- loaded_knn_model.trainset.to_raw_iid(inner_id) for inner_id in neighbor_ids
- )
- df = pd.DataFrame(neighbors, columns=["Title"])
- return df
-
-
-demo = gr.Interface(fn=get_similar_items, inputs="text", outputs="text", title="Anime Show Recommender", description="Please use the titles in MyAnimeList exactly as it is. (https://myanimelist.net/). If you see an error either the title doesn't match exactly how it is on MyAnimeList or they are new animes (after 2019). I'm looking into live updates! :)")
-
-demo.launch()
\ No newline at end of file
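For reference, the neighbor lookup in `extract_similar_items_from_model` maps a raw title to Surprise's inner item id, asks the fitted KNN for neighbors, and maps the ids back to titles. A minimal sketch of the same round trip on a toy dataset (the titles and ratings below are made up for illustration):

```python
import pandas as pd
from surprise import Dataset, KNNWithMeans, Reader

# Toy user/item/rating triples standing in for the real MyAnimeList data.
ratings = pd.DataFrame({
    "user": ["u1", "u1", "u2", "u2", "u3", "u3"],
    "item": ["Naruto", "Bleach", "Naruto", "One Piece", "Bleach", "One Piece"],
    "rating": [8, 7, 9, 8, 6, 9],
})
data = Dataset.load_from_df(ratings[["user", "item", "rating"]], Reader(rating_scale=(1, 10)))
trainset = data.build_full_trainset()

model = KNNWithMeans(sim_options={"user_based": False})  # item-based similarity
model.fit(trainset)

# Same pattern as above: raw title -> inner id -> neighbor ids -> raw titles.
inner_id = model.trainset.to_inner_iid("Naruto")
neighbor_titles = [model.trainset.to_raw_iid(i) for i in model.get_neighbors(inner_id, k=2)]
print(neighbor_titles)
```

The Space itself skips the training step because it loads a pre-trained `KNNWithMeans` pickle via `dump.load` instead.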
diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/m2m_100/tokenizers/README.md b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/m2m_100/tokenizers/README.md
deleted file mode 100644
index e116932bc80572f221cff6472a7b1eea7032925d..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/m2m_100/tokenizers/README.md
+++ /dev/null
@@ -1,18 +0,0 @@
-# M2M-100 Tokenization
-
-We apply different tokenization strategies for different languages following the existing literature. Here we provide tok.sh, a tokenizer that can be used to reproduce our results.
-
-To reproduce the results, follow these steps:
-
-```
-tgt_lang=...
-reference_translation=...
-cat generation_output | grep -P "^H" | sort -V | cut -f 3- | sh tok.sh $tgt_lang > hyp
-cat $reference_translation |sh tok.sh $tgt_lang > ref
-sacrebleu -tok 'none' ref < hyp
-```
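For the final scoring step, sacrebleu's Python API gives the same number as the command line above. A small sketch, assuming the tokenized `hyp` and `ref` files produced by `tok.sh`:

```python
import sacrebleu

# Equivalent of `sacrebleu -tok 'none' ref < hyp` on the already-tokenized files.
with open("hyp") as f:
    hyps = [line.rstrip("\n") for line in f]
with open("ref") as f:
    refs = [line.rstrip("\n") for line in f]

bleu = sacrebleu.corpus_bleu(hyps, [refs], tokenize="none")
print(bleu.score)
```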
-
-## Installation
-
-Tools needed for all the languages except Arabic can be installed by running install_dependencies.sh.
-If you want to evaluate Arabic models, please follow the instructions at http://alt.qcri.org/tools/arabic-normalizer/ to install the Arabic normalizer.
diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/pointer_generator/pointer_generator_src/transformer_pg.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/pointer_generator/pointer_generator_src/transformer_pg.py
deleted file mode 100644
index 4ccf30f4eb154f8fab1e285934fb973a2d1166cb..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/pointer_generator/pointer_generator_src/transformer_pg.py
+++ /dev/null
@@ -1,518 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import logging
-from typing import Any, Dict, Optional, List, Tuple
-
-import torch
-import torch.nn as nn
-from fairseq import utils
-from fairseq.models import register_model, register_model_architecture
-from fairseq.models.transformer import (
- DEFAULT_MAX_SOURCE_POSITIONS,
- DEFAULT_MAX_TARGET_POSITIONS,
- TransformerDecoder,
- TransformerEncoder,
- TransformerModel,
- base_architecture,
-)
-from torch import Tensor
-
-
-logger = logging.getLogger(__name__)
-
-
-@register_model("transformer_pointer_generator")
-class TransformerPointerGeneratorModel(TransformerModel):
- """
- Transformer model from `"Attention Is All You Need" (Vaswani et al, 2017)
- <https://arxiv.org/abs/1706.03762>`_, augmented with a pointer-generator
- network from `"Get To The Point: Summarization with Pointer-Generator
- Networks" (See et al, 2017) <https://arxiv.org/abs/1704.04368>`_.
-
- Args:
- encoder (TransformerPointerGeneratorEncoder): the encoder
- decoder (TransformerPointerGeneratorDecoder): the decoder
-
- The Transformer pointer-generator model provides the following named
- architectures and command-line arguments:
-
- .. argparse::
- :ref: fairseq.models.transformer_pointer_generator_parser
- :prog:
- """
-
- @staticmethod
- def add_args(parser):
- """Add model-specific arguments to the parser."""
- # fmt: off
- TransformerModel.add_args(parser)
- parser.add_argument('--alignment-heads', type=int, metavar='N',
- help='number of attention heads to be used for '
- 'pointing')
- parser.add_argument('--alignment-layer', type=int, metavar='I',
- help='layer number to be used for pointing (0 '
- 'corresponding to the bottommost layer)')
- parser.add_argument('--source-position-markers', type=int, metavar='N',
- help='dictionary includes N additional items that '
- 'represent an OOV token at a particular input '
- 'position')
- parser.add_argument('--force-generation', type=float, metavar='P',
- default=None,
- help='set the vocabulary distribution weight to P, '
- 'instead of predicting it from the input (1.0 '
- 'corresponding to generation, 0.0 to pointing)')
- # fmt: on
-
- @classmethod
- def build_model(cls, args, task):
- """Build a new model instance."""
-
- # make sure all arguments are present in older models
- base_architecture(args)
-
- if args.encoder_layers_to_keep:
- args.encoder_layers = len(args.encoder_layers_to_keep.split(","))
- if args.decoder_layers_to_keep:
- args.decoder_layers = len(args.decoder_layers_to_keep.split(","))
-
- if getattr(args, "max_source_positions", None) is None:
- args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS
- if getattr(args, "max_target_positions", None) is None:
- args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS
- if getattr(args, "source_position_markers", None) is None:
- args.source_position_markers = args.max_source_positions
-
- src_dict, tgt_dict = task.source_dictionary, task.target_dictionary
- if src_dict != tgt_dict:
- raise ValueError("Pointer-generator requires a joined dictionary")
-
- def build_embedding(dictionary, embed_dim, path=None):
- # The dictionary may include additional items that can be used in
- # place of the normal OOV token and that all map to the same
- # embedding. Using a different token for each input position allows
- # one to restore the word identities from the original source text.
- num_embeddings = len(dictionary) - args.source_position_markers
- padding_idx = dictionary.pad()
- unk_idx = dictionary.unk()
- logger.info(
- "dictionary indices from {0} to {1} will be mapped to {2}".format(
- num_embeddings, len(dictionary) - 1, unk_idx
- )
- )
- emb = Embedding(num_embeddings, embed_dim, padding_idx, unk_idx)
- # if provided, load from preloaded dictionaries
- if path:
- embed_dict = utils.parse_embedding(path)
- utils.load_embedding(embed_dict, dictionary, emb)
- return emb
-
- if args.share_all_embeddings:
- if args.encoder_embed_dim != args.decoder_embed_dim:
- raise ValueError(
- "--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim"
- )
- if args.decoder_embed_path and (
- args.decoder_embed_path != args.encoder_embed_path
- ):
- raise ValueError(
- "--share-all-embeddings not compatible with --decoder-embed-path"
- )
- encoder_embed_tokens = build_embedding(
- src_dict, args.encoder_embed_dim, args.encoder_embed_path
- )
- decoder_embed_tokens = encoder_embed_tokens
- args.share_decoder_input_output_embed = True
- else:
- encoder_embed_tokens = build_embedding(
- src_dict, args.encoder_embed_dim, args.encoder_embed_path
- )
- decoder_embed_tokens = build_embedding(
- tgt_dict, args.decoder_embed_dim, args.decoder_embed_path
- )
-
- encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens)
- decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens)
- return cls(args, encoder, decoder)
-
- @classmethod
- def build_encoder(cls, args, src_dict, embed_tokens):
- return TransformerPointerGeneratorEncoder(args, src_dict, embed_tokens)
-
- @classmethod
- def build_decoder(cls, args, tgt_dict, embed_tokens):
- return TransformerPointerGeneratorDecoder(args, tgt_dict, embed_tokens)
-
-
-class TransformerPointerGeneratorEncoder(TransformerEncoder):
- """
- Transformer encoder consisting of *args.encoder_layers* layers. Each layer
- is a :class:`TransformerEncoderLayer`. The pointer-generator variant adds
- the source tokens to the encoder output as these are otherwise not passed
- to the decoder.
- """
-
- def forward(
- self,
- src_tokens,
- src_lengths: Optional[Tensor] = None,
- return_all_hiddens: bool = False,
- token_embeddings: Optional[Tensor] = None
- ):
- """
- Runs the `forward()` method of the parent Transformer class. Then adds
- the source tokens into the encoder output tuple.
-
- While it might be more elegant that the model would pass the source
- tokens to the `forward()` method of the decoder too, this would require
- changes to `SequenceGenerator`.
-
- Args:
- src_tokens (torch.LongTensor): tokens in the source language of
- shape `(batch, src_len)`
- src_lengths (torch.LongTensor): lengths of each source sentence of
- shape `(batch)`
- return_all_hiddens (bool, optional): also return all of the
- intermediate hidden states (default: False).
- token_embeddings (torch.Tensor, optional): precomputed embeddings
- default `None` will recompute embeddings
-
- Returns:
- namedtuple:
- - **encoder_out** (Tensor): the last encoder layer's output of
- shape `(src_len, batch, embed_dim)`
- - **encoder_padding_mask** (ByteTensor): the positions of
- padding elements of shape `(batch, src_len)`
- - **encoder_embedding** (Tensor): the (scaled) embedding lookup
- of shape `(batch, src_len, embed_dim)`
- - **encoder_states** (List[Tensor]): all intermediate
- hidden states of shape `(src_len, batch, embed_dim)`.
- Only populated if *return_all_hiddens* is True.
- - **src_tokens** (Tensor): input token ids of shape
- `(batch, src_len)`
- """
- encoder_out = self.forward_scriptable(src_tokens,
- src_lengths,
- return_all_hiddens,
- token_embeddings)
-
- # The PyTorch Mobile lite interpreter does not support returning NamedTuple in
- # `forward` so we use a dictionary instead.
- # TorchScript does not support mixed values so the values are all lists.
- # The empty list is equivalent to None.
- return {
- "encoder_out": encoder_out["encoder_out"], # T x B x C
- "encoder_padding_mask": encoder_out["encoder_padding_mask"], # B x T
- "encoder_embedding": encoder_out["encoder_embedding"], # B x T x C
- "encoder_states": encoder_out["encoder_states"], # List[T x B x C]
- "src_tokens": [src_tokens], # B x T
- "src_lengths": [],
- }
-
-
-class TransformerPointerGeneratorDecoder(TransformerDecoder):
- """
- Transformer decoder consisting of *args.decoder_layers* layers. Each layer
- is a :class:`TransformerDecoderLayer`. The pointer-generator variant mixes
- the output probabilities with an attention distribution in the output layer.
-
- Args:
- args (argparse.Namespace): parsed command-line arguments
- dictionary (~fairseq.data.Dictionary): decoding dictionary
- embed_tokens (torch.nn.Embedding): output embedding
- """
-
- def __init__(self, args, dictionary, embed_tokens):
- super().__init__(args, dictionary, embed_tokens, no_encoder_attn=False)
-
- # In the pointer-generator model these arguments define the decoder
- # layer and the number of attention heads that will be averaged to
- # create the alignment for pointing.
- self.alignment_heads = args.alignment_heads
- self.alignment_layer = args.alignment_layer
-
- input_embed_dim = embed_tokens.embedding_dim
-
- # Generation probabilities / interpolation coefficients are predicted
- # from the current decoder input embedding and the decoder output, which
- # is the size of output_embed_dim.
- p_gen_input_size = input_embed_dim + self.output_embed_dim
- self.project_p_gens = nn.Linear(p_gen_input_size, 1)
- nn.init.zeros_(self.project_p_gens.bias)
-
- # The dictionary may include a separate entry for an OOV token in each
- # input position, so that their identity can be restored from the
- # original source text.
- self.num_types = len(dictionary)
- self.num_oov_types = args.source_position_markers
- self.num_embeddings = self.num_types - self.num_oov_types
- self.force_p_gen = args.force_generation
-
- def forward(
- self,
- prev_output_tokens,
- encoder_out: Optional[Dict[str, List[Tensor]]] = None,
- incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
- features_only: bool = False,
- alignment_layer: Optional[int] = 0,
- alignment_heads: Optional[int] = 1,
- src_lengths: Optional[Any] = None,
- return_all_hiddens: bool = False,
- ):
- """
- Args:
- prev_output_tokens (LongTensor): previous decoder outputs of shape
- `(batch, tgt_len)`, for teacher forcing
- encoder_out (optional): output from the encoder, used for
- encoder-side attention
- incremental_state (dict, optional): dictionary used for storing
- state during :ref:`Incremental decoding`
- features_only (bool, optional): only return features without
- applying output layer (default: False)
- alignment_layer (int, optional): 0-based index of the layer to be
- used for pointing (default: 0)
- alignment_heads (int, optional): number of attention heads to be
- used for pointing (default: 1)
-
- Returns:
- tuple:
- - the decoder's output of shape `(batch, tgt_len, vocab)`
- - a dictionary with any model-specific outputs
- """
- # The normal Transformer model doesn't pass the alignment_layer and
- # alignment_heads parameters correctly. We use our local variables.
- x, extra = self.extract_features(
- prev_output_tokens,
- encoder_out=encoder_out,
- incremental_state=incremental_state,
- alignment_layer=self.alignment_layer,
- alignment_heads=self.alignment_heads,
- )
- if not features_only:
- # Embedding the tokens again for generation probability prediction,
- # so that we don't have to reimplement the whole extract_features()
- # method.
- if incremental_state is not None:
- prev_output_tokens = prev_output_tokens[:, -1:]
- prev_output_embed = self.embed_tokens(prev_output_tokens)
- prev_output_embed *= self.embed_scale
- predictors = torch.cat((prev_output_embed, x), 2)
- p_gens = self.project_p_gens(predictors)
- p_gens = torch.sigmoid(p_gens.float())
- # Torchscript complains if encoder_out or attn are None because
- # `output_layer()` signature expects tensors instead
- attn: Optional[Tensor] = extra["attn"][0]
- assert encoder_out is not None
- assert attn is not None
- x = self.output_layer(x, attn, encoder_out["src_tokens"][0], p_gens)
- return x, extra
-
- def output_layer(
- self,
- features: Tensor,
- attn: Tensor,
- src_tokens: Tensor,
- p_gens: Tensor
- ) -> Tensor:
- """
- Project features to the vocabulary size and mix with the attention
- distributions.
- """
- if self.force_p_gen is not None:
- p_gens = self.force_p_gen
-
- # project back to size of vocabulary
- if self.adaptive_softmax is None:
- logits = self.output_projection(features)
- else:
- logits = features
-
- batch_size = logits.shape[0]
- output_length = logits.shape[1]
- assert logits.shape[2] == self.num_embeddings
- assert src_tokens.shape[0] == batch_size
- src_length = src_tokens.shape[1]
-
- # The final output distribution will be a mixture of the normal output
- # distribution (softmax of logits) and attention weights.
- gen_dists = self.get_normalized_probs_scriptable(
- (logits, None), log_probs=False, sample=None
- )
- gen_dists = torch.mul(gen_dists, p_gens)
- padding_size = (batch_size, output_length, self.num_oov_types)
- padding = gen_dists.new_zeros(padding_size)
- gen_dists = torch.cat((gen_dists, padding), 2)
- assert gen_dists.shape[2] == self.num_types
-
- # Scatter attention distributions to distributions over the extended
- # vocabulary in a tensor of shape [batch_size, output_length,
- # vocab_size]. Each attention weight will be written into a location
- # that is for other dimensions the same as in the index tensor, but for
- # the third dimension it's the value of the index tensor (the token ID).
- attn = torch.mul(attn.float(), 1 - p_gens)
- index = src_tokens[:, None, :]
- index = index.expand(batch_size, output_length, src_length)
- attn_dists_size = (batch_size, output_length, self.num_types)
- attn_dists = attn.new_zeros(attn_dists_size)
- attn_dists.scatter_add_(2, index, attn.float())
-
- # Final distributions, [batch_size, output_length, num_types].
- return gen_dists + attn_dists
-
- def get_normalized_probs(
- self,
- net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]],
- log_probs: bool,
- sample: Optional[Dict[str, Tensor]] = None,
- ):
- """
- Get normalized probabilities (or log probs) from a net's output.
- Pointer-generator network output is already normalized.
- """
- probs = net_output[0]
- # Make sure the probabilities are greater than zero when returning log
- # probabilities.
- return probs.clamp(1e-10, 1.0).log() if log_probs else probs
-
-
-class Embedding(nn.Embedding):
- r"""A simple lookup table that stores embeddings of a fixed dictionary and size.
- This module is often used to store word embeddings and retrieve them using indices.
- The input to the module is a list of indices, and the output is the corresponding
- word embeddings. This subclass differs from the standard PyTorch Embedding class by
- allowing additional vocabulary entries that will be mapped to the unknown token
- embedding.
- Args:
- num_embeddings (int): size of the dictionary of embeddings
- embedding_dim (int): the size of each embedding vector
- padding_idx (int): Pads the output with the embedding vector at :attr:`padding_idx`
- (initialized to zeros) whenever it encounters the index.
- unk_idx (int): Maps all token indices that are greater than or equal to
- num_embeddings to this index.
- Attributes:
- weight (Tensor): the learnable weights of the module of shape (num_embeddings, embedding_dim)
- initialized from :math:`\mathcal{N}(0, 1)`
- Shape:
- - Input: :math:`(*)`, LongTensor of arbitrary shape containing the indices to extract
- - Output: :math:`(*, H)`, where `*` is the input shape and :math:`H=\text{embedding\_dim}`
- .. note::
- Keep in mind that only a limited number of optimizers support
- sparse gradients: currently it's :class:`optim.SGD` (`CUDA` and `CPU`),
- :class:`optim.SparseAdam` (`CUDA` and `CPU`) and :class:`optim.Adagrad` (`CPU`)
- .. note::
- With :attr:`padding_idx` set, the embedding vector at
- :attr:`padding_idx` is initialized to all zeros. However, note that this
- vector can be modified afterwards, e.g., using a customized
- initialization method, and thus changing the vector used to pad the
- output. The gradient for this vector from :class:`~torch.nn.Embedding`
- is always zero.
- """
- __constants__ = ["unk_idx"]
-
- # Torchscript: Inheriting from Embedding class produces an error when exporting to Torchscript
- # -> RuntimeError: Unable to cast Python instance to C++ type (compile in debug mode for details)
- # It's happening because max_norm attribute from nn.Embedding is None by default and it cannot be
- # cast to a C++ type
- def __init__(
- self,
- num_embeddings: int,
- embedding_dim: int,
- padding_idx: Optional[int],
- unk_idx: int,
- max_norm: Optional[float] = float("inf"),
- ):
- super().__init__(num_embeddings, embedding_dim, padding_idx=padding_idx, max_norm=max_norm)
- self.unk_idx = unk_idx
- nn.init.normal_(self.weight, mean=0, std=embedding_dim ** -0.5)
- nn.init.constant_(self.weight[padding_idx], 0)
-
- def forward(self, input):
- input = torch.where(
- input >= self.num_embeddings, torch.ones_like(input) * self.unk_idx, input
- )
- return nn.functional.embedding(
- input, self.weight, self.padding_idx, self.max_norm,
- self.norm_type, self.scale_grad_by_freq, self.sparse
- )
-
-
-@register_model_architecture(
- "transformer_pointer_generator", "transformer_pointer_generator"
-)
-def transformer_pointer_generator(args):
- args.alignment_heads = getattr(args, "alignment_heads", 1)
- args.alignment_layer = getattr(args, "alignment_layer", -1)
- base_architecture(args)
- if args.alignment_layer < 0:
- args.alignment_layer = args.decoder_layers + args.alignment_layer
-
-
-@register_model_architecture(
- "transformer_pointer_generator", "transformer_pointer_generator_iwslt_de_en"
-)
-def transformer_pointer_generator_iwslt_de_en(args):
- args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
- args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 1024)
- args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4)
- args.encoder_layers = getattr(args, "encoder_layers", 6)
- args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512)
- args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 1024)
- args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4)
- args.decoder_layers = getattr(args, "decoder_layers", 6)
- transformer_pointer_generator(args)
-
-
-@register_model_architecture(
- "transformer_pointer_generator", "transformer_pointer_generator_wmt_en_de"
-)
-def transformer_pointer_generator_wmt_en_de(args):
- transformer_pointer_generator(args)
-
-
-# Transformer pointer-generator with the base Transformer parameters as used in
-# the "Attention Is All You Need" paper (Vaswani et al., 2017)
-@register_model_architecture(
- "transformer_pointer_generator",
- "transformer_pointer_generator_vaswani_wmt_en_de_big",
-)
-def transformer_pointer_generator_vaswani_wmt_en_de_big(args):
- args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024)
- args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096)
- args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16)
- args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
- args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024)
- args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4096)
- args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16)
- args.dropout = getattr(args, "dropout", 0.3)
- transformer_pointer_generator(args)
-
-
-@register_model_architecture(
- "transformer_pointer_generator",
- "transformer_pointer_generator_vaswani_wmt_en_fr_big",
-)
-def transformer_pointer_generator_vaswani_wmt_en_fr_big(args):
- args.dropout = getattr(args, "dropout", 0.1)
- transformer_pointer_generator_vaswani_wmt_en_de_big(args)
-
-
-@register_model_architecture(
- "transformer_pointer_generator", "transformer_pointer_generator_wmt_en_de_big"
-)
-def transformer_pointer_generator_wmt_en_de_big(args):
- args.attention_dropout = getattr(args, "attention_dropout", 0.1)
- transformer_pointer_generator_vaswani_wmt_en_de_big(args)
-
-
-# default parameters used in tensor2tensor implementation
-@register_model_architecture(
- "transformer_pointer_generator", "transformer_pointer_generator_wmt_en_de_big_t2t"
-)
-def transformer_pointer_generator_wmt_en_de_big_t2t(args):
- args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True)
- args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True)
- args.attention_dropout = getattr(args, "attention_dropout", 0.1)
- args.activation_dropout = getattr(args, "activation_dropout", 0.1)
- transformer_pointer_generator_vaswani_wmt_en_de_big(args)
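The essential computation in `output_layer` above is the mixture of a generation distribution and a copy distribution over an extended vocabulary. A stripped-down sketch of that mixing outside fairseq (all tensor sizes are illustrative):

```python
import torch
import torch.nn.functional as F

batch, tgt_len, src_len = 2, 3, 5
vocab, oov_types = 10, 4                      # extended vocabulary = vocab + oov_types
extended_vocab = vocab + oov_types

logits = torch.randn(batch, tgt_len, vocab)   # decoder output projected to the base vocabulary
attn = torch.rand(batch, tgt_len, src_len)
attn = attn / attn.sum(-1, keepdim=True)      # attention over source positions, rows sum to 1
p_gen = torch.rand(batch, tgt_len, 1)         # generation probability in [0, 1]
src_tokens = torch.randint(0, extended_vocab, (batch, src_len))

# Generation distribution over the extended vocabulary (OOV slots get zero mass).
gen_dists = F.softmax(logits, dim=-1) * p_gen
gen_dists = torch.cat([gen_dists, gen_dists.new_zeros(batch, tgt_len, oov_types)], dim=2)

# Copy distribution: scatter attention weights onto the source token ids.
index = src_tokens[:, None, :].expand(batch, tgt_len, src_len)
copy_dists = attn.new_zeros(batch, tgt_len, extended_vocab)
copy_dists.scatter_add_(2, index, attn * (1 - p_gen))

mixture = gen_dists + copy_dists
print(mixture.sum(-1))   # each row sums to ~1
```

Because the softmax rows and the attention rows each sum to one, weighting them by `p_gen` and `1 - p_gen` keeps the mixture a proper distribution over the extended vocabulary.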
diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/speech_recognition/models/__init__.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/speech_recognition/models/__init__.py
deleted file mode 100644
index 54b5a1c31243e55d384f80ef9514461cd35b15c6..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/speech_recognition/models/__init__.py
+++ /dev/null
@@ -1,8 +0,0 @@
-import importlib
-import os
-
-
-for file in sorted(os.listdir(os.path.dirname(__file__))):
- if file.endswith(".py") and not file.startswith("_"):
- model_name = file[: file.find(".py")]
- importlib.import_module("examples.speech_recognition.models." + model_name)
diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/models/text_to_speech/vocoder.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/models/text_to_speech/vocoder.py
deleted file mode 100644
index 65d9f9f06bfe7ffa3ed332bb41c4cdd65ac2b916..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/models/text_to_speech/vocoder.py
+++ /dev/null
@@ -1,197 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import logging
-import json
-from typing import Dict
-
-import numpy as np
-import torch
-from torch import nn
-import torch.nn.functional as F
-
-from fairseq.data.audio.audio_utils import (
- get_window, get_fourier_basis, get_mel_filters, TTSSpectrogram
-)
-from fairseq.data.audio.speech_to_text_dataset import S2TDataConfig
-from fairseq.models.text_to_speech.hifigan import Generator as HiFiGANModel
-
-logger = logging.getLogger(__name__)
-
-
-class PseudoInverseMelScale(torch.nn.Module):
- def __init__(self, n_stft, n_mels, sample_rate, f_min, f_max) -> None:
- super(PseudoInverseMelScale, self).__init__()
- self.n_mels = n_mels
- basis = get_mel_filters(
- sample_rate, (n_stft - 1) * 2, n_mels, f_min, f_max
- )
- basis = torch.pinverse(basis) # F x F_mel
- self.register_buffer('basis', basis)
-
- def forward(self, melspec: torch.Tensor) -> torch.Tensor:
- # pack batch
- shape = melspec.shape # B_1 x ... x B_K x F_mel x T
- n_mels, time = shape[-2], shape[-1]
- melspec = melspec.view(-1, n_mels, time)
-
- freq, _ = self.basis.size() # F x F_mel
- assert self.n_mels == n_mels, (self.n_mels, n_mels)
- specgram = self.basis.matmul(melspec).clamp(min=0)
-
- # unpack batch
- specgram = specgram.view(shape[:-2] + (freq, time))
- return specgram
-
-
-class GriffinLim(torch.nn.Module):
- def __init__(
- self, n_fft: int, win_length: int, hop_length: int, n_iter: int,
- window_fn=torch.hann_window
- ):
- super(GriffinLim, self).__init__()
- self.transform = TTSSpectrogram(
- n_fft, win_length, hop_length, return_phase=True
- )
-
- basis = get_fourier_basis(n_fft)
- basis = torch.pinverse(n_fft / hop_length * basis).T[:, None, :]
- basis *= get_window(window_fn, n_fft, win_length)
- self.register_buffer('basis', basis)
-
- self.n_fft = n_fft
- self.win_length = win_length
- self.hop_length = hop_length
- self.n_iter = n_iter
-
- self.tiny = 1.1754944e-38
-
- @classmethod
- def get_window_sum_square(
- cls, n_frames, hop_length, win_length, n_fft,
- window_fn=torch.hann_window
- ) -> torch.Tensor:
- w_sq = get_window(window_fn, n_fft, win_length) ** 2
- n = n_fft + hop_length * (n_frames - 1)
- x = torch.zeros(n, dtype=torch.float32)
- for i in range(n_frames):
- ofst = i * hop_length
- x[ofst: min(n, ofst + n_fft)] += w_sq[:max(0, min(n_fft, n - ofst))]
- return x
-
- def inverse(self, magnitude: torch.Tensor, phase) -> torch.Tensor:
- x = torch.cat(
- [magnitude * torch.cos(phase), magnitude * torch.sin(phase)],
- dim=1
- )
- x = F.conv_transpose1d(x, self.basis, stride=self.hop_length)
- win_sum_sq = self.get_window_sum_square(
- magnitude.shape[-1], hop_length=self.hop_length,
- win_length=self.win_length, n_fft=self.n_fft
- ).to(magnitude.device)
- # remove modulation effects
- approx_nonzero_indices = win_sum_sq > self.tiny
- x[:, :, approx_nonzero_indices] /= win_sum_sq[approx_nonzero_indices]
- x *= self.n_fft / self.hop_length
- x = x[:, :, self.n_fft // 2:]
- x = x[:, :, :-self.n_fft // 2:]
- return x
-
- def forward(self, specgram: torch.Tensor) -> torch.Tensor:
- angles = np.angle(np.exp(2j * np.pi * np.random.rand(*specgram.shape)))
- angles = torch.from_numpy(angles).to(specgram)
- _specgram = specgram.view(-1, specgram.shape[-2], specgram.shape[-1])
- waveform = self.inverse(_specgram, angles).squeeze(1)
- for _ in range(self.n_iter):
- _, angles = self.transform(waveform)
- waveform = self.inverse(_specgram, angles).squeeze(1)
- return waveform.squeeze(0)
-
-
-class GriffinLimVocoder(nn.Module):
- def __init__(self, sample_rate, win_size, hop_size, n_fft,
- n_mels, f_min, f_max, window_fn,
- spec_bwd_max_iter=32,
- fp16=False):
- super().__init__()
- self.inv_mel_transform = PseudoInverseMelScale(
- n_stft=n_fft // 2 + 1, n_mels=n_mels, sample_rate=sample_rate,
- f_min=f_min, f_max=f_max
- )
- self.gl_transform = GriffinLim(
- n_fft=n_fft, win_length=win_size, hop_length=hop_size,
- window_fn=window_fn, n_iter=spec_bwd_max_iter
- )
- if fp16:
- self.half()
- self.inv_mel_transform.half()
- self.gl_transform.half()
- else:
- self.float()
- self.inv_mel_transform.float()
- self.gl_transform.float()
-
- def forward(self, x):
- # x: (B x) T x D -> (B x) 1 x T
- # NOTE: batched forward produces noisier waveform. recommend running
- # one utterance at a time
- self.eval()
- x = x.exp().transpose(-1, -2)
- x = self.inv_mel_transform(x)
- x = self.gl_transform(x)
- return x
-
- @classmethod
- def from_data_cfg(cls, args, data_cfg: S2TDataConfig):
- feat_cfg = data_cfg.config["features"]
- window_fn = getattr(torch, feat_cfg["window_fn"] + "_window")
- return cls(
- sample_rate=feat_cfg["sample_rate"],
- win_size=int(feat_cfg["win_len_t"] * feat_cfg["sample_rate"]),
- hop_size=int(feat_cfg["hop_len_t"] * feat_cfg["sample_rate"]),
- n_fft=feat_cfg["n_fft"], n_mels=feat_cfg["n_mels"],
- f_min=feat_cfg["f_min"], f_max=feat_cfg["f_max"],
- window_fn=window_fn, spec_bwd_max_iter=args.spec_bwd_max_iter,
- fp16=args.fp16
- )
-
-
-class HiFiGANVocoder(nn.Module):
- def __init__(
- self, checkpoint_path: str, model_cfg: Dict[str, str],
- fp16: bool = False
- ) -> None:
- super().__init__()
- self.model = HiFiGANModel(model_cfg)
- state_dict = torch.load(checkpoint_path)
- self.model.load_state_dict(state_dict["generator"])
- if fp16:
- self.model.half()
- logger.info(f"loaded HiFiGAN checkpoint from {checkpoint_path}")
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- # (B x) T x D -> (B x) 1 x T
- model = self.model.eval()
- if len(x.shape) == 2:
- return model(x.unsqueeze(0).transpose(1, 2)).detach().squeeze(0)
- else:
- return model(x.transpose(-1, -2)).detach()
-
- @classmethod
- def from_data_cfg(cls, args, data_cfg: S2TDataConfig):
- vocoder_cfg = data_cfg.vocoder
- assert vocoder_cfg.get("type", "griffin_lim") == "hifigan"
- with open(vocoder_cfg["config"]) as f:
- model_cfg = json.load(f)
- return cls(vocoder_cfg["checkpoint"], model_cfg, fp16=args.fp16)
-
-
-def get_vocoder(args, data_cfg: S2TDataConfig):
- if args.vocoder == "griffin_lim":
- return GriffinLimVocoder.from_data_cfg(args, data_cfg)
- elif args.vocoder == "hifigan":
- return HiFiGANVocoder.from_data_cfg(args, data_cfg)
- else:
- raise ValueError("Unknown vocoder")
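For sanity-checking the Griffin-Lim path above, torchaudio ships an equivalent transform. A minimal sketch that goes waveform to magnitude spectrogram and back via iterative phase recovery (assuming torchaudio is installed; the mel pseudo-inverse step is omitted for brevity):

```python
import torch
import torchaudio

n_fft, hop = 1024, 256
spec = torchaudio.transforms.Spectrogram(n_fft=n_fft, hop_length=hop, power=2)
griffin_lim = torchaudio.transforms.GriffinLim(n_fft=n_fft, hop_length=hop, power=2, n_iter=32)

waveform = torch.randn(1, 16000)            # one second of noise as a stand-in signal
magnitudes = spec(waveform)                 # magnitude-only spectrogram, phase is discarded
reconstructed = griffin_lim(magnitudes)     # iterative phase recovery, as in the GriffinLim module above
print(waveform.shape, reconstructed.shape)
```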
diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/modules/quantization/pq/modules/__init__.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/modules/quantization/pq/modules/__init__.py
deleted file mode 100644
index b67c8e8ad691aa01e9e10e904d69d94595387668..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/modules/quantization/pq/modules/__init__.py
+++ /dev/null
@@ -1,8 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-from .qconv import PQConv2d # NOQA
-from .qemb import PQEmbedding # NOQA
-from .qlinear import PQLinear # NOQA
diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/tests/test_transformer.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/tests/test_transformer.py
deleted file mode 100644
index de5c5bdbd49692e63fb1cb50108a791304425dc1..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/tests/test_transformer.py
+++ /dev/null
@@ -1,65 +0,0 @@
-import argparse
-import unittest
-from typing import Any, Dict, Sequence
-
-import torch
-from fairseq.models import transformer
-
-from tests.test_roberta import FakeTask
-
-
-def mk_sample(tok: Sequence[int] = None, batch_size: int = 2) -> Dict[str, Any]:
- if not tok:
- tok = [10, 11, 12, 13, 14, 15, 2]
-
- batch = torch.stack([torch.tensor(tok, dtype=torch.long)] * batch_size)
- sample = {
- "net_input": {
- "src_tokens": batch,
- "prev_output_tokens": batch,
- "src_lengths": torch.tensor(
- [len(tok)] * batch_size, dtype=torch.long, device=batch.device
- ),
- },
- "target": batch[:, 1:],
- }
- return sample
-
-
-def mk_transformer(**extra_args: Any):
- overrides = {
- # Use characteristic dimensions
- "encoder_embed_dim": 12,
- "encoder_ffn_embed_dim": 14,
- "decoder_embed_dim": 12,
- "decoder_ffn_embed_dim": 14,
- # Disable dropout so we have comparable tests.
- "dropout": 0,
- "attention_dropout": 0,
- "activation_dropout": 0,
- "encoder_layerdrop": 0,
- }
- overrides.update(extra_args)
- # Overrides the defaults from the parser
- args = argparse.Namespace(**overrides)
- transformer.tiny_architecture(args)
-
- torch.manual_seed(0)
- task = FakeTask(args)
- return transformer.TransformerModel.build_model(args, task)
-
-
-class TransformerTestCase(unittest.TestCase):
- def test_forward_backward(self):
- model = mk_transformer(encoder_embed_dim=12, decoder_embed_dim=12)
- sample = mk_sample()
- o, _ = model.forward(**sample["net_input"])
- loss = o.sum()
- loss.backward()
-
- def test_different_encoder_decoder_embed_dim(self):
- model = mk_transformer(encoder_embed_dim=12, decoder_embed_dim=16)
- sample = mk_sample()
- o, _ = model.forward(**sample["net_input"])
- loss = o.sum()
- loss.backward()
diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/discriminative_reranking_nmt/criterions/discriminative_reranking_criterion.py b/spaces/OFA-Sys/OFA-vqa/fairseq/examples/discriminative_reranking_nmt/criterions/discriminative_reranking_criterion.py
deleted file mode 100644
index 0b02ce18772454697e61f827d96d76ad361b9cd1..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/discriminative_reranking_nmt/criterions/discriminative_reranking_criterion.py
+++ /dev/null
@@ -1,138 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import math
-from dataclasses import dataclass, field
-
-import torch
-import torch.nn.functional as F
-
-from fairseq import metrics, utils
-from fairseq.criterions import FairseqCriterion, register_criterion
-from fairseq.dataclass import ChoiceEnum, FairseqDataclass
-
-
-_EPSILON = torch.finfo(torch.float32).eps
-TARGET_DIST_NORM_CHOICES = ChoiceEnum(["none", "minmax"])
-
-
-@dataclass
-class KLDivergenceRerankingCriterionConfig(FairseqDataclass):
- target_dist_norm: TARGET_DIST_NORM_CHOICES = field(
- default="none",
- metadata={"help": "method to normalize the range of target scores"},
- )
- temperature: float = field(
- default=1.0,
- metadata={"help": "temperature in softmax for target distributions"},
- )
- forward_batch_size: int = field(
- default=32,
- metadata={
- "help": "number of hypotheses per batch for model forward (set a value smaller than --mt-beam to avoid OOM when training with a large beam size)"
- },
- )
-
-
-@register_criterion(
- "kl_divergence_rereanking", dataclass=KLDivergenceRerankingCriterionConfig
-)
-class KLDivergenceRerankingCriterion(FairseqCriterion):
- def __init__(
- self, task, target_dist_norm, temperature, forward_batch_size,
- ):
- super().__init__(task)
- self.target_dist_norm = target_dist_norm
- self.temperature = temperature
- self.forward_batch_size = forward_batch_size
-
- def forward(self, model, sample, reduce=True):
- """Compute the loss for the given sample.
-
- Returns a tuple with three elements:
- 1) the loss
- 2) the sample size, which is used as the denominator for the gradient
- 3) logging outputs to display while training
- """
-
- sample_size = sample["id"].numel()
- assert sample_size % self.task.cfg.mt_beam == 0, (
- f"sample_size ({sample_size}) cannot be divided by beam size ({self.task.cfg.mt_beam})."
- f"Please set --required-batch-size-multiple={self.task.cfg.mt_beam}."
- )
-
- # split into smaller batches for model forward
- batch_out = []
- for i in range(0, sample_size, self.forward_batch_size):
- j = min(i + self.forward_batch_size, sample_size)
-
- out = model(
- src_tokens=sample["net_input"]["src_tokens"][i:j, :],
- src_lengths=sample["net_input"]["src_lengths"][i:j],
- )
-
- batch_out.append(
- model.sentence_forward(out, sample["net_input"]["src_tokens"][i:j, :])
- )
-
- batch_out = torch.cat(batch_out, dim=0).view(
- self.task.cfg.mt_beam, sample_size // self.task.cfg.mt_beam, -1
- ) # T x B x C
- if model.joint_classification == "sent":
- batch_out = model.joint_forward(batch_out)
- scores = model.classification_forward(batch_out.view(sample_size, 1, -1)).view(
- -1, self.task.cfg.mt_beam
- ) # input: B x T x C
-
- loss = self.compute_kl_loss(
- scores, sample["target"][:, 0].view(-1, self.task.cfg.mt_beam)
- )
-
- sample_size = sample_size // self.task.cfg.mt_beam
-
- logging_output = {
- "loss": loss.detach(),
- "ntokens": sample["ntokens"],
- "nsentences": sample_size * self.task.cfg.mt_beam,
- "sample_size": sample_size,
- "scores": scores.detach(),
- }
-
- return loss, sample_size, logging_output
-
- def compute_kl_loss(self, logits, target):
- norm_target = target
- if self.target_dist_norm == "minmax":
- min_v = torch.min(target, 1, keepdim=True).values
- max_v = torch.max(target, 1, keepdim=True).values
- norm_target = (target - min_v) / (max_v - min_v + _EPSILON)
-
- target_dist = F.softmax(
- norm_target / self.temperature, dim=-1, dtype=torch.float32
- )
- model_dist = F.log_softmax(logits, dim=-1, dtype=torch.float32)
- loss = -(target_dist * model_dist - target_dist * target_dist.log()).sum()
- return loss
-
- @staticmethod
- def reduce_metrics(logging_outputs) -> None:
- """Aggregate logging outputs from data parallel training."""
- loss_sum = utils.item(sum(log.get("loss", 0) for log in logging_outputs))
-
- sample_size = utils.item(
- sum(log.get("sample_size", 0) for log in logging_outputs)
- )
-
- loss = loss_sum / sample_size / math.log(2)
- metrics.log_scalar("loss", loss, sample_size, round=3)
-
- @staticmethod
- def logging_outputs_can_be_summed() -> bool:
- """
- Whether the logging outputs returned by `forward` can be summed
- across workers prior to calling `reduce_metrics`. Setting this
- to True will improve distributed training speed.
- """
- return True
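The hand-written loss in `compute_kl_loss` is KL(target ‖ model). A small sketch verifying that it matches `F.kl_div`, which expects log-probabilities on the model side (the beam size and scores below are made up):

```python
import torch
import torch.nn.functional as F

torch.manual_seed(0)
beam = 5
scores = torch.randn(2, beam)    # model scores for each hypothesis in the beam
target = torch.rand(2, beam)     # e.g. a sentence-level metric for each hypothesis

# Min-max normalize targets per sentence, then soften with a temperature, as in the criterion.
eps, temperature = torch.finfo(torch.float32).eps, 1.0
t_min = target.min(1, keepdim=True).values
t_max = target.max(1, keepdim=True).values
norm_target = (target - t_min) / (t_max - t_min + eps)

target_dist = F.softmax(norm_target / temperature, dim=-1)
model_dist = F.log_softmax(scores, dim=-1)

loss_manual = -(target_dist * model_dist - target_dist * target_dist.log()).sum()
loss_kldiv = F.kl_div(model_dist, target_dist, reduction="sum")
print(torch.allclose(loss_manual, loss_kldiv))   # True
```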
diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/simultaneous_translation/models/convtransformer_simul_trans.py b/spaces/OFA-Sys/OFA-vqa/fairseq/examples/simultaneous_translation/models/convtransformer_simul_trans.py
deleted file mode 100644
index 4a26422f650cf13ee7d4e8d2228b50ec49876fb8..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/simultaneous_translation/models/convtransformer_simul_trans.py
+++ /dev/null
@@ -1,204 +0,0 @@
-# Copyright (c) 2017-present, Facebook, Inc.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the LICENSE file in
-# the root directory of this source tree. An additional grant of patent rights
-# can be found in the PATENTS file in the same directory.
-
-from fairseq import checkpoint_utils
-from fairseq.models import (
- register_model,
- register_model_architecture,
-)
-from fairseq.models.speech_to_text import (
- ConvTransformerModel,
- convtransformer_espnet,
- ConvTransformerEncoder,
-)
-from fairseq.models.speech_to_text.modules.augmented_memory_attention import (
- augmented_memory,
- SequenceEncoder,
- AugmentedMemoryConvTransformerEncoder,
-)
-
-from torch import nn, Tensor
-from typing import Dict, List
-from fairseq.models.speech_to_text.modules.emformer import NoSegAugmentedMemoryTransformerEncoderLayer
-
-@register_model("convtransformer_simul_trans")
-class SimulConvTransformerModel(ConvTransformerModel):
- """
- Implementation of the paper:
-
- SimulMT to SimulST: Adapting Simultaneous Text Translation to
- End-to-End Simultaneous Speech Translation
-
- https://www.aclweb.org/anthology/2020.aacl-main.58.pdf
- """
-
- @staticmethod
- def add_args(parser):
- super(SimulConvTransformerModel, SimulConvTransformerModel).add_args(parser)
- parser.add_argument(
- "--train-monotonic-only",
- action="store_true",
- default=False,
- help="Only train monotonic attention",
- )
-
- @classmethod
- def build_decoder(cls, args, task, embed_tokens):
- tgt_dict = task.tgt_dict
-
- from examples.simultaneous_translation.models.transformer_monotonic_attention import (
- TransformerMonotonicDecoder,
- )
-
- decoder = TransformerMonotonicDecoder(args, tgt_dict, embed_tokens)
-
- if getattr(args, "load_pretrained_decoder_from", None):
- decoder = checkpoint_utils.load_pretrained_component_from_model(
- component=decoder, checkpoint=args.load_pretrained_decoder_from
- )
- return decoder
-
-
-@register_model_architecture(
- "convtransformer_simul_trans", "convtransformer_simul_trans_espnet"
-)
-def convtransformer_simul_trans_espnet(args):
- convtransformer_espnet(args)
-
-
-@register_model("convtransformer_augmented_memory")
-@augmented_memory
-class AugmentedMemoryConvTransformerModel(SimulConvTransformerModel):
- @classmethod
- def build_encoder(cls, args):
- encoder = SequenceEncoder(args, AugmentedMemoryConvTransformerEncoder(args))
-
- if getattr(args, "load_pretrained_encoder_from", None) is not None:
- encoder = checkpoint_utils.load_pretrained_component_from_model(
- component=encoder, checkpoint=args.load_pretrained_encoder_from
- )
-
- return encoder
-
-
-@register_model_architecture(
- "convtransformer_augmented_memory", "convtransformer_augmented_memory"
-)
-def augmented_memory_convtransformer_espnet(args):
- convtransformer_espnet(args)
-
-
-# ============================================================================ #
-# Convtransformer
-# with monotonic attention decoder
-# with emformer encoder
-# ============================================================================ #
-
-
-class ConvTransformerEmformerEncoder(ConvTransformerEncoder):
- def __init__(self, args):
- super().__init__(args)
- stride = self.conv_layer_stride(args)
- trf_left_context = args.segment_left_context // stride
- trf_right_context = args.segment_right_context // stride
- context_config = [trf_left_context, trf_right_context]
- self.transformer_layers = nn.ModuleList(
- [
- NoSegAugmentedMemoryTransformerEncoderLayer(
- input_dim=args.encoder_embed_dim,
- num_heads=args.encoder_attention_heads,
- ffn_dim=args.encoder_ffn_embed_dim,
- num_layers=args.encoder_layers,
- dropout_in_attn=args.dropout,
- dropout_on_attn=args.dropout,
- dropout_on_fc1=args.dropout,
- dropout_on_fc2=args.dropout,
- activation_fn=args.activation_fn,
- context_config=context_config,
- segment_size=args.segment_length,
- max_memory_size=args.max_memory_size,
- scaled_init=True, # TODO: use constant for now.
- tanh_on_mem=args.amtrf_tanh_on_mem,
- )
- ]
- )
- self.conv_transformer_encoder = ConvTransformerEncoder(args)
-
- def forward(self, src_tokens, src_lengths):
- encoder_out: Dict[str, List[Tensor]] = self.conv_transformer_encoder(src_tokens, src_lengths.to(src_tokens.device))
- output = encoder_out["encoder_out"][0]
- encoder_padding_masks = encoder_out["encoder_padding_mask"]
-
- return {
- "encoder_out": [output],
- # This is because in the original implementation
- # the output didn't consider the last segment as right context.
- "encoder_padding_mask": [encoder_padding_masks[0][:, : output.size(0)]] if len(encoder_padding_masks) > 0
- else [],
- "encoder_embedding": [],
- "encoder_states": [],
- "src_tokens": [],
- "src_lengths": [],
- }
-
- @staticmethod
- def conv_layer_stride(args):
- # TODO: make it configurable from the args
- return 4
-
-
-@register_model("convtransformer_emformer")
-class ConvtransformerEmformer(SimulConvTransformerModel):
- @staticmethod
- def add_args(parser):
- super(ConvtransformerEmformer, ConvtransformerEmformer).add_args(parser)
-
- parser.add_argument(
- "--segment-length",
- type=int,
- metavar="N",
- help="length of each segment (not including left context / right context)",
- )
- parser.add_argument(
- "--segment-left-context",
- type=int,
- help="length of left context in a segment",
- )
- parser.add_argument(
- "--segment-right-context",
- type=int,
- help="length of right context in a segment",
- )
- parser.add_argument(
- "--max-memory-size",
- type=int,
- default=-1,
- help="Maximum memory size for the augmented-memory encoder.",
- )
- parser.add_argument(
- "--amtrf-tanh-on-mem",
- default=False,
- action="store_true",
- help="whether to use tanh on memory vector",
- )
-
- @classmethod
- def build_encoder(cls, args):
- encoder = ConvTransformerEmformerEncoder(args)
- if getattr(args, "load_pretrained_encoder_from", None):
- encoder = checkpoint_utils.load_pretrained_component_from_model(
- component=encoder, checkpoint=args.load_pretrained_encoder_from
- )
- return encoder
-
-
-@register_model_architecture(
- "convtransformer_emformer",
- "convtransformer_emformer",
-)
-def convtransformer_emformer_base(args):
- convtransformer_espnet(args)
diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/tasks/translation_from_pretrained_xlm.py b/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/tasks/translation_from_pretrained_xlm.py
deleted file mode 100644
index a05f2891524a8b23482e206c1742c3b816b77afb..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/tasks/translation_from_pretrained_xlm.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-from dataclasses import dataclass
-from fairseq.data.legacy.masked_lm_dictionary import MaskedLMDictionary
-from fairseq.tasks.translation import TranslationConfig, TranslationTask
-
-from . import register_task
-
-
-@dataclass
-class TranslationFromPretrainedXLMConfig(TranslationConfig):
- pass
-
-
-@register_task(
- "translation_from_pretrained_xlm", dataclass=TranslationFromPretrainedXLMConfig
-)
-class TranslationFromPretrainedXLMTask(TranslationTask):
- """
- Same as TranslationTask except use the MaskedLMDictionary class so that
- we can load data that was binarized with the MaskedLMDictionary class.
-
- This task should be used for the entire training pipeline when we want to
- train an NMT model from a pretrained XLM checkpoint: binarizing NMT data,
- training NMT with the pretrained XLM checkpoint, and subsequent evaluation
- of that trained model.
- """
-
- @classmethod
- def load_dictionary(cls, filename):
- """Load the masked LM dictionary from the filename
-
- Args:
- filename (str): the filename
- """
- return MaskedLMDictionary.load(filename)
diff --git a/spaces/Olivier-Truong/faster-whisper-webui-v2/src/diarization/diarization.py b/spaces/Olivier-Truong/faster-whisper-webui-v2/src/diarization/diarization.py
deleted file mode 100644
index 2627894e621b25c1c9b4a87951c4edf000538be9..0000000000000000000000000000000000000000
--- a/spaces/Olivier-Truong/faster-whisper-webui-v2/src/diarization/diarization.py
+++ /dev/null
@@ -1,195 +0,0 @@
-import argparse
-import gc
-import json
-import os
-from pathlib import Path
-import tempfile
-from typing import TYPE_CHECKING, List
-import torch
-
-import ffmpeg
-
-class DiarizationEntry:
- def __init__(self, start, end, speaker):
- self.start = start
- self.end = end
- self.speaker = speaker
-
- def __repr__(self):
- return f"<DiarizationEntry start={self.start} end={self.end} speaker={self.speaker}>"
-
- def toJson(self):
- return {
- "start": self.start,
- "end": self.end,
- "speaker": self.speaker
- }
-
-class Diarization:
- def __init__(self, auth_token=None):
- if auth_token is None:
- auth_token = os.environ.get("HK_ACCESS_TOKEN")
- if auth_token is None:
- raise ValueError("No HuggingFace API Token provided - please use the --auth_token argument or set the HK_ACCESS_TOKEN environment variable")
-
- self.auth_token = auth_token
- self.initialized = False
- self.pipeline = None
-
- @staticmethod
- def has_libraries():
- try:
- import pyannote.audio
- import intervaltree
- return True
- except ImportError:
- return False
-
- def initialize(self):
- if self.initialized:
- return
- from pyannote.audio import Pipeline
-
- self.pipeline = Pipeline.from_pretrained("pyannote/speaker-diarization@2.1", use_auth_token=self.auth_token)
- self.initialized = True
-
- # Load GPU mode if available
- device = "cuda" if torch.cuda.is_available() else "cpu"
- if device == "cuda":
- print("Diarization - using GPU")
- self.pipeline = self.pipeline.to(torch.device(0))
- else:
- print("Diarization - using CPU")
-
- def run(self, audio_file, **kwargs):
- self.initialize()
- audio_file_obj = Path(audio_file)
-
- # Supported file types in soundfile are WAV, FLAC, OGG and MAT
- if audio_file_obj.suffix in [".wav", ".flac", ".ogg", ".mat"]:
- target_file = audio_file
- else:
- # Create temp WAV file
- target_file = tempfile.mktemp(prefix="diarization_", suffix=".wav")
- try:
- ffmpeg.input(audio_file).output(target_file, ac=1).run()
- except ffmpeg.Error as e:
- print(f"Error occurred during audio conversion: {e.stderr}")
-
- diarization = self.pipeline(target_file, **kwargs)
-
- if target_file != audio_file:
- # Delete temp file
- os.remove(target_file)
-
- # Yield result
- for turn, _, speaker in diarization.itertracks(yield_label=True):
- yield DiarizationEntry(turn.start, turn.end, speaker)
-
- def mark_speakers(self, diarization_result: List[DiarizationEntry], whisper_result: dict):
- from intervaltree import IntervalTree
- result = whisper_result.copy()
-
- # Create an interval tree from the diarization results
- tree = IntervalTree()
- for entry in diarization_result:
- tree[entry.start:entry.end] = entry
-
- # Iterate through each segment in the Whisper JSON
- for segment in result["segments"]:
- segment_start = segment["start"]
- segment_end = segment["end"]
-
- # Find overlapping speakers using the interval tree
- overlapping_speakers = tree[segment_start:segment_end]
-
- # If no speakers overlap with this segment, skip it
- if not overlapping_speakers:
- continue
-
- # If multiple speakers overlap with this segment, choose the one with the longest duration
- longest_speaker = None
- longest_duration = 0
-
- for speaker_interval in overlapping_speakers:
- overlap_start = max(speaker_interval.begin, segment_start)
- overlap_end = min(speaker_interval.end, segment_end)
- overlap_duration = overlap_end - overlap_start
-
- if overlap_duration > longest_duration:
- longest_speaker = speaker_interval.data.speaker
- longest_duration = overlap_duration
-
- # Add speakers
- segment["longest_speaker"] = longest_speaker
- segment["speakers"] = [speaker_interval.data.toJson() for speaker_interval in overlapping_speakers]
-
- # The write_srt function will use the longest_speaker if it exists, and add it to the text field
-
- return result
-
-def _write_file(input_file: str, output_path: str, output_extension: str, file_writer: lambda f: None):
- if input_file is None:
- raise ValueError("input_file is required")
- if file_writer is None:
- raise ValueError("file_writer is required")
-
- # Write file
- if output_path is None:
- effective_path = os.path.splitext(input_file)[0] + "_output" + output_extension
- else:
- effective_path = output_path
-
- with open(effective_path, 'w+', encoding="utf-8") as f:
- file_writer(f)
-
- print(f"Output saved to {effective_path}")
-
-def main():
- from src.utils import write_srt
- from src.diarization.transcriptLoader import load_transcript
-
- parser = argparse.ArgumentParser(description='Add speakers to a SRT file or Whisper JSON file using pyannote/speaker-diarization.')
- parser.add_argument('audio_file', type=str, help='Input audio file')
- parser.add_argument('whisper_file', type=str, help='Input Whisper JSON/SRT file')
- parser.add_argument('--output_json_file', type=str, default=None, help='Output JSON file (optional)')
- parser.add_argument('--output_srt_file', type=str, default=None, help='Output SRT file (optional)')
- parser.add_argument('--auth_token', type=str, default=None, help='HuggingFace API Token (optional)')
- parser.add_argument("--max_line_width", type=int, default=40, help="Maximum line width for SRT file (default: 40)")
- parser.add_argument("--num_speakers", type=int, default=None, help="Number of speakers")
- parser.add_argument("--min_speakers", type=int, default=None, help="Minimum number of speakers")
- parser.add_argument("--max_speakers", type=int, default=None, help="Maximum number of speakers")
-
- args = parser.parse_args()
-
- print("\nReading whisper JSON from " + args.whisper_file)
-
- # Read whisper JSON or SRT file
- whisper_result = load_transcript(args.whisper_file)
-
- diarization = Diarization(auth_token=args.auth_token)
- diarization_result = list(diarization.run(args.audio_file, num_speakers=args.num_speakers, min_speakers=args.min_speakers, max_speakers=args.max_speakers))
-
- # Print result
- print("Diarization result:")
- for entry in diarization_result:
- print(f" start={entry.start:.1f}s stop={entry.end:.1f}s speaker_{entry.speaker}")
-
- marked_whisper_result = diarization.mark_speakers(diarization_result, whisper_result)
-
- # Write output JSON to file
- _write_file(args.whisper_file, args.output_json_file, ".json",
- lambda f: json.dump(marked_whisper_result, f, indent=4, ensure_ascii=False))
-
- # Write SRT
- _write_file(args.whisper_file, args.output_srt_file, ".srt",
- lambda f: write_srt(marked_whisper_result["segments"], f, maxLineWidth=args.max_line_width))
-
-if __name__ == "__main__":
- main()
-
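- # Example CLI usage (the script name and file names below are hypothetical):
- #   python diarization.py interview.wav interview.json --output_srt_file interview.srt --num_speakers 2
-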
- #test = Diarization()
- #print("Initializing")
- #test.initialize()
-
- #input("Press Enter to continue...")
\ No newline at end of file
diff --git a/spaces/OpenGVLab/InternGPT/third-party/lama/bin/train.py b/spaces/OpenGVLab/InternGPT/third-party/lama/bin/train.py
deleted file mode 100644
index be9ca8c6ef2a0cb9143ab6a0f4d91f571b691a95..0000000000000000000000000000000000000000
--- a/spaces/OpenGVLab/InternGPT/third-party/lama/bin/train.py
+++ /dev/null
@@ -1,72 +0,0 @@
-#!/usr/bin/env python3
-
-import logging
-import os
-import sys
-import traceback
-
-os.environ['OMP_NUM_THREADS'] = '1'
-os.environ['OPENBLAS_NUM_THREADS'] = '1'
-os.environ['MKL_NUM_THREADS'] = '1'
-os.environ['VECLIB_MAXIMUM_THREADS'] = '1'
-os.environ['NUMEXPR_NUM_THREADS'] = '1'
-
-import hydra
-from omegaconf import OmegaConf
-from pytorch_lightning import Trainer
-from pytorch_lightning.callbacks import ModelCheckpoint
-from pytorch_lightning.loggers import TensorBoardLogger
-from pytorch_lightning.plugins import DDPPlugin
-
-from saicinpainting.training.trainers import make_training_model
-from saicinpainting.utils import register_debug_signal_handlers, handle_ddp_subprocess, handle_ddp_parent_process, \
- handle_deterministic_config
-
-LOGGER = logging.getLogger(__name__)
-
-
-@handle_ddp_subprocess()
-@hydra.main(config_path='../configs/training', config_name='tiny_test.yaml')
-def main(config: OmegaConf):
- try:
- need_set_deterministic = handle_deterministic_config(config)
-
- register_debug_signal_handlers() # kill -10 will result in traceback dumped into log
-
- is_in_ddp_subprocess = handle_ddp_parent_process()
-
- config.visualizer.outdir = os.path.join(os.getcwd(), config.visualizer.outdir)
- if not is_in_ddp_subprocess:
- LOGGER.info(OmegaConf.to_yaml(config))
- OmegaConf.save(config, os.path.join(os.getcwd(), 'config.yaml'))
-
- checkpoints_dir = os.path.join(os.getcwd(), 'models')
- os.makedirs(checkpoints_dir, exist_ok=True)
-
- # there is no need to suppress this logger in ddp, because it handles rank on its own
- metrics_logger = TensorBoardLogger(config.location.tb_dir, name=os.path.basename(os.getcwd()))
- metrics_logger.log_hyperparams(config)
-
- training_model = make_training_model(config)
-
- trainer_kwargs = OmegaConf.to_container(config.trainer.kwargs, resolve=True)
- if need_set_deterministic:
- trainer_kwargs['deterministic'] = True
-
- trainer = Trainer(
- # there is no need to suppress checkpointing in ddp, because it handles rank on its own
- callbacks=ModelCheckpoint(dirpath=checkpoints_dir, **config.trainer.checkpoint_kwargs),
- logger=metrics_logger,
- default_root_dir=os.getcwd(),
- **trainer_kwargs
- )
- trainer.fit(training_model)
- except KeyboardInterrupt:
- LOGGER.warning('Interrupted by user')
- except Exception as ex:
- LOGGER.critical(f'Training failed due to {ex}:\n{traceback.format_exc()}')
- sys.exit(1)
-
-
-if __name__ == '__main__':
- main()
diff --git a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmcv/cnn/bricks/swish.py b/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmcv/cnn/bricks/swish.py
deleted file mode 100644
index e2ca8ed7b749413f011ae54aac0cab27e6f0b51f..0000000000000000000000000000000000000000
--- a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmcv/cnn/bricks/swish.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import torch
-import torch.nn as nn
-
-from .registry import ACTIVATION_LAYERS
-
-
-@ACTIVATION_LAYERS.register_module()
-class Swish(nn.Module):
- """Swish Module.
-
- This module applies the swish function:
-
- .. math::
- Swish(x) = x * Sigmoid(x)
-
- Returns:
- Tensor: The output tensor.
- """
-
- def __init__(self):
- super(Swish, self).__init__()
-
- def forward(self, x):
- return x * torch.sigmoid(x)
diff --git a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/lilypond/2.24.2/ccache/lily/define-grob-interfaces.go b/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/lilypond/2.24.2/ccache/lily/define-grob-interfaces.go
deleted file mode 100644
index b8c9a687394cdf4050cb7f7d207efff638adb6fc..0000000000000000000000000000000000000000
Binary files a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/lilypond/2.24.2/ccache/lily/define-grob-interfaces.go and /dev/null differ
diff --git a/spaces/PineSearch/generatorImage/old_app.py b/spaces/PineSearch/generatorImage/old_app.py
deleted file mode 100644
index 282a56a62ebac582ef76d34f1923845c9772d140..0000000000000000000000000000000000000000
--- a/spaces/PineSearch/generatorImage/old_app.py
+++ /dev/null
@@ -1,59 +0,0 @@
-import gradio as gr
-from gradio.inputs import Textbox
-
-import torch
-from diffusers import StableDiffusionPipeline
-import boto3
-from io import BytesIO
-import os
-
-AWS_ACCESS_KEY_ID = os.getenv("AWS_ACCESS_KEY_ID")
-AWS_SECRET_ACCESS_KEY = os.getenv("AWS_SECRET_ACCESS_KEY")
-S3_BUCKET_NAME = os.getenv("BUCKET_NAME")
-
-model_id = "CompVis/stable-diffusion-v1-4"
-device = "cuda" if torch.cuda.is_available() else "cpu"
-
-pipe = StableDiffusionPipeline.from_pretrained(
- model_id, torch_dtype=torch.float32)
-
-pipe = pipe.to(device)
-
-def text_to_image(prompt, save_as, key_id):
-
- if AWS_ACCESS_KEY_ID != key_id:
- return "not permition"
-
- # Create an instance of the S3 client
- s3 = boto3.client('s3',
- aws_access_key_id=AWS_ACCESS_KEY_ID,
- aws_secret_access_key=AWS_SECRET_ACCESS_KEY)
-
- image_name = '-'.join(save_as.split()) + ".webp"
-
- def save_image_to_s3(image):
- # Create a BytesIO object to store the image.
- image_buffer = BytesIO()
- image.save(image_buffer, format='WEBP')
- image_buffer.seek(0)
-
- # Full path of the file in the bucket
- s3_key = "public/" + image_name
-
- # Upload the image to the S3 bucket
- s3.upload_fileobj(image_buffer, S3_BUCKET_NAME, s3_key)
-
- def generator_image(prompt):
- prompt = prompt
- image = pipe(prompt).images[0]
-
- # Save the image in S3
- save_image_to_s3(image)
-
- generator_image(prompt)
- return image_name
-
-
-
-iface = gr.Interface(fn=text_to_image, inputs=[Textbox(label="prompt"), Textbox(label="s3_save_as"), Textbox(label="aws_key_id")], outputs="text")
-iface.launch()
diff --git a/spaces/Pippoz/All_in_one/README.md b/spaces/Pippoz/All_in_one/README.md
deleted file mode 100644
index f9a4c8ae4f3749da051d00fdc52f404b6a8f41e5..0000000000000000000000000000000000000000
--- a/spaces/Pippoz/All_in_one/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: All_in_one tasks
-emoji: 🦾
-colorFrom: red
-colorTo: blue
-sdk: streamlit
-sdk_version: 1.9.0
-app_file: app.py
-pinned: true
----
-
-The goal of this project is to create a Space where a range of models and tasks can be run, and the user can choose among them.
diff --git a/spaces/Prasanna18/SujokTherapy/app.py b/spaces/Prasanna18/SujokTherapy/app.py
deleted file mode 100644
index 4b985e955328d030d459b919451b2d89822c6e13..0000000000000000000000000000000000000000
--- a/spaces/Prasanna18/SujokTherapy/app.py
+++ /dev/null
@@ -1,95 +0,0 @@
-#shree
-import streamlit as st
-from streamlit_chat import message
-from langchain.chains import ConversationalRetrievalChain
-from langchain.document_loaders import DirectoryLoader
-from langchain.document_loaders import PyPDFLoader
-from langchain.embeddings import HuggingFaceEmbeddings
-from langchain.llms import CTransformers
-from langchain.text_splitter import RecursiveCharacterTextSplitter
-from langchain.vectorstores import FAISS
-from langchain.memory import ConversationBufferMemory
-from langchain.document_loaders.csv_loader import CSVLoader
-
-
-st.set_page_config(
- page_title="SuJokEase",
- page_icon="🩺",
- layout="wide",
- initial_sidebar_state="expanded",
-)
-
-loader = CSVLoader(file_path='data.csv')
-documents = loader.load()
-
-text_splitter = RecursiveCharacterTextSplitter(chunk_size=500,chunk_overlap=50)
-text_chunks = text_splitter.split_documents(documents)
-
-embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2",
- model_kwargs={'device':"cpu"})
-
-vector_store = FAISS.from_documents(text_chunks,embeddings)
-
-llm = CTransformers(model="model.bin",model_type="llama",
- config={'max_new_tokens':128,'temperature':0.01})
-
-memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
-
-chain = ConversationalRetrievalChain.from_llm(llm=llm,chain_type='stuff',
- retriever=vector_store.as_retriever(search_kwargs={"k":2}),
- memory=memory)
-
-# Sidebar for user input
-st.sidebar.title("SuJokEase🩺")
-st.sidebar.info("A Conversational Retrieval Chain Chat-Bot for Sujok Therapy Points Conversational Retrieval Chain Chat-Bot for Sujok Therapy weather you are Students, Working Professional or in situations where you need instant relaxation. We are at your Back! ")
-github_link = "[GitHub]()"
-st.sidebar.info("To contribute and Sponser - " + github_link)
-
-st.title("A Conversational Retrieval Chain Chat-Bot for Sujok Therapy Points🩺")
-st.text("Your wellness companion for instant relaxation.")
-
-
-def conversation_chat(query):
- result = chain({"question": query, "chat_history": st.session_state['history']})
- st.session_state['history'].append((query, result["answer"]))
- return result["answer"]
-
-def initialize_session_state():
- if 'history' not in st.session_state:
- st.session_state['history'] = []
-
- if 'generated' not in st.session_state:
- st.session_state['generated'] = ["Hello! Ask me anything about Pains"]
-
- if 'past' not in st.session_state:
- st.session_state['past'] = ["Hello!"]
-
-def display_chat_history():
- reply_container = st.container()
- container = st.container()
-
- with container:
- with st.form(key='my_form', clear_on_submit=True):
- user_input = st.text_input("Question:", placeholder="Ask anything about Sujok Therapy", key='input')
- submit_button = st.form_submit_button(label='Send')
-
- if submit_button and user_input:
- output = conversation_chat(user_input)
-
- st.session_state['past'].append(user_input)
- st.session_state['generated'].append(output)
-
- if st.session_state['generated']:
- with reply_container:
- for i in range(len(st.session_state['generated'])):
- message(st.session_state["past"][i], is_user=True, key=str(i) + '_user', avatar_style="robot")
- message(st.session_state["generated"][i], key=str(i), avatar_style="superhero")
-
-# Initialize session state
-initialize_session_state()
-# Display chat history
-display_chat_history()
-
-
-
diff --git a/spaces/Rakot2223/faster-whisper-webui/docs/colab.md b/spaces/Rakot2223/faster-whisper-webui/docs/colab.md
deleted file mode 100644
index 3fcdb835327238764fb643b9bbd2e27b6e14f58c..0000000000000000000000000000000000000000
--- a/spaces/Rakot2223/faster-whisper-webui/docs/colab.md
+++ /dev/null
@@ -1,20 +0,0 @@
-# Running Whisper on Google Colab
-
-If you don't have a decent GPU or any experience in running command-line applications, you might want to try this Google Colab instead:
-
-* [Google Colab - Whisper WebUI GPU](https://colab.research.google.com/drive/1qeTSvi7Bt_5RMm88ipW4fkcsMOKlDDss?usp=sharing)
-* [Screenshots](https://imgur.com/a/ZfY6uBO)
-
-The runtime (Runtime -> Change runtime type -> Hardware accelerator) should already be set to GPU. If it is not, change it to GPU.
-
-Then, sign in to Google if you haven't already. Next, click on "Connect" at the top right.
-
-Under "Checking out WebUI from Git", click on the [play icon](https://imgur.com/a/81gOLyD) that appears in "[ ]" at the left. If you get a warning, click "Run anyway".
-
-After this step has completed, it should get a green check mark. Then move on to the next section, "Installing dependencies", and click on "[ ]" again. This might take approximately 30 seconds.
-
-Once this has completed, scroll down to the "Run WebUI" section, and click on "[ ]". This will launch the WebUI in a shared link (expires in 72 hours). To open the UI, click on the link next to "Running on public URL", which will be something like https://12xxx.gradio.app/
-
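-As a rough sketch (this is not the actual notebook cell, whose contents may differ), the "Run WebUI" step amounts to launching a Gradio app with a public share link:
-
-```python
-# Hedged illustration only: it shows how a Gradio app produces the
-# "Running on public URL" link mentioned above. The real WebUI wires
-# Whisper into a richer interface; `transcribe` here is a placeholder.
-import gradio as gr
-
-def transcribe(audio_path: str) -> str:
-    # In the real WebUI this would run Whisper on the uploaded audio file.
-    return "transcript goes here"
-
-demo = gr.Interface(fn=transcribe, inputs=gr.Audio(type="filepath"), outputs="text")
-demo.launch(share=True)  # prints a public URL like the one shown above
-```
-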
-The audio length in this version is not restricted, and it will run much faster as it is backed by a GPU. You can also run it using the "Large" model. Also note that it might take some time to start the model the first time, as it may need to download a 2.8 GB file on Google's servers.
-
-Once you're done, you can close the WebUI session by clicking the animated close button under "Run WebUI". You can also do this if you encounter any errors and need to restart the UI. You should also go to "Manage Sessions" and terminate the session, otherwise you may end up using all your free compute credits.
\ No newline at end of file
diff --git a/spaces/RamAnanth1/Youtube-to-HF-Dataset/app.py b/spaces/RamAnanth1/Youtube-to-HF-Dataset/app.py
deleted file mode 100644
index 03a3b00f881b398db4f3c43d0b24eb311daba225..0000000000000000000000000000000000000000
--- a/spaces/RamAnanth1/Youtube-to-HF-Dataset/app.py
+++ /dev/null
@@ -1,35 +0,0 @@
-import gradio as gr
-from dataset import TranscriptDataset
-from downloader import WhisperPP, YoutubeDownloader
-from interpreter import WhisperInterpreter
-
-model_size = "base"
-mode = "transcribe"
-write = False
-download_path = "tmp/"
-
-def dataset(url, name, token):
- ds = TranscriptDataset(name)
- data = []
- #whisper_options = dict(
- # model_size=model_size, mode=mode, write=write, number_videos=500)
- #whisperPP = WhisperPP(data,name, **whisper_options)
- #downloader = YoutubeDownloader(download_path)
- #downloader.download(url, whisperPP)
- params = dict(model_size=model_size,write=write, number_videos=500)
- overwrite = True
- ds.generate_dataset(url, download_path, overwrite, params)
- ds.upload(token)
-
- return "Dataset created at : " + "https://huggingface.co/datasets/"+ name
-
-yt_input = gr.Textbox(label = 'Youtube Link')
-name_input = gr.Textbox(label = 'Dataset Name',placeholder = "Enter in the format username/repo_name")
-token_input = gr.Textbox(label = "HF Token", placeholder="Write access token")
-
-repo_output = gr.Textbox(label = "Outcome")
-
-iface = gr.Interface(fn=dataset, inputs=[yt_input, name_input, token_input], outputs=repo_output, title="Create Transcription Dataset for Youtube using OpenAI Whisper !",
- description="Create a HuggingFace repository for Youtube Transcripts! You need to specify a write token obtained in https://hf.co/settings/token. This Space is a an experimental demo.",
- article="Find your write token at token settings
")
-iface.launch()
\ No newline at end of file
diff --git a/spaces/Ramse/TTS_Hindi/transformer/Models.py b/spaces/Ramse/TTS_Hindi/transformer/Models.py
deleted file mode 100644
index effcec285e2c10d9534049fd89bdd272f81bb0c3..0000000000000000000000000000000000000000
--- a/spaces/Ramse/TTS_Hindi/transformer/Models.py
+++ /dev/null
@@ -1,171 +0,0 @@
-import torch
-import torch.nn as nn
-import numpy as np
-
-import transformer.Constants as Constants
-from .Layers import FFTBlock
-from text.symbols import symbols
-
-
-def get_sinusoid_encoding_table(n_position, d_hid, padding_idx=None):
- """ Sinusoid position encoding table """
-
- def cal_angle(position, hid_idx):
- return position / np.power(10000, 2 * (hid_idx // 2) / d_hid)
-
- def get_posi_angle_vec(position):
- return [cal_angle(position, hid_j) for hid_j in range(d_hid)]
-
- sinusoid_table = np.array(
- [get_posi_angle_vec(pos_i) for pos_i in range(n_position)]
- )
-
- sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2]) # dim 2i
- sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2]) # dim 2i+1
-
- if padding_idx is not None:
- # zero vector for padding dimension
- sinusoid_table[padding_idx] = 0.0
-
- return torch.FloatTensor(sinusoid_table)
-
-
-class Encoder(nn.Module):
- """ Encoder """
-
- def __init__(self, config):
- super(Encoder, self).__init__()
-
- n_position = config["max_seq_len"] + 1
- n_src_vocab = len(symbols) + 1
- d_word_vec = config["transformer"]["encoder_hidden"]
- n_layers = config["transformer"]["encoder_layer"]
- n_head = config["transformer"]["encoder_head"]
- d_k = d_v = (
- config["transformer"]["encoder_hidden"]
- // config["transformer"]["encoder_head"]
- )
- d_model = config["transformer"]["encoder_hidden"]
- d_inner = config["transformer"]["conv_filter_size"]
- kernel_size = config["transformer"]["conv_kernel_size"]
- dropout = config["transformer"]["encoder_dropout"]
-
- self.max_seq_len = config["max_seq_len"]
- self.d_model = d_model
-
- self.src_word_emb = nn.Embedding(
- n_src_vocab, d_word_vec, padding_idx=Constants.PAD
- )
- self.position_enc = nn.Parameter(
- get_sinusoid_encoding_table(n_position, d_word_vec).unsqueeze(0),
- requires_grad=False,
- )
-
- self.layer_stack = nn.ModuleList(
- [
- FFTBlock(
- d_model, n_head, d_k, d_v, d_inner, kernel_size, dropout=dropout
- )
- for _ in range(n_layers)
- ]
- )
-
- def forward(self, src_seq, mask, return_attns=False):
-
- enc_slf_attn_list = []
- batch_size, max_len = src_seq.shape[0], src_seq.shape[1]
-
- # -- Prepare masks
- slf_attn_mask = mask.unsqueeze(1).expand(-1, max_len, -1)
-
- # -- Forward
- if not self.training and src_seq.shape[1] > self.max_seq_len:
- enc_output = self.src_word_emb(src_seq) + get_sinusoid_encoding_table(
- src_seq.shape[1], self.d_model
- )[: src_seq.shape[1], :].unsqueeze(0).expand(batch_size, -1, -1).to(
- src_seq.device
- )
- else:
- enc_output = self.src_word_emb(src_seq) + self.position_enc[
- :, :max_len, :
- ].expand(batch_size, -1, -1)
-
- for enc_layer in self.layer_stack:
- enc_output, enc_slf_attn = enc_layer(
- enc_output, mask=mask, slf_attn_mask=slf_attn_mask
- )
- if return_attns:
- enc_slf_attn_list += [enc_slf_attn]
-
- return enc_output
-
-
-class Decoder(nn.Module):
- """ Decoder """
-
- def __init__(self, config):
- super(Decoder, self).__init__()
-
- n_position = config["max_seq_len"] + 1
- d_word_vec = config["transformer"]["decoder_hidden"]
- n_layers = config["transformer"]["decoder_layer"]
- n_head = config["transformer"]["decoder_head"]
- d_k = d_v = (
- config["transformer"]["decoder_hidden"]
- // config["transformer"]["decoder_head"]
- )
- d_model = config["transformer"]["decoder_hidden"]
- d_inner = config["transformer"]["conv_filter_size"]
- kernel_size = config["transformer"]["conv_kernel_size"]
- dropout = config["transformer"]["decoder_dropout"]
-
- self.max_seq_len = config["max_seq_len"]
- self.d_model = d_model
-
- self.position_enc = nn.Parameter(
- get_sinusoid_encoding_table(n_position, d_word_vec).unsqueeze(0),
- requires_grad=False,
- )
-
- self.layer_stack = nn.ModuleList(
- [
- FFTBlock(
- d_model, n_head, d_k, d_v, d_inner, kernel_size, dropout=dropout
- )
- for _ in range(n_layers)
- ]
- )
-
- def forward(self, enc_seq, mask, return_attns=False):
-
- dec_slf_attn_list = []
- batch_size, max_len = enc_seq.shape[0], enc_seq.shape[1]
-
- # -- Forward
- if not self.training and enc_seq.shape[1] > self.max_seq_len:
- # -- Prepare masks
- slf_attn_mask = mask.unsqueeze(1).expand(-1, max_len, -1)
- dec_output = enc_seq + get_sinusoid_encoding_table(
- enc_seq.shape[1], self.d_model
- )[: enc_seq.shape[1], :].unsqueeze(0).expand(batch_size, -1, -1).to(
- enc_seq.device
- )
- else:
- max_len = min(max_len, self.max_seq_len)
-
- # -- Prepare masks
- slf_attn_mask = mask.unsqueeze(1).expand(-1, max_len, -1)
- dec_output = enc_seq[:, :max_len, :] + self.position_enc[
- :, :max_len, :
- ].expand(batch_size, -1, -1)
- mask = mask[:, :max_len]
- slf_attn_mask = slf_attn_mask[:, :, :max_len]
-
- for dec_layer in self.layer_stack:
- dec_output, dec_slf_attn = dec_layer(
- dec_output, mask=mask, slf_attn_mask=slf_attn_mask
- )
- if return_attns:
- dec_slf_attn_list += [dec_slf_attn]
-
- return dec_output, mask
diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/webencodings/tests.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/webencodings/tests.py
deleted file mode 100644
index e12c10d033026f09cf97b81d29555e12aae8c762..0000000000000000000000000000000000000000
--- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/webencodings/tests.py
+++ /dev/null
@@ -1,153 +0,0 @@
-# coding: utf-8
-"""
-
- webencodings.tests
- ~~~~~~~~~~~~~~~~~~
-
- A basic test suite for Encoding.
-
- :copyright: Copyright 2012 by Simon Sapin
- :license: BSD, see LICENSE for details.
-
-"""
-
-from __future__ import unicode_literals
-
-from . import (lookup, LABELS, decode, encode, iter_decode, iter_encode,
- IncrementalDecoder, IncrementalEncoder, UTF8)
-
-
-def assert_raises(exception, function, *args, **kwargs):
- try:
- function(*args, **kwargs)
- except exception:
- return
- else: # pragma: no cover
- raise AssertionError('Did not raise %s.' % exception)
-
-
-def test_labels():
- assert lookup('utf-8').name == 'utf-8'
- assert lookup('Utf-8').name == 'utf-8'
- assert lookup('UTF-8').name == 'utf-8'
- assert lookup('utf8').name == 'utf-8'
- assert lookup('utf8').name == 'utf-8'
- assert lookup('utf8 ').name == 'utf-8'
- assert lookup(' \r\nutf8\t').name == 'utf-8'
- assert lookup('u8') is None # Python label.
- assert lookup('utf-8 ') is None # Non-ASCII white space.
-
- assert lookup('US-ASCII').name == 'windows-1252'
- assert lookup('iso-8859-1').name == 'windows-1252'
- assert lookup('latin1').name == 'windows-1252'
- assert lookup('LATIN1').name == 'windows-1252'
- assert lookup('latin-1') is None
- assert lookup('LATİN1') is None # ASCII-only case insensitivity.
-
-
-def test_all_labels():
- for label in LABELS:
- assert decode(b'', label) == ('', lookup(label))
- assert encode('', label) == b''
- for repeat in [0, 1, 12]:
- output, _ = iter_decode([b''] * repeat, label)
- assert list(output) == []
- assert list(iter_encode([''] * repeat, label)) == []
- decoder = IncrementalDecoder(label)
- assert decoder.decode(b'') == ''
- assert decoder.decode(b'', final=True) == ''
- encoder = IncrementalEncoder(label)
- assert encoder.encode('') == b''
- assert encoder.encode('', final=True) == b''
- # All encoding names are valid labels too:
- for name in set(LABELS.values()):
- assert lookup(name).name == name
-
-
-def test_invalid_label():
- assert_raises(LookupError, decode, b'\xEF\xBB\xBF\xc3\xa9', 'invalid')
- assert_raises(LookupError, encode, 'é', 'invalid')
- assert_raises(LookupError, iter_decode, [], 'invalid')
- assert_raises(LookupError, iter_encode, [], 'invalid')
- assert_raises(LookupError, IncrementalDecoder, 'invalid')
- assert_raises(LookupError, IncrementalEncoder, 'invalid')
-
-
-def test_decode():
- assert decode(b'\x80', 'latin1') == ('€', lookup('latin1'))
- assert decode(b'\x80', lookup('latin1')) == ('€', lookup('latin1'))
- assert decode(b'\xc3\xa9', 'utf8') == ('é', lookup('utf8'))
- assert decode(b'\xc3\xa9', UTF8) == ('é', lookup('utf8'))
- assert decode(b'\xc3\xa9', 'ascii') == ('é', lookup('ascii'))
- assert decode(b'\xEF\xBB\xBF\xc3\xa9', 'ascii') == ('é', lookup('utf8')) # UTF-8 with BOM
-
- assert decode(b'\xFE\xFF\x00\xe9', 'ascii') == ('é', lookup('utf-16be')) # UTF-16-BE with BOM
- assert decode(b'\xFF\xFE\xe9\x00', 'ascii') == ('é', lookup('utf-16le')) # UTF-16-LE with BOM
- assert decode(b'\xFE\xFF\xe9\x00', 'ascii') == ('\ue900', lookup('utf-16be'))
- assert decode(b'\xFF\xFE\x00\xe9', 'ascii') == ('\ue900', lookup('utf-16le'))
-
- assert decode(b'\x00\xe9', 'UTF-16BE') == ('é', lookup('utf-16be'))
- assert decode(b'\xe9\x00', 'UTF-16LE') == ('é', lookup('utf-16le'))
- assert decode(b'\xe9\x00', 'UTF-16') == ('é', lookup('utf-16le'))
-
- assert decode(b'\xe9\x00', 'UTF-16BE') == ('\ue900', lookup('utf-16be'))
- assert decode(b'\x00\xe9', 'UTF-16LE') == ('\ue900', lookup('utf-16le'))
- assert decode(b'\x00\xe9', 'UTF-16') == ('\ue900', lookup('utf-16le'))
-
-
-def test_encode():
- assert encode('é', 'latin1') == b'\xe9'
- assert encode('é', 'utf8') == b'\xc3\xa9'
- assert encode('é', 'utf8') == b'\xc3\xa9'
- assert encode('é', 'utf-16') == b'\xe9\x00'
- assert encode('é', 'utf-16le') == b'\xe9\x00'
- assert encode('é', 'utf-16be') == b'\x00\xe9'
-
-
-def test_iter_decode():
- def iter_decode_to_string(input, fallback_encoding):
- output, _encoding = iter_decode(input, fallback_encoding)
- return ''.join(output)
- assert iter_decode_to_string([], 'latin1') == ''
- assert iter_decode_to_string([b''], 'latin1') == ''
- assert iter_decode_to_string([b'\xe9'], 'latin1') == 'é'
- assert iter_decode_to_string([b'hello'], 'latin1') == 'hello'
- assert iter_decode_to_string([b'he', b'llo'], 'latin1') == 'hello'
- assert iter_decode_to_string([b'hell', b'o'], 'latin1') == 'hello'
- assert iter_decode_to_string([b'\xc3\xa9'], 'latin1') == 'é'
- assert iter_decode_to_string([b'\xEF\xBB\xBF\xc3\xa9'], 'latin1') == 'é'
- assert iter_decode_to_string([
- b'\xEF\xBB\xBF', b'\xc3', b'\xa9'], 'latin1') == 'é'
- assert iter_decode_to_string([
- b'\xEF\xBB\xBF', b'a', b'\xc3'], 'latin1') == 'a\uFFFD'
- assert iter_decode_to_string([
- b'', b'\xEF', b'', b'', b'\xBB\xBF\xc3', b'\xa9'], 'latin1') == 'é'
- assert iter_decode_to_string([b'\xEF\xBB\xBF'], 'latin1') == ''
- assert iter_decode_to_string([b'\xEF\xBB'], 'latin1') == 'ï»'
- assert iter_decode_to_string([b'\xFE\xFF\x00\xe9'], 'latin1') == 'é'
- assert iter_decode_to_string([b'\xFF\xFE\xe9\x00'], 'latin1') == 'é'
- assert iter_decode_to_string([
- b'', b'\xFF', b'', b'', b'\xFE\xe9', b'\x00'], 'latin1') == 'é'
- assert iter_decode_to_string([
- b'', b'h\xe9', b'llo'], 'x-user-defined') == 'h\uF7E9llo'
-
-
-def test_iter_encode():
- assert b''.join(iter_encode([], 'latin1')) == b''
- assert b''.join(iter_encode([''], 'latin1')) == b''
- assert b''.join(iter_encode(['é'], 'latin1')) == b'\xe9'
- assert b''.join(iter_encode(['', 'é', '', ''], 'latin1')) == b'\xe9'
- assert b''.join(iter_encode(['', 'é', '', ''], 'utf-16')) == b'\xe9\x00'
- assert b''.join(iter_encode(['', 'é', '', ''], 'utf-16le')) == b'\xe9\x00'
- assert b''.join(iter_encode(['', 'é', '', ''], 'utf-16be')) == b'\x00\xe9'
- assert b''.join(iter_encode([
- '', 'h\uF7E9', '', 'llo'], 'x-user-defined')) == b'h\xe9llo'
-
-
-def test_x_user_defined():
- encoded = b'2,\x0c\x0b\x1aO\xd9#\xcb\x0f\xc9\xbbt\xcf\xa8\xca'
- decoded = '2,\x0c\x0b\x1aO\uf7d9#\uf7cb\x0f\uf7c9\uf7bbt\uf7cf\uf7a8\uf7ca'
- encoded = b'aa'
- decoded = 'aa'
- assert decode(encoded, 'x-user-defined') == (decoded, lookup('x-user-defined'))
- assert encode(decoded, 'x-user-defined') == encoded
diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/command/upload_docs.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/command/upload_docs.py
deleted file mode 100644
index 3263f07f4877ad6f9ecc881c12df29a4a65b03f4..0000000000000000000000000000000000000000
--- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/command/upload_docs.py
+++ /dev/null
@@ -1,213 +0,0 @@
-# -*- coding: utf-8 -*-
-"""upload_docs
-
-Implements a Distutils 'upload_docs' subcommand (upload documentation to
-sites other than PyPi such as devpi).
-"""
-
-from base64 import standard_b64encode
-from distutils import log
-from distutils.errors import DistutilsOptionError
-import os
-import socket
-import zipfile
-import tempfile
-import shutil
-import itertools
-import functools
-import http.client
-import urllib.parse
-import warnings
-
-from .._importlib import metadata
-from .. import SetuptoolsDeprecationWarning
-
-from .upload import upload
-
-
-def _encode(s):
- return s.encode('utf-8', 'surrogateescape')
-
-
-class upload_docs(upload):
- # override the default repository as upload_docs isn't
- # supported by Warehouse (and won't be).
- DEFAULT_REPOSITORY = 'https://pypi.python.org/pypi/'
-
- description = 'Upload documentation to sites other than PyPi such as devpi'
-
- user_options = [
- ('repository=', 'r',
- "url of repository [default: %s]" % upload.DEFAULT_REPOSITORY),
- ('show-response', None,
- 'display full response text from server'),
- ('upload-dir=', None, 'directory to upload'),
- ]
- boolean_options = upload.boolean_options
-
- def has_sphinx(self):
- return bool(
- self.upload_dir is None
- and metadata.entry_points(group='distutils.commands', name='build_sphinx')
- )
-
- sub_commands = [('build_sphinx', has_sphinx)]
-
- def initialize_options(self):
- upload.initialize_options(self)
- self.upload_dir = None
- self.target_dir = None
-
- def finalize_options(self):
- log.warn(
- "Upload_docs command is deprecated. Use Read the Docs "
- "(https://readthedocs.org) instead.")
- upload.finalize_options(self)
- if self.upload_dir is None:
- if self.has_sphinx():
- build_sphinx = self.get_finalized_command('build_sphinx')
- self.target_dir = dict(build_sphinx.builder_target_dirs)['html']
- else:
- build = self.get_finalized_command('build')
- self.target_dir = os.path.join(build.build_base, 'docs')
- else:
- self.ensure_dirname('upload_dir')
- self.target_dir = self.upload_dir
- self.announce('Using upload directory %s' % self.target_dir)
-
- def create_zipfile(self, filename):
- zip_file = zipfile.ZipFile(filename, "w")
- try:
- self.mkpath(self.target_dir) # just in case
- for root, dirs, files in os.walk(self.target_dir):
- if root == self.target_dir and not files:
- tmpl = "no files found in upload directory '%s'"
- raise DistutilsOptionError(tmpl % self.target_dir)
- for name in files:
- full = os.path.join(root, name)
- relative = root[len(self.target_dir):].lstrip(os.path.sep)
- dest = os.path.join(relative, name)
- zip_file.write(full, dest)
- finally:
- zip_file.close()
-
- def run(self):
- warnings.warn(
- "upload_docs is deprecated and will be removed in a future "
- "version. Use tools like httpie or curl instead.",
- SetuptoolsDeprecationWarning,
- )
-
- # Run sub commands
- for cmd_name in self.get_sub_commands():
- self.run_command(cmd_name)
-
- tmp_dir = tempfile.mkdtemp()
- name = self.distribution.metadata.get_name()
- zip_file = os.path.join(tmp_dir, "%s.zip" % name)
- try:
- self.create_zipfile(zip_file)
- self.upload_file(zip_file)
- finally:
- shutil.rmtree(tmp_dir)
-
- @staticmethod
- def _build_part(item, sep_boundary):
- key, values = item
- title = '\nContent-Disposition: form-data; name="%s"' % key
- # handle multiple entries for the same name
- if not isinstance(values, list):
- values = [values]
- for value in values:
- if isinstance(value, tuple):
- title += '; filename="%s"' % value[0]
- value = value[1]
- else:
- value = _encode(value)
- yield sep_boundary
- yield _encode(title)
- yield b"\n\n"
- yield value
- if value and value[-1:] == b'\r':
- yield b'\n' # write an extra newline (lurve Macs)
-
- @classmethod
- def _build_multipart(cls, data):
- """
- Build up the MIME payload for the POST data
- """
- boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'
- sep_boundary = b'\n--' + boundary.encode('ascii')
- end_boundary = sep_boundary + b'--'
- end_items = end_boundary, b"\n",
- builder = functools.partial(
- cls._build_part,
- sep_boundary=sep_boundary,
- )
- part_groups = map(builder, data.items())
- parts = itertools.chain.from_iterable(part_groups)
- body_items = itertools.chain(parts, end_items)
- content_type = 'multipart/form-data; boundary=%s' % boundary
- return b''.join(body_items), content_type
-
- def upload_file(self, filename):
- with open(filename, 'rb') as f:
- content = f.read()
- meta = self.distribution.metadata
- data = {
- ':action': 'doc_upload',
- 'name': meta.get_name(),
- 'content': (os.path.basename(filename), content),
- }
- # set up the authentication
- credentials = _encode(self.username + ':' + self.password)
- credentials = standard_b64encode(credentials).decode('ascii')
- auth = "Basic " + credentials
-
- body, ct = self._build_multipart(data)
-
- msg = "Submitting documentation to %s" % (self.repository)
- self.announce(msg, log.INFO)
-
- # build the Request
- # We can't use urllib2 since we need to send the Basic
- # auth right with the first request
- schema, netloc, url, params, query, fragments = \
- urllib.parse.urlparse(self.repository)
- assert not params and not query and not fragments
- if schema == 'http':
- conn = http.client.HTTPConnection(netloc)
- elif schema == 'https':
- conn = http.client.HTTPSConnection(netloc)
- else:
- raise AssertionError("unsupported schema " + schema)
-
- data = ''
- try:
- conn.connect()
- conn.putrequest("POST", url)
- content_type = ct
- conn.putheader('Content-type', content_type)
- conn.putheader('Content-length', str(len(body)))
- conn.putheader('Authorization', auth)
- conn.endheaders()
- conn.send(body)
- except socket.error as e:
- self.announce(str(e), log.ERROR)
- return
-
- r = conn.getresponse()
- if r.status == 200:
- msg = 'Server response (%s): %s' % (r.status, r.reason)
- self.announce(msg, log.INFO)
- elif r.status == 301:
- location = r.getheader('Location')
- if location is None:
- location = 'https://pythonhosted.org/%s/' % meta.get_name()
- msg = 'Upload successful. Visit %s' % location
- self.announce(msg, log.INFO)
- else:
- msg = 'Upload failed (%s): %s' % (r.status, r.reason)
- self.announce(msg, log.ERROR)
- if self.show_response:
- print('-' * 75, r.read(), '-' * 75)
diff --git a/spaces/Realcat/image-matching-webui/third_party/GlueStick/setup.py b/spaces/Realcat/image-matching-webui/third_party/GlueStick/setup.py
deleted file mode 100644
index c1a9df947ac2b788597e3028226f8efbdcd21b94..0000000000000000000000000000000000000000
--- a/spaces/Realcat/image-matching-webui/third_party/GlueStick/setup.py
+++ /dev/null
@@ -1,3 +0,0 @@
-from setuptools import setup
-
-setup(name="gluestick", version="0.0", packages=["gluestick"])
diff --git a/spaces/Reeve/Ohayou_Face/models/mtcnn/mtcnn_pytorch/__init__.py b/spaces/Reeve/Ohayou_Face/models/mtcnn/mtcnn_pytorch/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/Ricecake123/RVC-demo/docs/README.ko.md b/spaces/Ricecake123/RVC-demo/docs/README.ko.md
deleted file mode 100644
index abea8e6a263b8eb1c8d1cb285089ce1a9f3ac182..0000000000000000000000000000000000000000
--- a/spaces/Ricecake123/RVC-demo/docs/README.ko.md
+++ /dev/null
@@ -1,112 +0,0 @@
-# Retrieval-based-Voice-Conversion-WebUI
-
-VITS 기반의 간단하고 사용하기 쉬운 음성 변환 프레임워크.
-
-[GitHub](https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI) | [Colab](https://colab.research.google.com/github/RVC-Project/Retrieval-based-Voice-Conversion-WebUI/blob/main/Retrieval_based_Voice_Conversion_WebUI.ipynb) | [License](https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI/blob/main/LICENSE) | [Hugging Face](https://huggingface.co/lj1995/VoiceConversionWebUI/tree/main/) | [Discord](https://discord.gg/HcsmBBGyVk)
-
----
-
-[**업데이트 로그**](https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI/blob/main/docs/Changelog_KO.md)
-
-[**English**](./README.en.md) | [**中文简体**](../README.md) | [**日本語**](./README.ja.md) | [**한국어**](./README.ko.md) ([**韓國語**](./README.ko.han.md))
-
-> [데모 영상](https://www.bilibili.com/video/BV1pm4y1z7Gm/)을 확인해 보세요!
-
-> RVC를 활용한 실시간 음성변환: [w-okada/voice-changer](https://github.com/w-okada/voice-changer)
-
-> 기본 모델은 50시간 가량의 고퀄리티 오픈 소스 VCTK 데이터셋을 사용하였으므로, 저작권상의 염려가 없으니 안심하고 사용하시기 바랍니다.
-
-> 저작권 문제가 없는 고퀄리티의 노래를 이후에도 계속해서 훈련할 예정입니다.
-
-## 소개
-
-본 Repo는 다음과 같은 특징을 가지고 있습니다:
-
-- top1 검색을 이용하여 입력 음색 특징을 훈련 세트 음색 특징으로 대체하여 음색의 누출을 방지;
-- 상대적으로 낮은 성능의 GPU에서도 빠른 훈련 가능;
-- 적은 양의 데이터로 훈련해도 좋은 결과를 얻을 수 있음 (최소 10분 이상의 저잡음 음성 데이터를 사용하는 것을 권장);
-- 모델 융합을 통한 음색의 변조 가능 (ckpt 처리 탭->ckpt 병합 선택);
-- 사용하기 쉬운 WebUI (웹 인터페이스);
-- UVR5 모델을 이용하여 목소리와 배경음악의 빠른 분리;
-
-## 환경의 준비
-
-poetry를 통해 dependencies를 설치하는 것을 권장합니다.
-
-다음 명령은 Python 버전 3.8 이상의 환경에서 실행되어야 합니다:
-
-```bash
-# PyTorch 관련 주요 dependencies 설치, 이미 설치되어 있는 경우 건너뛰기 가능
-# 참조: https://pytorch.org/get-started/locally/
-pip install torch torchvision torchaudio
-
-# Windows + Nvidia Ampere Architecture(RTX30xx)를 사용하고 있다면, https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI/issues/21 에서 명시된 것과 같이 PyTorch에 맞는 CUDA 버전을 지정해야 합니다.
-#pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu117
-
-# Poetry 설치, 이미 설치되어 있는 경우 건너뛰기 가능
-# Reference: https://python-poetry.org/docs/#installation
-curl -sSL https://install.python-poetry.org | python3 -
-
-# Dependencies 설치
-poetry install
-```
-
-pip를 활용하여 dependencies를 설치하여도 무방합니다.
-
-```bash
-pip install -r requirements.txt
-```
-
-## 기타 사전 모델 준비
-
-RVC 모델은 추론과 훈련을 위하여 다른 사전 모델이 필요합니다.
-
-[Huggingface space](https://huggingface.co/lj1995/VoiceConversionWebUI/tree/main/)를 통해서 다운로드 할 수 있습니다.
-
-다음은 RVC에 필요한 사전 모델 및 기타 파일 목록입니다:
-
-```bash
-hubert_base.pt
-
-./pretrained
-
-./uvr5_weights
-
-# Windows를 사용하는 경우 이 사전도 필요할 수 있습니다. FFmpeg가 설치되어 있으면 건너뛰어도 됩니다.
-ffmpeg.exe
-```
-
-그 후 이하의 명령을 사용하여 WebUI를 시작할 수 있습니다:
-
-```bash
-python infer-web.py
-```
-
-Windows를 사용하는 경우 `RVC-beta.7z`를 다운로드 및 압축 해제하여 RVC를 직접 사용하거나 `go-web.bat`을 사용하여 WebUi를 시작할 수 있습니다.
-
-## 참고
-
-- [ContentVec](https://github.com/auspicious3000/contentvec/)
-- [VITS](https://github.com/jaywalnut310/vits)
-- [HIFIGAN](https://github.com/jik876/hifi-gan)
-- [Gradio](https://github.com/gradio-app/gradio)
-- [FFmpeg](https://github.com/FFmpeg/FFmpeg)
-- [Ultimate Vocal Remover](https://github.com/Anjok07/ultimatevocalremovergui)
-- [audio-slicer](https://github.com/openvpi/audio-slicer)
-
-## 모든 기여자 분들의 노력에 감사드립니다.
-
diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/models/detectors/rpn.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/models/detectors/rpn.py
deleted file mode 100644
index 1a77294549d1c3dc7821063c3f3d08bb331fbe59..0000000000000000000000000000000000000000
--- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/models/detectors/rpn.py
+++ /dev/null
@@ -1,154 +0,0 @@
-import mmcv
-from mmcv.image import tensor2imgs
-
-from mmdet.core import bbox_mapping
-from ..builder import DETECTORS, build_backbone, build_head, build_neck
-from .base import BaseDetector
-
-
-@DETECTORS.register_module()
-class RPN(BaseDetector):
- """Implementation of Region Proposal Network."""
-
- def __init__(self,
- backbone,
- neck,
- rpn_head,
- train_cfg,
- test_cfg,
- pretrained=None):
- super(RPN, self).__init__()
- self.backbone = build_backbone(backbone)
- self.neck = build_neck(neck) if neck is not None else None
- rpn_train_cfg = train_cfg.rpn if train_cfg is not None else None
- rpn_head.update(train_cfg=rpn_train_cfg)
- rpn_head.update(test_cfg=test_cfg.rpn)
- self.rpn_head = build_head(rpn_head)
- self.train_cfg = train_cfg
- self.test_cfg = test_cfg
- self.init_weights(pretrained=pretrained)
-
- def init_weights(self, pretrained=None):
- """Initialize the weights in detector.
-
- Args:
- pretrained (str, optional): Path to pre-trained weights.
- Defaults to None.
- """
- super(RPN, self).init_weights(pretrained)
- self.backbone.init_weights(pretrained=pretrained)
- if self.with_neck:
- self.neck.init_weights()
- self.rpn_head.init_weights()
-
- def extract_feat(self, img):
- """Extract features.
-
- Args:
- img (torch.Tensor): Image tensor with shape (n, c, h ,w).
-
- Returns:
- list[torch.Tensor]: Multi-level features that may have
- different resolutions.
- """
- x = self.backbone(img)
- if self.with_neck:
- x = self.neck(x)
- return x
-
- def forward_dummy(self, img):
- """Dummy forward function."""
- x = self.extract_feat(img)
- rpn_outs = self.rpn_head(x)
- return rpn_outs
-
- def forward_train(self,
- img,
- img_metas,
- gt_bboxes=None,
- gt_bboxes_ignore=None):
- """
- Args:
- img (Tensor): Input images of shape (N, C, H, W).
- Typically these should be mean centered and std scaled.
- img_metas (list[dict]): A List of image info dict where each dict
- has: 'img_shape', 'scale_factor', 'flip', and may also contain
- 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
- For details on the values of these keys see
- :class:`mmdet.datasets.pipelines.Collect`.
- gt_bboxes (list[Tensor]): Each item are the truth boxes for each
- image in [tl_x, tl_y, br_x, br_y] format.
- gt_bboxes_ignore (None | list[Tensor]): Specify which bounding
- boxes can be ignored when computing the loss.
-
- Returns:
- dict[str, Tensor]: A dictionary of loss components.
- """
- if (isinstance(self.train_cfg.rpn, dict)
- and self.train_cfg.rpn.get('debug', False)):
- self.rpn_head.debug_imgs = tensor2imgs(img)
-
- x = self.extract_feat(img)
- losses = self.rpn_head.forward_train(x, img_metas, gt_bboxes, None,
- gt_bboxes_ignore)
- return losses
-
- def simple_test(self, img, img_metas, rescale=False):
- """Test function without test time augmentation.
-
- Args:
- imgs (list[torch.Tensor]): List of multiple images
- img_metas (list[dict]): List of image information.
- rescale (bool, optional): Whether to rescale the results.
- Defaults to False.
-
- Returns:
- list[np.ndarray]: proposals
- """
- x = self.extract_feat(img)
- proposal_list = self.rpn_head.simple_test_rpn(x, img_metas)
- if rescale:
- for proposals, meta in zip(proposal_list, img_metas):
- proposals[:, :4] /= proposals.new_tensor(meta['scale_factor'])
-
- return [proposal.cpu().numpy() for proposal in proposal_list]
-
- def aug_test(self, imgs, img_metas, rescale=False):
- """Test function with test time augmentation.
-
- Args:
- imgs (list[torch.Tensor]): List of multiple images
- img_metas (list[dict]): List of image information.
- rescale (bool, optional): Whether to rescale the results.
- Defaults to False.
-
- Returns:
- list[np.ndarray]: proposals
- """
- proposal_list = self.rpn_head.aug_test_rpn(
- self.extract_feats(imgs), img_metas)
- if not rescale:
- for proposals, img_meta in zip(proposal_list, img_metas[0]):
- img_shape = img_meta['img_shape']
- scale_factor = img_meta['scale_factor']
- flip = img_meta['flip']
- flip_direction = img_meta['flip_direction']
- proposals[:, :4] = bbox_mapping(proposals[:, :4], img_shape,
- scale_factor, flip,
- flip_direction)
- return [proposal.cpu().numpy() for proposal in proposal_list]
-
- def show_result(self, data, result, top_k=20, **kwargs):
- """Show RPN proposals on the image.
-
- Args:
- data (str or np.ndarray): Image filename or loaded image.
- result (Tensor or tuple): The results to draw over `img`
- bbox_result or (bbox_result, segm_result).
- top_k (int): Plot the first k bboxes only
- if set positive. Default: 20
-
- Returns:
- np.ndarray: The image with bboxes drawn on it.
- """
- mmcv.imshow_bboxes(data, result, top_k=top_k)
diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/models/roi_heads/roi_extractors/__init__.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/models/roi_heads/roi_extractors/__init__.py
deleted file mode 100644
index a6ec0ecc3063cd23c2463f2f53f1c2a83b04d43b..0000000000000000000000000000000000000000
--- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/models/roi_heads/roi_extractors/__init__.py
+++ /dev/null
@@ -1,7 +0,0 @@
-from .generic_roi_extractor import GenericRoIExtractor
-from .single_level_roi_extractor import SingleRoIExtractor
-
-__all__ = [
- 'SingleRoIExtractor',
- 'GenericRoIExtractor',
-]
diff --git a/spaces/Shawn37/UTR_LM/esm/multihead_attention.py b/spaces/Shawn37/UTR_LM/esm/multihead_attention.py
deleted file mode 100644
index 9b0e156dd0d83420f891b7e83b0e6467955bf043..0000000000000000000000000000000000000000
--- a/spaces/Shawn37/UTR_LM/esm/multihead_attention.py
+++ /dev/null
@@ -1,506 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import math
-from typing import Dict, Optional, Tuple
-
-import torch
-import torch.nn.functional as F
-from torch import Tensor, nn
-from torch.nn import Parameter
-from esm.rotary_embedding import RotaryEmbedding
-
-import uuid
-
-
-def utils_softmax(x, dim: int, onnx_trace: bool = False):
- if onnx_trace:
- return F.softmax(x.float(), dim=dim)
- else:
- return F.softmax(x, dim=dim, dtype=torch.float32)
-
-
-class FairseqIncrementalState(object):
- def __init__(self, *args, **kwargs):
- super().__init__(*args, **kwargs)
- self.init_incremental_state()
-
- def init_incremental_state(self):
- self._incremental_state_id = str(uuid.uuid4())
-
- def _get_full_incremental_state_key(self, key: str) -> str:
- return "{}.{}".format(self._incremental_state_id, key)
-
- def get_incremental_state(
- self,
- incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],
- key: str,
- ) -> Optional[Dict[str, Optional[Tensor]]]:
- """Helper for getting incremental state for an nn.Module."""
- full_key = self._get_full_incremental_state_key(key)
- if incremental_state is None or full_key not in incremental_state:
- return None
- return incremental_state[full_key]
-
- def set_incremental_state(
- self,
- incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],
- key: str,
- value: Dict[str, Optional[Tensor]],
- ) -> Optional[Dict[str, Dict[str, Optional[Tensor]]]]:
- """Helper for setting incremental state for an nn.Module."""
- if incremental_state is not None:
- full_key = self._get_full_incremental_state_key(key)
- incremental_state[full_key] = value
- return incremental_state
-
-
-def with_incremental_state(cls):
- cls.__bases__ = (FairseqIncrementalState,) + tuple(
- b for b in cls.__bases__ if b != FairseqIncrementalState
- )
- return cls
-
-
-@with_incremental_state
-class MultiheadAttention(nn.Module):
- """Multi-headed attention.
- See "Attention Is All You Need" for more details.
- """
-
- def __init__(
- self,
- embed_dim,
- num_heads,
- kdim=None,
- vdim=None,
- dropout=0.0,
- bias=True,
- add_bias_kv: bool = False,
- add_zero_attn: bool = False,
- self_attention: bool = False,
- encoder_decoder_attention: bool = False,
- use_rotary_embeddings: bool = False,
- ):
- super().__init__()
- self.embed_dim = embed_dim
- self.kdim = kdim if kdim is not None else embed_dim
- self.vdim = vdim if vdim is not None else embed_dim
- self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim
-
- self.num_heads = num_heads
- self.dropout = dropout
- self.head_dim = embed_dim // num_heads
- assert (
- self.head_dim * num_heads == self.embed_dim
- ), "embed_dim must be divisible by num_heads"
- self.scaling = self.head_dim**-0.5
-
- self.self_attention = self_attention
- self.encoder_decoder_attention = encoder_decoder_attention
-
- assert not self.self_attention or self.qkv_same_dim, (
- "Self-attention requires query, key and " "value to be of the same size"
- )
-
- self.k_proj = nn.Linear(self.kdim, embed_dim, bias=bias)
- self.v_proj = nn.Linear(self.vdim, embed_dim, bias=bias)
- self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
-
- self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
-
- if add_bias_kv:
- self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
- self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
- else:
- self.bias_k = self.bias_v = None
-
- self.add_zero_attn = add_zero_attn
-
- self.reset_parameters()
-
- self.onnx_trace = False
- self.rot_emb = None
- if use_rotary_embeddings:
- self.rot_emb = RotaryEmbedding(dim=self.head_dim)
-
- self.enable_torch_version = False
- if hasattr(F, "multi_head_attention_forward"):
- self.enable_torch_version = True
- else:
- self.enable_torch_version = False
-
- def prepare_for_onnx_export_(self):
- self.onnx_trace = True
-
- def reset_parameters(self):
- if self.qkv_same_dim:
- # Empirically observed the convergence to be much better with
- # the scaled initialization
- nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2))
- nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2))
- nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2))
- else:
- nn.init.xavier_uniform_(self.k_proj.weight)
- nn.init.xavier_uniform_(self.v_proj.weight)
- nn.init.xavier_uniform_(self.q_proj.weight)
-
- nn.init.xavier_uniform_(self.out_proj.weight)
- if self.out_proj.bias is not None:
- nn.init.constant_(self.out_proj.bias, 0.0)
- if self.bias_k is not None:
- nn.init.xavier_normal_(self.bias_k)
- if self.bias_v is not None:
- nn.init.xavier_normal_(self.bias_v)
-
- def forward(
- self,
- query,
- key: Optional[Tensor],
- value: Optional[Tensor],
- key_padding_mask: Optional[Tensor] = None,
- incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
- need_weights: bool = True,
- static_kv: bool = False,
- attn_mask: Optional[Tensor] = None,
- before_softmax: bool = False,
- need_head_weights: bool = False,
- ) -> Tuple[Tensor, Optional[Tensor]]:
- """Input shape: Time x Batch x Channel
- Args:
- key_padding_mask (ByteTensor, optional): mask to exclude
- keys that are pads, of shape `(batch, src_len)`, where
- padding elements are indicated by 1s.
- need_weights (bool, optional): return the attention weights,
- averaged over heads (default: False).
- attn_mask (ByteTensor, optional): typically used to
- implement causal attention, where the mask prevents the
- attention from looking forward in time (default: None).
- before_softmax (bool, optional): return the raw attention
- weights and values before the attention softmax.
- need_head_weights (bool, optional): return the attention
- weights for each head. Implies *need_weights*. Default:
- return the average attention weights over all heads.
- """
- if need_head_weights:
- need_weights = True
-
- tgt_len, bsz, embed_dim = query.size()
- assert embed_dim == self.embed_dim
- assert list(query.size()) == [tgt_len, bsz, embed_dim]
-
- if (
- not self.rot_emb
- and self.enable_torch_version
- and not self.onnx_trace
- and incremental_state is None
- and not static_kv
- # A workaround for quantization to work. Otherwise JIT compilation
- # treats bias in linear module as method.
- and not torch.jit.is_scripting()
- and not need_head_weights
- ):
- assert key is not None and value is not None
- return F.multi_head_attention_forward(
- query,
- key,
- value,
- self.embed_dim,
- self.num_heads,
- torch.empty([0]),
- torch.cat((self.q_proj.bias, self.k_proj.bias, self.v_proj.bias)),
- self.bias_k,
- self.bias_v,
- self.add_zero_attn,
- self.dropout,
- self.out_proj.weight,
- self.out_proj.bias,
- self.training,
- key_padding_mask,
- need_weights,
- attn_mask,
- use_separate_proj_weight=True,
- q_proj_weight=self.q_proj.weight,
- k_proj_weight=self.k_proj.weight,
- v_proj_weight=self.v_proj.weight,
- )
- if incremental_state is not None:
- saved_state = self._get_input_buffer(incremental_state)
- if saved_state is not None and "prev_key" in saved_state:
- # previous time steps are cached - no need to recompute
- # key and value if they are static
- if static_kv:
- assert self.encoder_decoder_attention and not self.self_attention
- key = value = None
- else:
- saved_state = None
-
- if self.self_attention:
- q = self.q_proj(query)
- k = self.k_proj(query)
- v = self.v_proj(query)
- elif self.encoder_decoder_attention:
- # encoder-decoder attention
- q = self.q_proj(query)
- if key is None:
- assert value is None
- k = v = None
- else:
- k = self.k_proj(key)
- v = self.v_proj(key)
-
- else:
- assert key is not None and value is not None
- q = self.q_proj(query)
- k = self.k_proj(key)
- v = self.v_proj(value)
- q *= self.scaling
-
- if self.bias_k is not None:
- assert self.bias_v is not None
- k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])
- v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])
- if attn_mask is not None:
- attn_mask = torch.cat(
- [attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1
- )
- if key_padding_mask is not None:
- key_padding_mask = torch.cat(
- [
- key_padding_mask,
- key_padding_mask.new_zeros(key_padding_mask.size(0), 1),
- ],
- dim=1,
- )
-
- q = q.contiguous().view(tgt_len, bsz * self.num_heads, self.head_dim).transpose(0, 1)
- if k is not None:
- k = k.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1)
- if v is not None:
- v = v.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1)
-
- if saved_state is not None:
- # saved states are stored with shape (bsz, num_heads, seq_len, head_dim)
- if "prev_key" in saved_state:
- _prev_key = saved_state["prev_key"]
- assert _prev_key is not None
- prev_key = _prev_key.view(bsz * self.num_heads, -1, self.head_dim)
- if static_kv:
- k = prev_key
- else:
- assert k is not None
- k = torch.cat([prev_key, k], dim=1)
- if "prev_value" in saved_state:
- _prev_value = saved_state["prev_value"]
- assert _prev_value is not None
- prev_value = _prev_value.view(bsz * self.num_heads, -1, self.head_dim)
- if static_kv:
- v = prev_value
- else:
- assert v is not None
- v = torch.cat([prev_value, v], dim=1)
- prev_key_padding_mask: Optional[Tensor] = None
- if "prev_key_padding_mask" in saved_state:
- prev_key_padding_mask = saved_state["prev_key_padding_mask"]
- assert k is not None and v is not None
- key_padding_mask = MultiheadAttention._append_prev_key_padding_mask(
- key_padding_mask=key_padding_mask,
- prev_key_padding_mask=prev_key_padding_mask,
- batch_size=bsz,
- src_len=k.size(1),
- static_kv=static_kv,
- )
-
- saved_state["prev_key"] = k.view(bsz, self.num_heads, -1, self.head_dim)
- saved_state["prev_value"] = v.view(bsz, self.num_heads, -1, self.head_dim)
- saved_state["prev_key_padding_mask"] = key_padding_mask
- # In this branch incremental_state is never None
- assert incremental_state is not None
- incremental_state = self._set_input_buffer(incremental_state, saved_state)
- assert k is not None
- src_len = k.size(1)
-
- # This is part of a workaround to get around fork/join parallelism
- # not supporting Optional types.
- if key_padding_mask is not None and key_padding_mask.dim() == 0:
- key_padding_mask = None
-
- if key_padding_mask is not None:
- assert key_padding_mask.size(0) == bsz
- assert key_padding_mask.size(1) == src_len
-
- if self.add_zero_attn:
- assert v is not None
- src_len += 1
- k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1)
- v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1)
- if attn_mask is not None:
- attn_mask = torch.cat(
- [attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1
- )
- if key_padding_mask is not None:
- key_padding_mask = torch.cat(
- [
- key_padding_mask,
- torch.zeros(key_padding_mask.size(0), 1).type_as(key_padding_mask),
- ],
- dim=1,
- )
-
- if self.rot_emb:
- q, k = self.rot_emb(q, k)
-
- attn_weights = torch.bmm(q, k.transpose(1, 2))
- attn_weights = MultiheadAttention.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz)
-
- assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len]
-
- if attn_mask is not None:
- attn_mask = attn_mask.unsqueeze(0)
- if self.onnx_trace:
- attn_mask = attn_mask.repeat(attn_weights.size(0), 1, 1)
- attn_weights += attn_mask
-
- if key_padding_mask is not None:
- # don't attend to padding symbols
- attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
- attn_weights = attn_weights.masked_fill(
- key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool), float("-inf")
- )
- attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
-
- if before_softmax:
- return attn_weights, v
-
- attn_weights_float = utils_softmax(attn_weights, dim=-1, onnx_trace=self.onnx_trace)
- attn_weights = attn_weights_float.type_as(attn_weights)
- attn_probs = F.dropout(
- attn_weights_float.type_as(attn_weights),
- p=self.dropout,
- training=self.training,
- )
- assert v is not None
- attn = torch.bmm(attn_probs, v)
- assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim]
- if self.onnx_trace and attn.size(1) == 1:
- # when ONNX tracing a single decoder step (sequence length == 1)
- # the transpose is a no-op copy before view, thus unnecessary
- attn = attn.contiguous().view(tgt_len, bsz, embed_dim)
- else:
- attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
- attn = self.out_proj(attn)
- attn_weights: Optional[Tensor] = None
- if need_weights:
- attn_weights = attn_weights_float.view(
- bsz, self.num_heads, tgt_len, src_len
- ).type_as(attn).transpose(1, 0)
- if not need_head_weights:
- # average attention weights over heads
- attn_weights = attn_weights.mean(dim=0)
-
- return attn, attn_weights
-
- @staticmethod
- def _append_prev_key_padding_mask(
- key_padding_mask: Optional[Tensor],
- prev_key_padding_mask: Optional[Tensor],
- batch_size: int,
- src_len: int,
- static_kv: bool,
- ) -> Optional[Tensor]:
- # saved key padding masks have shape (bsz, seq_len)
- if prev_key_padding_mask is not None and static_kv:
- new_key_padding_mask = prev_key_padding_mask
- elif prev_key_padding_mask is not None and key_padding_mask is not None:
- new_key_padding_mask = torch.cat(
- [prev_key_padding_mask.float(), key_padding_mask.float()], dim=1
- )
- # During incremental decoding, as the padding token enters and
- # leaves the frame, there will be a time when prev or current
- # is None
- elif prev_key_padding_mask is not None:
- filler = torch.zeros(
- (batch_size, src_len - prev_key_padding_mask.size(1)),
- device=prev_key_padding_mask.device,
- )
- new_key_padding_mask = torch.cat(
- [prev_key_padding_mask.float(), filler.float()], dim=1
- )
- elif key_padding_mask is not None:
- filler = torch.zeros(
- (batch_size, src_len - key_padding_mask.size(1)),
- device=key_padding_mask.device,
- )
- new_key_padding_mask = torch.cat([filler.float(), key_padding_mask.float()], dim=1)
- else:
- new_key_padding_mask = prev_key_padding_mask
- return new_key_padding_mask
-
- @torch.jit.export
- def reorder_incremental_state(
- self, incremental_state: Dict[str, Dict[str, Optional[Tensor]]], new_order: Tensor
- ):
- """Reorder buffered internal state (for incremental generation)."""
- input_buffer = self._get_input_buffer(incremental_state)
- if input_buffer is not None:
- for k in input_buffer.keys():
- input_buffer_k = input_buffer[k]
- if input_buffer_k is not None:
- if self.encoder_decoder_attention and input_buffer_k.size(0) == new_order.size(
- 0
- ):
- break
- input_buffer[k] = input_buffer_k.index_select(0, new_order)
- incremental_state = self._set_input_buffer(incremental_state, input_buffer)
- return incremental_state
-
- def _get_input_buffer(
- self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]]
- ) -> Dict[str, Optional[Tensor]]:
- result = self.get_incremental_state(incremental_state, "attn_state")
- if result is not None:
- return result
- else:
- empty_result: Dict[str, Optional[Tensor]] = {}
- return empty_result
-
- def _set_input_buffer(
- self,
- incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
- buffer: Dict[str, Optional[Tensor]],
- ):
- return self.set_incremental_state(incremental_state, "attn_state", buffer)
-
- @staticmethod
- def apply_sparse_mask(attn_weights, tgt_len: int, src_len: int, bsz: int):
- return attn_weights
-
- def upgrade_state_dict_named(self, state_dict, name):
- prefix = name + "." if name != "" else ""
- items_to_add = {}
- keys_to_remove = []
- for k in state_dict.keys():
- if k.endswith(prefix + "in_proj_weight"):
- # in_proj_weight used to be q + k + v with same dimensions
- dim = int(state_dict[k].shape[0] / 3)
- items_to_add[prefix + "q_proj.weight"] = state_dict[k][:dim]
- items_to_add[prefix + "k_proj.weight"] = state_dict[k][dim : 2 * dim]
- items_to_add[prefix + "v_proj.weight"] = state_dict[k][2 * dim :]
-
- keys_to_remove.append(k)
-
- k_bias = prefix + "in_proj_bias"
- if k_bias in state_dict.keys():
- dim = int(state_dict[k].shape[0] / 3)
- items_to_add[prefix + "q_proj.bias"] = state_dict[k_bias][:dim]
- items_to_add[prefix + "k_proj.bias"] = state_dict[k_bias][dim : 2 * dim]
- items_to_add[prefix + "v_proj.bias"] = state_dict[k_bias][2 * dim :]
-
- keys_to_remove.append(prefix + "in_proj_bias")
-
- for k in keys_to_remove:
- del state_dict[k]
-
- for key, value in items_to_add.items():
- state_dict[key] = value
\ No newline at end of file
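The `upgrade_state_dict_named` hook above migrates legacy checkpoints whose query/key/value projections are stored as one fused `in_proj_weight`/`in_proj_bias`. A minimal standalone sketch of that split on a toy state dict (the key names below are hypothetical, not from any real checkpoint):

```python
# Minimal sketch of the split performed by upgrade_state_dict_named above:
# a legacy checkpoint stores q/k/v as one fused (3*embed_dim, embed_dim) matrix,
# and newer code expects separate q_proj / k_proj / v_proj entries.
import torch

embed_dim = 8
legacy = {
    "attn.in_proj_weight": torch.randn(3 * embed_dim, embed_dim),
    "attn.in_proj_bias": torch.randn(3 * embed_dim),
}

upgraded = {}
for key, tensor in list(legacy.items()):
    if key.endswith("in_proj_weight"):
        prefix = key[: -len("in_proj_weight")]
        dim = tensor.shape[0] // 3
        upgraded[prefix + "q_proj.weight"] = tensor[:dim]
        upgraded[prefix + "k_proj.weight"] = tensor[dim : 2 * dim]
        upgraded[prefix + "v_proj.weight"] = tensor[2 * dim :]
        bias = legacy.get(prefix + "in_proj_bias")
        if bias is not None:
            upgraded[prefix + "q_proj.bias"] = bias[:dim]
            upgraded[prefix + "k_proj.bias"] = bias[dim : 2 * dim]
            upgraded[prefix + "v_proj.bias"] = bias[2 * dim :]

print(sorted(upgraded))  # separate q/k/v projection weights and biases
```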
diff --git a/spaces/Sloth-Alchemist/Test.xyz/README.md b/spaces/Sloth-Alchemist/Test.xyz/README.md
deleted file mode 100644
index 5f7380617f352d55f042c1b5997d90ddc64a85b4..0000000000000000000000000000000000000000
--- a/spaces/Sloth-Alchemist/Test.xyz/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Test.xyz
-emoji: 🐨
-colorFrom: green
-colorTo: indigo
-sdk: gradio
-sdk_version: 3.22.1
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
\ No newline at end of file
diff --git a/spaces/SpacesExamples/llama-cpp-python-cuda-gradio/app.py b/spaces/SpacesExamples/llama-cpp-python-cuda-gradio/app.py
deleted file mode 100644
index 2eb9834b563af44eb5ee9f32178a8b16ae9e055e..0000000000000000000000000000000000000000
--- a/spaces/SpacesExamples/llama-cpp-python-cuda-gradio/app.py
+++ /dev/null
@@ -1,72 +0,0 @@
-import os
-import gradio as gr
-import copy
-import time
-import llama_cpp
-from llama_cpp import Llama
-from huggingface_hub import hf_hub_download
-
-
-llm = Llama(
- model_path=hf_hub_download(
- repo_id=os.environ.get("REPO_ID", "TheBloke/Llama-2-7b-Chat-GGUF"),
- filename=os.environ.get("MODEL_FILE", "llama-2-7b-chat.Q5_0.gguf"),
- ),
- n_ctx=2048,
- n_gpu_layers=50, # change n_gpu_layers if you have more or less VRAM
-)
-
-history = []
-
-system_message = """
-You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.
-
-If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.
-"""
-
-
-def generate_text(message, history):
- temp = ""
- input_prompt = f"[INST] <<SYS>>\n{system_message}\n<</SYS>>\n\n "
- for interaction in history:
- input_prompt = input_prompt + str(interaction[0]) + " [/INST] " + str(interaction[1]) + " [INST] "
-
- input_prompt = input_prompt + str(message) + " [/INST] "
-
- output = llm(
- input_prompt,
- temperature=0.15,
- top_p=0.1,
- top_k=40,
- repeat_penalty=1.1,
- max_tokens=1024,
- stop=[
- "<|prompter|>",
- "<|endoftext|>",
- "<|endoftext|> \n",
- "ASSISTANT:",
- "USER:",
- "SYSTEM:",
- ],
- stream=True,
- )
- for out in output:
- stream = copy.deepcopy(out)
- temp += stream["choices"][0]["text"]
- yield temp
-
- history = ["init", input_prompt]
-
-
-demo = gr.ChatInterface(
- generate_text,
- title="llama-cpp-python on GPU",
- description="Running LLM with https://github.com/abetlen/llama-cpp-python",
- examples=["tell me everything about llamas"],
- cache_examples=True,
- retry_btn=None,
- undo_btn="Delete Previous",
- clear_btn="Clear",
-)
-demo.queue(concurrency_count=1, max_size=5)
-demo.launch()
\ No newline at end of file
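The app above drives llama-cpp-python through Gradio's `ChatInterface`. A minimal sketch of the same streaming call without the UI, assuming the same default GGUF checkpoint as the environment variables above (adjust `n_gpu_layers` to your hardware):

```python
# Minimal sketch of the llama-cpp-python streaming pattern used by the app above,
# without the Gradio wrapper. Assumes the default Llama-2 chat GGUF checkpoint.
from huggingface_hub import hf_hub_download
from llama_cpp import Llama

llm = Llama(
    model_path=hf_hub_download(
        repo_id="TheBloke/Llama-2-7b-Chat-GGUF",
        filename="llama-2-7b-chat.Q5_0.gguf",
    ),
    n_ctx=2048,
    n_gpu_layers=50,  # lower this if you have less VRAM
)

prompt = "[INST] <<SYS>>\nYou are a helpful assistant.\n<</SYS>>\n\n Tell me about llamas. [/INST] "
answer = ""
for chunk in llm(prompt, max_tokens=256, temperature=0.15, stream=True):
    answer += chunk["choices"][0]["text"]  # each streamed chunk carries one text fragment
print(answer)
```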
diff --git a/spaces/StephanST/WALDOonline/app.py b/spaces/StephanST/WALDOonline/app.py
deleted file mode 100644
index 591a0a56343f6ce10ecf4d2a0ce288055095013b..0000000000000000000000000000000000000000
--- a/spaces/StephanST/WALDOonline/app.py
+++ /dev/null
@@ -1,97 +0,0 @@
-import streamlit as st
-import cv2
-import io
-from PIL import Image
-import base64
-from run_local_onnx_largeinput_tiled_process import process_large_image
-import numpy as np
-import re
-import datetime
-
-
-def get_resolution_from_model_path(model_path):
- resolution = re.search(r"(\d+)px", model_path)
- if resolution:
- return int(resolution.group(1))
- return None
-
-
-
-def get_image_download_link(img, filename, text):
- buffered = io.BytesIO()
- img.save(buffered, format="JPEG")
- img_str = base64.b64encode(buffered.getvalue()).decode()
- href = f'<a href="data:image/jpeg;base64,{img_str}" download="{filename}">{text}</a>'
- return href
-
-def main():
- st.title("Use W.A.L.D.O. online!")
- st.write("Upload an image to process with WALDO neural network and get the output image and count of objects!")
- st.write("If you don't know WALDO, check it out here: https://github.com/stephansturges/WALDO ")
- st.write("All of the AI models used here are available for free in the WALDO main repository.")
- st.write("You can input an image of any resolution; the system will tile it down to the network's input size, process each tile, and stitch the result back together.")
- st.write("If you want to test performance for embedded models --> **resize your input image to the resolution of the DNN** and simulate the field of view of your real-world camera. Otherwise you lose context on many of the detections, since they end up on the \"seams\" of the tiled image, which leads to worse results.")
- st.write("Bear in mind that the system is running on very low-end hardware at the moment; on a local GPU system it could be 1000x faster.")
- st.write("More AI models will be added as they are trained ;-)")
-
- # Add a list of available models and their respective paths
- models = {
- "Huuuge model: Yolov7-e6e, 12 class, 960px resolution": "20230501_yolov7e6e_12class_960px.onnx",
- "Deep model / 640px: Yolov7-e6e, 12 class, 640 px rez": "20230509_yolov7-e6e_12class_640px_noms_adam_34b.onnx",
- "Big model: YoloV7x, 12 class, 960px resolution": "20230428_yolov7x_12class_960px_noms.onnx",
- "Medium / base model: YoloV7, 12 class, 960px resolution": "20230420_yolov7_12class_960px_noms.onnx",
- "Alternative Medium / base model : YoloV7, 12 class, 960px resolution": "20230502_yolov7_12class_960px_noms_adam_30b.onnx",
- "Half resolution / small depth model / multiscale : YoloV7-tiny, 12 class, 480px resolution" : "20230502_yolov7-tiny_MS_480px.onnx",
- "Tiny resolution / full depth model: YoloV7, 12 class, 320px resolution (this model is for running embedded on UAV)": "20230424_yolov7_12class_320px_noms.onnx",
- "Tiny depth / full resolution model: YoloV7-tiny, 12 class, 960px resolution (this model is for running embedded on UAV)": "20230502_yolov7-tiny_12class_960px_noms_adam_80b.onnx",
- "Payton's Panel Prospector": "20230520_yolov7_832px_ppp.onnx"
- }
-
- # Create a Streamlit radio button to select the desired model
- selected_model = st.radio("Select a model", list(models.keys()))
- st.write("More models coming soon...")
- uploaded_file = st.file_uploader("Choose an image file", type=["jpg", "jpeg", "png"])
-
-
- if uploaded_file is not None:
- input_image = Image.open(uploaded_file)
- st.image(input_image, caption="Input Image", use_column_width=True)
-
- # Check if the input image resolution matches the model's resolution
- model_resolution = get_resolution_from_model_path(models[selected_model])
- input_resolution = input_image.size
- if input_resolution != (model_resolution, model_resolution):
- st.warning(
- f"Warning: The input image resolution ({input_resolution[0]}x{input_resolution[1]}) "
- f"does not match the model's resolution ({model_resolution}x{model_resolution}). "
- "Performance of detection may be lower due to tiling the input image."
- )
-
- if st.button("Process Image"):
- # Convert PIL Image to OpenCV format (if required by your process_image function)
- input_image_cv = cv2.cvtColor(np.array(input_image), cv2.COLOR_RGB2BGR)
-
- # Process the input image, passing the selected model's path
- output_image_cv, console_msg = process_large_image(input_image_cv, models[selected_model])
-
- # Convert the output image back to PIL format for download
- output_image = Image.fromarray(cv2.cvtColor(output_image_cv, cv2.COLOR_BGR2RGB))
-
- # Display the console message and image
- st.image(output_image, caption="Output Image", use_column_width=True)
- st.write(f"Instances of detections : {console_msg}")
-
- # Get the current time
- now = datetime.datetime.now()
-
- # Format the time as a string
- formatted_time = now.strftime("%Y-%m-%d %H:%M:%S")
-
- # Print the formatted time
- print("The current time is:", formatted_time)
-
- # Add a download link for the output image
- st.markdown(get_image_download_link(output_image, "output_image.jpg", "Download Output Image"), unsafe_allow_html=True)
-
-if __name__ == "__main__":
- main()
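`get_image_download_link` above turns the processed output into a base64 data URI that Streamlit renders as a download link. A standalone sketch of just that helper, assuming only Pillow (the placeholder image and filename are illustrative):

```python
# Standalone sketch of the base64 data-URI download link built by
# get_image_download_link above, runnable outside Streamlit (Pillow only).
import base64
import io

from PIL import Image

img = Image.new("RGB", (64, 64), color=(0, 128, 255))  # placeholder output image
buffered = io.BytesIO()
img.save(buffered, format="JPEG")
img_str = base64.b64encode(buffered.getvalue()).decode()
href = f'<a href="data:image/jpeg;base64,{img_str}" download="output_image.jpg">Download Output Image</a>'
print(href[:80] + "...")  # the anchor tag Streamlit renders via st.markdown(..., unsafe_allow_html=True)
```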
diff --git a/spaces/Sumit7864/Image-Enhancer/cog_predict.py b/spaces/Sumit7864/Image-Enhancer/cog_predict.py
deleted file mode 100644
index fa0f89dfda8e3ff14afd7b3b8544f04d86e96562..0000000000000000000000000000000000000000
--- a/spaces/Sumit7864/Image-Enhancer/cog_predict.py
+++ /dev/null
@@ -1,148 +0,0 @@
-# flake8: noqa
-# This file is used for deploying replicate models
-# running: cog predict -i img=@inputs/00017_gray.png -i version='General - v3' -i scale=2 -i face_enhance=True -i tile=0
-# push: cog push r8.im/xinntao/realesrgan
-
-import os
-
-os.system('pip install gfpgan')
-os.system('python setup.py develop')
-
-import cv2
-import shutil
-import tempfile
-import torch
-from basicsr.archs.rrdbnet_arch import RRDBNet
-from basicsr.archs.srvgg_arch import SRVGGNetCompact
-
-from realesrgan.utils import RealESRGANer
-
-try:
- from cog import BasePredictor, Input, Path
- from gfpgan import GFPGANer
-except Exception:
- print('please install cog and realesrgan package')
-
-
-class Predictor(BasePredictor):
-
- def setup(self):
- os.makedirs('output', exist_ok=True)
- # download weights
- if not os.path.exists('weights/realesr-general-x4v3.pth'):
- os.system(
- 'wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth -P ./weights'
- )
- if not os.path.exists('weights/GFPGANv1.4.pth'):
- os.system('wget https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth -P ./weights')
- if not os.path.exists('weights/RealESRGAN_x4plus.pth'):
- os.system(
- 'wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth -P ./weights'
- )
- if not os.path.exists('weights/RealESRGAN_x4plus_anime_6B.pth'):
- os.system(
- 'wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth -P ./weights'
- )
- if not os.path.exists('weights/realesr-animevideov3.pth'):
- os.system(
- 'wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-animevideov3.pth -P ./weights'
- )
-
- def choose_model(self, scale, version, tile=0):
- half = True if torch.cuda.is_available() else False
- if version == 'General - RealESRGANplus':
- model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4)
- model_path = 'weights/RealESRGAN_x4plus.pth'
- self.upsampler = RealESRGANer(
- scale=4, model_path=model_path, model=model, tile=tile, tile_pad=10, pre_pad=0, half=half)
- elif version == 'General - v3':
- model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=32, upscale=4, act_type='prelu')
- model_path = 'weights/realesr-general-x4v3.pth'
- self.upsampler = RealESRGANer(
- scale=4, model_path=model_path, model=model, tile=tile, tile_pad=10, pre_pad=0, half=half)
- elif version == 'Anime - anime6B':
- model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=6, num_grow_ch=32, scale=4)
- model_path = 'weights/RealESRGAN_x4plus_anime_6B.pth'
- self.upsampler = RealESRGANer(
- scale=4, model_path=model_path, model=model, tile=tile, tile_pad=10, pre_pad=0, half=half)
- elif version == 'AnimeVideo - v3':
- model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=4, act_type='prelu')
- model_path = 'weights/realesr-animevideov3.pth'
- self.upsampler = RealESRGANer(
- scale=4, model_path=model_path, model=model, tile=tile, tile_pad=10, pre_pad=0, half=half)
-
- self.face_enhancer = GFPGANer(
- model_path='weights/GFPGANv1.4.pth',
- upscale=scale,
- arch='clean',
- channel_multiplier=2,
- bg_upsampler=self.upsampler)
-
- def predict(
- self,
- img: Path = Input(description='Input'),
- version: str = Input(
- description='RealESRGAN version. Please see [Readme] below for more descriptions',
- choices=['General - RealESRGANplus', 'General - v3', 'Anime - anime6B', 'AnimeVideo - v3'],
- default='General - v3'),
- scale: float = Input(description='Rescaling factor', default=2),
- face_enhance: bool = Input(
- description='Enhance faces with GFPGAN. Note that it does not work for anime images/videos', default=False),
- tile: int = Input(
- description=
- 'Tile size. Default is 0, that is no tile. When encountering the out-of-GPU-memory issue, please specify it, e.g., 400 or 200',
- default=0)
- ) -> Path:
- if tile is None or tile <= 100:
- tile = 0
- print(f'img: {img}. version: {version}. scale: {scale}. face_enhance: {face_enhance}. tile: {tile}.')
- try:
- extension = os.path.splitext(os.path.basename(str(img)))[1]
- img = cv2.imread(str(img), cv2.IMREAD_UNCHANGED)
- if len(img.shape) == 3 and img.shape[2] == 4:
- img_mode = 'RGBA'
- elif len(img.shape) == 2:
- img_mode = None
- img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
- else:
- img_mode = None
-
- h, w = img.shape[0:2]
- if h < 300:
- img = cv2.resize(img, (w * 2, h * 2), interpolation=cv2.INTER_LANCZOS4)
-
- self.choose_model(scale, version, tile)
-
- try:
- if face_enhance:
- _, _, output = self.face_enhancer.enhance(
- img, has_aligned=False, only_center_face=False, paste_back=True)
- else:
- output, _ = self.upsampler.enhance(img, outscale=scale)
- except RuntimeError as error:
- print('Error', error)
- print('If you encounter CUDA out of memory, try to set "tile" to a smaller size, e.g., 400.')
-
- if img_mode == 'RGBA': # RGBA images should be saved in png format
- extension = 'png'
- # save_path = f'output/out.{extension}'
- # cv2.imwrite(save_path, output)
- out_path = Path(tempfile.mkdtemp()) / f'out.{extension}'
- cv2.imwrite(str(out_path), output)
- except Exception as error:
- print('global exception: ', error)
- finally:
- clean_folder('output')
- return out_path
-
-
-def clean_folder(folder):
- for filename in os.listdir(folder):
- file_path = os.path.join(folder, filename)
- try:
- if os.path.isfile(file_path) or os.path.islink(file_path):
- os.unlink(file_path)
- elif os.path.isdir(file_path):
- shutil.rmtree(file_path)
- except Exception as e:
- print(f'Failed to delete {file_path}. Reason: {e}')
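`choose_model` above pairs each version string with an architecture and wraps it in `RealESRGANer`, optionally adding GFPGAN face enhancement. A hedged sketch of driving the 'General - v3' branch directly, assuming the `realesrgan` package is installed and the weights were fetched as in `setup()`; the input path is hypothetical:

```python
# Hedged sketch: using the 'General - v3' branch of choose_model() directly,
# outside of cog. Assumes realesrgan/basicsr are installed and the weight file
# has already been downloaded to weights/ as in setup() above.
import cv2
import torch
from basicsr.archs.srvgg_arch import SRVGGNetCompact
from realesrgan.utils import RealESRGANer

model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=32,
                        upscale=4, act_type='prelu')
upsampler = RealESRGANer(
    scale=4,
    model_path='weights/realesr-general-x4v3.pth',
    model=model,
    tile=0,            # set e.g. 400 if you hit CUDA out-of-memory
    tile_pad=10,
    pre_pad=0,
    half=torch.cuda.is_available(),
)

img = cv2.imread('input.jpg', cv2.IMREAD_UNCHANGED)  # hypothetical input path
output, _ = upsampler.enhance(img, outscale=2)
cv2.imwrite('out.jpg', output)
```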
diff --git a/spaces/Suniilkumaar/MusicGen-updated/audiocraft/data/audio_utils.py b/spaces/Suniilkumaar/MusicGen-updated/audiocraft/data/audio_utils.py
deleted file mode 100644
index 76d4bc2a33ce722d879db2af33cd1336bd6b1fb3..0000000000000000000000000000000000000000
--- a/spaces/Suniilkumaar/MusicGen-updated/audiocraft/data/audio_utils.py
+++ /dev/null
@@ -1,174 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-import sys
-import typing as tp
-
-import julius
-import torch
-import torchaudio
-
-
-def convert_audio_channels(wav: torch.Tensor, channels: int = 2) -> torch.Tensor:
- """Convert audio to the given number of channels.
-
- Args:
- wav (torch.Tensor): Audio wave of shape [B, C, T].
- channels (int): Expected number of channels as output.
- Returns:
- torch.Tensor: Downmixed or unchanged audio wave [B, C, T].
- """
- *shape, src_channels, length = wav.shape
- if src_channels == channels:
- pass
- elif channels == 1:
- # Case 1:
- # The caller asked for 1-channel audio, and the stream has multiple
- # channels, downmix all channels.
- wav = wav.mean(dim=-2, keepdim=True)
- elif src_channels == 1:
- # Case 2:
- # The caller asked for multiple channels, but the input file has
- # a single channel, replicate the audio over all channels.
- wav = wav.expand(*shape, channels, length)
- elif src_channels >= channels:
- # Case 3:
- # The caller asked for multiple channels, and the input file has
- # more channels than requested. In that case return the first channels.
- wav = wav[..., :channels, :]
- else:
- # Case 4: What is a reasonable choice here?
- raise ValueError('The audio file has fewer channels than requested but is not mono.')
- return wav
-
-
-def convert_audio(wav: torch.Tensor, from_rate: float,
- to_rate: float, to_channels: int) -> torch.Tensor:
- """Convert audio to new sample rate and number of audio channels.
- """
- wav = julius.resample_frac(wav, int(from_rate), int(to_rate))
- wav = convert_audio_channels(wav, to_channels)
- return wav
-
-
-def normalize_loudness(wav: torch.Tensor, sample_rate: int, loudness_headroom_db: float = 14,
- loudness_compressor: bool = False, energy_floor: float = 2e-3):
- """Normalize an input signal to a user loudness in dB LKFS.
- Audio loudness is defined according to the ITU-R BS.1770-4 recommendation.
-
- Args:
- wav (torch.Tensor): Input multichannel audio data.
- sample_rate (int): Sample rate.
- loudness_headroom_db (float): Target loudness of the output in dB LUFS.
- loudness_compressor (bool): Uses tanh for soft clipping.
- energy_floor (float): anything below that RMS level will not be rescaled.
- Returns:
- output (torch.Tensor): Loudness normalized output data.
- """
- energy = wav.pow(2).mean().sqrt().item()
- if energy < energy_floor:
- return wav
- transform = torchaudio.transforms.Loudness(sample_rate)
- input_loudness_db = transform(wav).item()
- # calculate the gain needed to scale to the desired loudness level
- delta_loudness = -loudness_headroom_db - input_loudness_db
- gain = 10.0 ** (delta_loudness / 20.0)
- output = gain * wav
- if loudness_compressor:
- output = torch.tanh(output)
- assert output.isfinite().all(), (input_loudness_db, wav.pow(2).mean().sqrt())
- return output
-
-
-def _clip_wav(wav: torch.Tensor, log_clipping: bool = False, stem_name: tp.Optional[str] = None) -> None:
- """Utility function to clip the audio with logging if specified."""
- max_scale = wav.abs().max()
- if log_clipping and max_scale > 1:
- clamp_prob = (wav.abs() > 1).float().mean().item()
- print(f"CLIPPING {stem_name or ''} happening with proba (a bit of clipping is okay):",
- clamp_prob, "maximum scale: ", max_scale.item(), file=sys.stderr)
- wav.clamp_(-1, 1)
-
-
-def normalize_audio(wav: torch.Tensor, normalize: bool = True,
- strategy: str = 'peak', peak_clip_headroom_db: float = 1,
- rms_headroom_db: float = 18, loudness_headroom_db: float = 14,
- loudness_compressor: bool = False, log_clipping: bool = False,
- sample_rate: tp.Optional[int] = None,
- stem_name: tp.Optional[str] = None) -> torch.Tensor:
- """Normalize the audio according to the prescribed strategy (see after).
-
- Args:
- wav (torch.Tensor): Audio data.
- normalize (bool): if `True` (default), normalizes according to the prescribed
- strategy (see after). If `False`, the strategy is only used in case clipping
- would happen.
- strategy (str): Can be either 'clip', 'peak', or 'rms'. Default is 'peak',
- i.e. audio is normalized by its largest value. RMS normalizes by root-mean-square
- with extra headroom to avoid clipping. 'clip' just clips.
- peak_clip_headroom_db (float): Headroom in dB when doing 'peak' or 'clip' strategy.
- rms_headroom_db (float): Headroom in dB when doing 'rms' strategy. This must be much larger
- than the `peak_clip` one to avoid further clipping.
- loudness_headroom_db (float): Target loudness for loudness normalization.
- loudness_compressor (bool): If True, uses tanh based soft clipping.
- log_clipping (bool): If True, basic logging on stderr when clipping still
- occurs despite strategy (only for 'rms').
- sample_rate (int): Sample rate for the audio data (required for loudness).
- stem_name (Optional[str]): Stem name for clipping logging.
- Returns:
- torch.Tensor: Normalized audio.
- """
- scale_peak = 10 ** (-peak_clip_headroom_db / 20)
- scale_rms = 10 ** (-rms_headroom_db / 20)
- if strategy == 'peak':
- rescaling = (scale_peak / wav.abs().max())
- if normalize or rescaling < 1:
- wav = wav * rescaling
- elif strategy == 'clip':
- wav = wav.clamp(-scale_peak, scale_peak)
- elif strategy == 'rms':
- mono = wav.mean(dim=0)
- rescaling = scale_rms / mono.pow(2).mean().sqrt()
- if normalize or rescaling < 1:
- wav = wav * rescaling
- _clip_wav(wav, log_clipping=log_clipping, stem_name=stem_name)
- elif strategy == 'loudness':
- assert sample_rate is not None, "Loudness normalization requires sample rate."
- wav = normalize_loudness(wav, sample_rate, loudness_headroom_db, loudness_compressor)
- _clip_wav(wav, log_clipping=log_clipping, stem_name=stem_name)
- else:
- assert wav.abs().max() < 1
- assert strategy == '' or strategy == 'none', f"Unexpected strategy: '{strategy}'"
- return wav
-
-
-def f32_pcm(wav: torch.Tensor) -> torch.Tensor:
- """Convert audio to float 32 bits PCM format.
- """
- if wav.dtype.is_floating_point:
- return wav
- else:
- assert wav.dtype == torch.int16
- return wav.float() / 2**15
-
-
-def i16_pcm(wav: torch.Tensor) -> torch.Tensor:
- """Convert audio to int 16 bits PCM format.
-
- ..Warning:: There exist many formulas for doing this conversion. None are perfect
- due to the asymmetry of the int16 range. One either has possible clipping, a DC offset,
- or inconsistencies with f32_pcm. If the given wav doesn't have enough headroom,
- it is possible that `i16_pcm(f32_pcm(wav)) != wav`.
- """
- if wav.dtype.is_floating_point:
- assert wav.abs().max() <= 1
- candidate = (wav * 2 ** 15).round()
- if candidate.max() >= 2 ** 15: # clipping would occur
- candidate = (wav * (2 ** 15 - 1)).round()
- return candidate.short()
- else:
- assert wav.dtype == torch.int16
- return wav
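A short usage sketch for the helpers above, assuming the `audiocraft.data.audio_utils` import path of this repository: resample and downmix with `convert_audio`, apply RMS normalization, then round-trip through int16 PCM.

```python
# Usage sketch for the helpers above (import path assumed to follow the
# audiocraft.data.audio_utils layout of this repository).
import torch

from audiocraft.data.audio_utils import convert_audio, f32_pcm, i16_pcm, normalize_audio

sr_in, sr_out = 44100, 32000
wav = torch.randn(2, sr_in) * 0.1                                       # 1 second of quiet stereo noise
wav = convert_audio(wav, from_rate=sr_in, to_rate=sr_out, to_channels=1)  # resample + downmix to mono
wav = normalize_audio(wav, strategy='rms', rms_headroom_db=18, log_clipping=True)
pcm16 = i16_pcm(wav)                                                     # int16 for writing to disk
roundtrip = f32_pcm(pcm16)                                               # back to float32 in [-1, 1]
print(wav.shape, pcm16.dtype, (roundtrip - wav).abs().max())
```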
diff --git a/spaces/Superlang/ImageProcessor/annotator/oneformer/detectron2/modeling/box_regression.py b/spaces/Superlang/ImageProcessor/annotator/oneformer/detectron2/modeling/box_regression.py
deleted file mode 100644
index 3cd5668d9a72edd34df4f458f90ac72553abb955..0000000000000000000000000000000000000000
--- a/spaces/Superlang/ImageProcessor/annotator/oneformer/detectron2/modeling/box_regression.py
+++ /dev/null
@@ -1,369 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-import math
-from typing import List, Tuple, Union
-import torch
-from fvcore.nn import giou_loss, smooth_l1_loss
-from torch.nn import functional as F
-
-from annotator.oneformer.detectron2.layers import cat, ciou_loss, diou_loss
-from annotator.oneformer.detectron2.structures import Boxes
-
-# Value for clamping large dw and dh predictions. The heuristic is that we clamp
-# such that dw and dh are no larger than what would transform a 16px box into a
-# 1000px box (based on a small anchor, 16px, and a typical image size, 1000px).
-_DEFAULT_SCALE_CLAMP = math.log(1000.0 / 16)
-
-
-__all__ = ["Box2BoxTransform", "Box2BoxTransformRotated", "Box2BoxTransformLinear"]
-
-
-@torch.jit.script
-class Box2BoxTransform(object):
- """
- The box-to-box transform defined in R-CNN. The transformation is parameterized
- by 4 deltas: (dx, dy, dw, dh). The transformation scales the box's width and height
- by exp(dw), exp(dh) and shifts a box's center by the offset (dx * width, dy * height).
- """
-
- def __init__(
- self, weights: Tuple[float, float, float, float], scale_clamp: float = _DEFAULT_SCALE_CLAMP
- ):
- """
- Args:
- weights (4-element tuple): Scaling factors that are applied to the
- (dx, dy, dw, dh) deltas. In Fast R-CNN, these were originally set
- such that the deltas have unit variance; now they are treated as
- hyperparameters of the system.
- scale_clamp (float): When predicting deltas, the predicted box scaling
- factors (dw and dh) are clamped such that they are <= scale_clamp.
- """
- self.weights = weights
- self.scale_clamp = scale_clamp
-
- def get_deltas(self, src_boxes, target_boxes):
- """
- Get box regression transformation deltas (dx, dy, dw, dh) that can be used
- to transform the `src_boxes` into the `target_boxes`. That is, the relation
- ``target_boxes == self.apply_deltas(deltas, src_boxes)`` is true (unless
- any delta is too large and is clamped).
-
- Args:
- src_boxes (Tensor): source boxes, e.g., object proposals
- target_boxes (Tensor): target of the transformation, e.g., ground-truth
- boxes.
- """
- assert isinstance(src_boxes, torch.Tensor), type(src_boxes)
- assert isinstance(target_boxes, torch.Tensor), type(target_boxes)
-
- src_widths = src_boxes[:, 2] - src_boxes[:, 0]
- src_heights = src_boxes[:, 3] - src_boxes[:, 1]
- src_ctr_x = src_boxes[:, 0] + 0.5 * src_widths
- src_ctr_y = src_boxes[:, 1] + 0.5 * src_heights
-
- target_widths = target_boxes[:, 2] - target_boxes[:, 0]
- target_heights = target_boxes[:, 3] - target_boxes[:, 1]
- target_ctr_x = target_boxes[:, 0] + 0.5 * target_widths
- target_ctr_y = target_boxes[:, 1] + 0.5 * target_heights
-
- wx, wy, ww, wh = self.weights
- dx = wx * (target_ctr_x - src_ctr_x) / src_widths
- dy = wy * (target_ctr_y - src_ctr_y) / src_heights
- dw = ww * torch.log(target_widths / src_widths)
- dh = wh * torch.log(target_heights / src_heights)
-
- deltas = torch.stack((dx, dy, dw, dh), dim=1)
- assert (src_widths > 0).all().item(), "Input boxes to Box2BoxTransform are not valid!"
- return deltas
-
- def apply_deltas(self, deltas, boxes):
- """
- Apply transformation `deltas` (dx, dy, dw, dh) to `boxes`.
-
- Args:
- deltas (Tensor): transformation deltas of shape (N, k*4), where k >= 1.
- deltas[i] represents k potentially different class-specific
- box transformations for the single box boxes[i].
- boxes (Tensor): boxes to transform, of shape (N, 4)
- """
- deltas = deltas.float() # ensure fp32 for decoding precision
- boxes = boxes.to(deltas.dtype)
-
- widths = boxes[:, 2] - boxes[:, 0]
- heights = boxes[:, 3] - boxes[:, 1]
- ctr_x = boxes[:, 0] + 0.5 * widths
- ctr_y = boxes[:, 1] + 0.5 * heights
-
- wx, wy, ww, wh = self.weights
- dx = deltas[:, 0::4] / wx
- dy = deltas[:, 1::4] / wy
- dw = deltas[:, 2::4] / ww
- dh = deltas[:, 3::4] / wh
-
- # Prevent sending too large values into torch.exp()
- dw = torch.clamp(dw, max=self.scale_clamp)
- dh = torch.clamp(dh, max=self.scale_clamp)
-
- pred_ctr_x = dx * widths[:, None] + ctr_x[:, None]
- pred_ctr_y = dy * heights[:, None] + ctr_y[:, None]
- pred_w = torch.exp(dw) * widths[:, None]
- pred_h = torch.exp(dh) * heights[:, None]
-
- x1 = pred_ctr_x - 0.5 * pred_w
- y1 = pred_ctr_y - 0.5 * pred_h
- x2 = pred_ctr_x + 0.5 * pred_w
- y2 = pred_ctr_y + 0.5 * pred_h
- pred_boxes = torch.stack((x1, y1, x2, y2), dim=-1)
- return pred_boxes.reshape(deltas.shape)
-
-
-@torch.jit.script
-class Box2BoxTransformRotated(object):
- """
- The box-to-box transform defined in Rotated R-CNN. The transformation is parameterized
- by 5 deltas: (dx, dy, dw, dh, da). The transformation scales the box's width and height
- by exp(dw), exp(dh), shifts a box's center by the offset (dx * width, dy * height),
- and rotate a box's angle by da (radians).
- Note: angles of deltas are in radians while angles of boxes are in degrees.
- """
-
- def __init__(
- self,
- weights: Tuple[float, float, float, float, float],
- scale_clamp: float = _DEFAULT_SCALE_CLAMP,
- ):
- """
- Args:
- weights (5-element tuple): Scaling factors that are applied to the
- (dx, dy, dw, dh, da) deltas. These are treated as
- hyperparameters of the system.
- scale_clamp (float): When predicting deltas, the predicted box scaling
- factors (dw and dh) are clamped such that they are <= scale_clamp.
- """
- self.weights = weights
- self.scale_clamp = scale_clamp
-
- def get_deltas(self, src_boxes, target_boxes):
- """
- Get box regression transformation deltas (dx, dy, dw, dh, da) that can be used
- to transform the `src_boxes` into the `target_boxes`. That is, the relation
- ``target_boxes == self.apply_deltas(deltas, src_boxes)`` is true (unless
- any delta is too large and is clamped).
-
- Args:
- src_boxes (Tensor): Nx5 source boxes, e.g., object proposals
- target_boxes (Tensor): Nx5 target of the transformation, e.g., ground-truth
- boxes.
- """
- assert isinstance(src_boxes, torch.Tensor), type(src_boxes)
- assert isinstance(target_boxes, torch.Tensor), type(target_boxes)
-
- src_ctr_x, src_ctr_y, src_widths, src_heights, src_angles = torch.unbind(src_boxes, dim=1)
-
- target_ctr_x, target_ctr_y, target_widths, target_heights, target_angles = torch.unbind(
- target_boxes, dim=1
- )
-
- wx, wy, ww, wh, wa = self.weights
- dx = wx * (target_ctr_x - src_ctr_x) / src_widths
- dy = wy * (target_ctr_y - src_ctr_y) / src_heights
- dw = ww * torch.log(target_widths / src_widths)
- dh = wh * torch.log(target_heights / src_heights)
- # Angles of deltas are in radians while angles of boxes are in degrees.
- # the conversion to radians serves as a way to normalize the values
- da = target_angles - src_angles
- da = (da + 180.0) % 360.0 - 180.0 # make it in [-180, 180)
- da *= wa * math.pi / 180.0
-
- deltas = torch.stack((dx, dy, dw, dh, da), dim=1)
- assert (
- (src_widths > 0).all().item()
- ), "Input boxes to Box2BoxTransformRotated are not valid!"
- return deltas
-
- def apply_deltas(self, deltas, boxes):
- """
- Apply transformation `deltas` (dx, dy, dw, dh, da) to `boxes`.
-
- Args:
- deltas (Tensor): transformation deltas of shape (N, k*5).
- deltas[i] represents box transformation for the single box boxes[i].
- boxes (Tensor): boxes to transform, of shape (N, 5)
- """
- assert deltas.shape[1] % 5 == 0 and boxes.shape[1] == 5
-
- boxes = boxes.to(deltas.dtype).unsqueeze(2)
-
- ctr_x = boxes[:, 0]
- ctr_y = boxes[:, 1]
- widths = boxes[:, 2]
- heights = boxes[:, 3]
- angles = boxes[:, 4]
-
- wx, wy, ww, wh, wa = self.weights
-
- dx = deltas[:, 0::5] / wx
- dy = deltas[:, 1::5] / wy
- dw = deltas[:, 2::5] / ww
- dh = deltas[:, 3::5] / wh
- da = deltas[:, 4::5] / wa
-
- # Prevent sending too large values into torch.exp()
- dw = torch.clamp(dw, max=self.scale_clamp)
- dh = torch.clamp(dh, max=self.scale_clamp)
-
- pred_boxes = torch.zeros_like(deltas)
- pred_boxes[:, 0::5] = dx * widths + ctr_x # x_ctr
- pred_boxes[:, 1::5] = dy * heights + ctr_y # y_ctr
- pred_boxes[:, 2::5] = torch.exp(dw) * widths # width
- pred_boxes[:, 3::5] = torch.exp(dh) * heights # height
-
- # Following original RRPN implementation,
- # angles of deltas are in radians while angles of boxes are in degrees.
- pred_angle = da * 180.0 / math.pi + angles
- pred_angle = (pred_angle + 180.0) % 360.0 - 180.0 # make it in [-180, 180)
-
- pred_boxes[:, 4::5] = pred_angle
-
- return pred_boxes
-
-
-class Box2BoxTransformLinear(object):
- """
- The linear box-to-box transform defined in FCOS. The transformation is parameterized
- by the distance from the center of (square) src box to 4 edges of the target box.
- """
-
- def __init__(self, normalize_by_size=True):
- """
- Args:
- normalize_by_size: normalize deltas by the size of src (anchor) boxes.
- """
- self.normalize_by_size = normalize_by_size
-
- def get_deltas(self, src_boxes, target_boxes):
- """
- Get box regression transformation deltas (dx1, dy1, dx2, dy2) that can be used
- to transform the `src_boxes` into the `target_boxes`. That is, the relation
- ``target_boxes == self.apply_deltas(deltas, src_boxes)`` is true.
- The center of src must be inside target boxes.
-
- Args:
- src_boxes (Tensor): square source boxes, e.g., anchors
- target_boxes (Tensor): target of the transformation, e.g., ground-truth
- boxes.
- """
- assert isinstance(src_boxes, torch.Tensor), type(src_boxes)
- assert isinstance(target_boxes, torch.Tensor), type(target_boxes)
-
- src_ctr_x = 0.5 * (src_boxes[:, 0] + src_boxes[:, 2])
- src_ctr_y = 0.5 * (src_boxes[:, 1] + src_boxes[:, 3])
-
- target_l = src_ctr_x - target_boxes[:, 0]
- target_t = src_ctr_y - target_boxes[:, 1]
- target_r = target_boxes[:, 2] - src_ctr_x
- target_b = target_boxes[:, 3] - src_ctr_y
-
- deltas = torch.stack((target_l, target_t, target_r, target_b), dim=1)
- if self.normalize_by_size:
- stride_w = src_boxes[:, 2] - src_boxes[:, 0]
- stride_h = src_boxes[:, 3] - src_boxes[:, 1]
- strides = torch.stack([stride_w, stride_h, stride_w, stride_h], axis=1)
- deltas = deltas / strides
-
- return deltas
-
- def apply_deltas(self, deltas, boxes):
- """
- Apply transformation `deltas` (dx1, dy1, dx2, dy2) to `boxes`.
-
- Args:
- deltas (Tensor): transformation deltas of shape (N, k*4), where k >= 1.
- deltas[i] represents k potentially different class-specific
- box transformations for the single box boxes[i].
- boxes (Tensor): boxes to transform, of shape (N, 4)
- """
- # Ensure the output is a valid box. See Sec 2.1 of https://arxiv.org/abs/2006.09214
- deltas = F.relu(deltas)
- boxes = boxes.to(deltas.dtype)
-
- ctr_x = 0.5 * (boxes[:, 0] + boxes[:, 2])
- ctr_y = 0.5 * (boxes[:, 1] + boxes[:, 3])
- if self.normalize_by_size:
- stride_w = boxes[:, 2] - boxes[:, 0]
- stride_h = boxes[:, 3] - boxes[:, 1]
- strides = torch.stack([stride_w, stride_h, stride_w, stride_h], axis=1)
- deltas = deltas * strides
-
- l = deltas[:, 0::4]
- t = deltas[:, 1::4]
- r = deltas[:, 2::4]
- b = deltas[:, 3::4]
-
- pred_boxes = torch.zeros_like(deltas)
- pred_boxes[:, 0::4] = ctr_x[:, None] - l # x1
- pred_boxes[:, 1::4] = ctr_y[:, None] - t # y1
- pred_boxes[:, 2::4] = ctr_x[:, None] + r # x2
- pred_boxes[:, 3::4] = ctr_y[:, None] + b # y2
- return pred_boxes
-
-
-def _dense_box_regression_loss(
- anchors: List[Union[Boxes, torch.Tensor]],
- box2box_transform: Box2BoxTransform,
- pred_anchor_deltas: List[torch.Tensor],
- gt_boxes: List[torch.Tensor],
- fg_mask: torch.Tensor,
- box_reg_loss_type="smooth_l1",
- smooth_l1_beta=0.0,
-):
- """
- Compute loss for dense multi-level box regression.
- Loss is accumulated over ``fg_mask``.
-
- Args:
- anchors: #lvl anchor boxes, each is (HixWixA, 4)
- pred_anchor_deltas: #lvl predictions, each is (N, HixWixA, 4)
- gt_boxes: N ground truth boxes, each has shape (R, 4) (R = sum(Hi * Wi * A))
- fg_mask: the foreground boolean mask of shape (N, R) to compute loss on
- box_reg_loss_type (str): Loss type to use. Supported losses: "smooth_l1", "giou",
- "diou", "ciou".
- smooth_l1_beta (float): beta parameter for the smooth L1 regression loss. Default to
- use L1 loss. Only used when `box_reg_loss_type` is "smooth_l1"
- """
- if isinstance(anchors[0], Boxes):
- anchors = type(anchors[0]).cat(anchors).tensor # (R, 4)
- else:
- anchors = cat(anchors)
- if box_reg_loss_type == "smooth_l1":
- gt_anchor_deltas = [box2box_transform.get_deltas(anchors, k) for k in gt_boxes]
- gt_anchor_deltas = torch.stack(gt_anchor_deltas) # (N, R, 4)
- loss_box_reg = smooth_l1_loss(
- cat(pred_anchor_deltas, dim=1)[fg_mask],
- gt_anchor_deltas[fg_mask],
- beta=smooth_l1_beta,
- reduction="sum",
- )
- elif box_reg_loss_type == "giou":
- pred_boxes = [
- box2box_transform.apply_deltas(k, anchors) for k in cat(pred_anchor_deltas, dim=1)
- ]
- loss_box_reg = giou_loss(
- torch.stack(pred_boxes)[fg_mask], torch.stack(gt_boxes)[fg_mask], reduction="sum"
- )
- elif box_reg_loss_type == "diou":
- pred_boxes = [
- box2box_transform.apply_deltas(k, anchors) for k in cat(pred_anchor_deltas, dim=1)
- ]
- loss_box_reg = diou_loss(
- torch.stack(pred_boxes)[fg_mask], torch.stack(gt_boxes)[fg_mask], reduction="sum"
- )
- elif box_reg_loss_type == "ciou":
- pred_boxes = [
- box2box_transform.apply_deltas(k, anchors) for k in cat(pred_anchor_deltas, dim=1)
- ]
- loss_box_reg = ciou_loss(
- torch.stack(pred_boxes)[fg_mask], torch.stack(gt_boxes)[fg_mask], reduction="sum"
- )
- else:
- raise ValueError(f"Invalid dense box regression loss type '{box_reg_loss_type}'")
- return loss_box_reg
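A quick round-trip check for `Box2BoxTransform` above: deltas computed by `get_deltas` and re-applied with `apply_deltas` recover the target boxes. The import path mirrors this Space's vendored detectron2 layout; the box coordinates and weights are illustrative.

```python
# Round-trip sketch for Box2BoxTransform: deltas computed from (src, target)
# and re-applied to src should recover target (up to scale clamping).
import torch

from annotator.oneformer.detectron2.modeling.box_regression import Box2BoxTransform

transform = Box2BoxTransform(weights=(10.0, 10.0, 5.0, 5.0))  # common Faster R-CNN box-head weights
src = torch.tensor([[10.0, 10.0, 50.0, 60.0]])     # x1, y1, x2, y2 proposal
target = torch.tensor([[12.0, 8.0, 55.0, 70.0]])   # ground-truth box

deltas = transform.get_deltas(src, target)           # (dx, dy, dw, dh) scaled by the weights
recovered = transform.apply_deltas(deltas, src)
print(deltas)
print(torch.allclose(recovered, target, atol=1e-4))  # True
```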
diff --git a/spaces/TYH71/gradio-ml-skeleton/src/model/yolov5.py b/spaces/TYH71/gradio-ml-skeleton/src/model/yolov5.py
deleted file mode 100644
index 62f98699bbbb698ef6ac636d240a9fb9450d3657..0000000000000000000000000000000000000000
--- a/spaces/TYH71/gradio-ml-skeleton/src/model/yolov5.py
+++ /dev/null
@@ -1,24 +0,0 @@
-'''
-module to load yolov5* model from the ultralytics/yolov5 repo
-'''
-import torch
-from src.core.logger import logger
-
-
-def load_model(model_repo: str = "ultralytics/yolov5", model_name: str = "yolov5s6"):
- """
- It loads the YOLOv5s model from the PyTorch Hub
- :return: A model
- """
- try:
- device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
- _model = torch.hub.load(model_repo, model_name, device=device)
- _model.agnostic = True # class-agnostic NMS
- _model.amp = True # enable Automatic Mixed Precision for inference
- return _model
- except Exception as e:
- logger.debug("Exception Caught: {}".format(e))
- finally:
- logger.info(f"[{model_repo}] {model_name} loaded with AMP [Device: {device}]")
-
-model = load_model()
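A hedged usage sketch for the hub-loaded model above. The YOLOv5 AutoShape wrapper returned by `torch.hub.load` accepts file paths, URLs, PIL images or numpy arrays; the example URL below is illustrative.

```python
# Hedged usage sketch for the hub-loaded YOLOv5 model above; the module path
# follows this Space's src/ layout and the image URL is only an example.
from src.model.yolov5 import model

results = model("https://ultralytics.com/images/zidane.jpg", size=640)
results.print()                      # summary: detections per class, inference speed
df = results.pandas().xyxy[0]        # one DataFrame per image: xmin..ymax, confidence, class, name
print(df.head())
```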
diff --git a/spaces/TabPFN/TabPFNEvaluation/TabPFN/positional_encodings.py b/spaces/TabPFN/TabPFNEvaluation/TabPFN/positional_encodings.py
deleted file mode 100644
index 05580e052d6bb1fe782441e7e65088f7989e8e0b..0000000000000000000000000000000000000000
--- a/spaces/TabPFN/TabPFNEvaluation/TabPFN/positional_encodings.py
+++ /dev/null
@@ -1,70 +0,0 @@
-import math
-
-import torch
-from torch import nn
-
-
- # Protocol for positional encodings.
-# __init__(d_model, max_len=..[, more optionals])
-# forward(x: (seq_len, bs, d_model)) -> Tensor of shape (*x.shape[:2],d_model) containing pos. embeddings
-
-
-class NoPositionalEncoding(nn.Module):
- def __init__(self, d_model, max_len=None):
- super(NoPositionalEncoding, self).__init__()
- pass
-
- def forward(self, x):
- return x #* math.sqrt(x.shape[-1])
-
-
-class PositionalEncoding(nn.Module):
- def __init__(self, d_model, max_len=5000):
- super(PositionalEncoding, self).__init__()
- pe = torch.zeros(max_len, d_model)
- position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
- div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
- pe[:, 0::2] = torch.sin(position * div_term)
- pe[:, 1::2] = torch.cos(position * div_term)
- pe = pe.unsqueeze(0).transpose(0, 1)
- self.register_buffer('pe', pe)
-
- def forward(self, x):
- x = self.pe[:x.size(0), :] + x # * math.sqrt(x.shape[-1])
- return x
-
-
-class LearnedPositionalEncoding(nn.Module):
- def __init__(self, d_model, max_len=5000):
- super(LearnedPositionalEncoding, self).__init__()
- self.max_seq_len = max_len
- #self.positional_embeddings = nn.Embedding(max_len, d_model)
- self.positional_embeddings = nn.Parameter(torch.empty(max_len, d_model))
- nn.init.normal_(self.positional_embeddings, mean=0, std=d_model ** -0.5)
-
- def forward(self, x):
- seq_len, bs, d_model = x.shape
- assert seq_len <= len(self.positional_embeddings), 'seq_len can be at most max_len.'
- pos_emb = self.positional_embeddings[:seq_len]
- return pos_emb.unsqueeze(1).expand(seq_len, bs, d_model) + x #* math.sqrt(x.shape[-1])
-
-
-class PairedScrambledPositionalEncodings(LearnedPositionalEncoding):
- # TODO check whether it is a problem to use the same perm. for full batch
- def forward(self, x):
- seq_len, bs, d_model = x.shape
- assert seq_len <= len(self.positional_embeddings), 'seq_len can be at most max_len.'
- assert len(self.positional_embeddings) % 2 == 0, 'Please specify an even max_len.'
-
- paired_embs = self.positional_embeddings.view(len(self.positional_embeddings), -1, 2)
- pos_emb = paired_embs[torch.randperm(len(paired_embs))].view(*self.positional_embeddings.shape)[:seq_len]
-
- return pos_emb.unsqueeze(1).expand(seq_len, bs, d_model) + x #* math.sqrt(x.shape[-1])
-
-
-
-
-
-
-
-
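A quick shape check for the encodings above, following the protocol comment at the top of the file; the import assumes the `TabPFN` directory is on `sys.path`.

```python
# Shape check for the encodings above: forward takes (seq_len, bs, d_model)
# and returns the same shape with positional information added.
import torch

from positional_encodings import LearnedPositionalEncoding, PositionalEncoding  # TabPFN dir assumed on sys.path

seq_len, bs, d_model = 12, 4, 32
x = torch.randn(seq_len, bs, d_model)

sinusoidal = PositionalEncoding(d_model, max_len=5000)
learned = LearnedPositionalEncoding(d_model, max_len=512)

print(sinusoidal(x).shape)  # torch.Size([12, 4, 32])
print(learned(x).shape)     # torch.Size([12, 4, 32])
```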
diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/_distutils/py39compat.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/_distutils/py39compat.py
deleted file mode 100644
index c43e5f10fdecb6606a1b75af3e149cb6a0a55e42..0000000000000000000000000000000000000000
--- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/_distutils/py39compat.py
+++ /dev/null
@@ -1,22 +0,0 @@
-import sys
-import platform
-
-
-def add_ext_suffix_39(vars):
- """
- Ensure vars contains 'EXT_SUFFIX'. pypa/distutils#130
- """
- import _imp
-
- ext_suffix = _imp.extension_suffixes()[0]
- vars.update(
- EXT_SUFFIX=ext_suffix,
- # sysconfig sets SO to match EXT_SUFFIX, so maintain
- # that expectation.
- # https://github.com/python/cpython/blob/785cc6770588de087d09e89a69110af2542be208/Lib/sysconfig.py#L671-L673
- SO=ext_suffix,
- )
-
-
-needs_ext_suffix = sys.version_info < (3, 10) and platform.system() == 'Windows'
-add_ext_suffix = add_ext_suffix_39 if needs_ext_suffix else lambda vars: None
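A small sketch of how the shim above is meant to be called: on Windows with Python < 3.10 it fills in `EXT_SUFFIX` (and the legacy `SO` alias) in a sysconfig-style vars dict, and everywhere else `add_ext_suffix` is a no-op. The import path follows this file's location inside setuptools' vendored `_distutils`.

```python
# Sketch of using the compatibility shim above; the vendored import path is
# taken from this file's location and the vars dict is a toy example.
from setuptools._distutils import py39compat

config_vars = {"prefix": "/usr"}   # toy sysconfig-style vars dict
py39compat.add_ext_suffix(config_vars)
print(config_vars.get("EXT_SUFFIX", "<unchanged on this platform>"))
```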
diff --git a/spaces/Tape/yoga/pytorch-openpose/body.py b/spaces/Tape/yoga/pytorch-openpose/body.py
deleted file mode 100644
index ecf06938faf81a153c0090e8ceccc5ff94771ee5..0000000000000000000000000000000000000000
--- a/spaces/Tape/yoga/pytorch-openpose/body.py
+++ /dev/null
@@ -1,218 +0,0 @@
-import cv2
-import numpy as np
-import math
-import time
- from scipy.ndimage import gaussian_filter
-import matplotlib.pyplot as plt
-import matplotlib
-import torch
-from torchvision import transforms
-
-from src import util
-from src.model import bodypose_model
-
-class Body(object):
- def __init__(self, model_path):
- self.model = bodypose_model()
- if torch.cuda.is_available():
- self.model = self.model.cuda()
- model_dict = util.transfer(self.model, torch.load(model_path))
- self.model.load_state_dict(model_dict)
- self.model.eval()
-
- def __call__(self, oriImg):
- # scale_search = [0.5, 1.0, 1.5, 2.0]
- scale_search = [0.5]
- boxsize = 368
- stride = 8
- padValue = 128
- thre1 = 0.1
- thre2 = 0.05
- multiplier = [x * boxsize / oriImg.shape[0] for x in scale_search]
- heatmap_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 19))
- paf_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 38))
-
- for m in range(len(multiplier)):
- scale = multiplier[m]
- imageToTest = cv2.resize(oriImg, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)
- imageToTest_padded, pad = util.padRightDownCorner(imageToTest, stride, padValue)
- im = np.transpose(np.float32(imageToTest_padded[:, :, :, np.newaxis]), (3, 2, 0, 1)) / 256 - 0.5
- im = np.ascontiguousarray(im)
-
- data = torch.from_numpy(im).float()
- if torch.cuda.is_available():
- data = data.cuda()
- # data = data.permute([2, 0, 1]).unsqueeze(0).float()
- with torch.no_grad():
- Mconv7_stage6_L1, Mconv7_stage6_L2 = self.model(data)
- Mconv7_stage6_L1 = Mconv7_stage6_L1.cpu().numpy()
- Mconv7_stage6_L2 = Mconv7_stage6_L2.cpu().numpy()
-
- # extract outputs, resize, and remove padding
- # heatmap = np.transpose(np.squeeze(net.blobs[output_blobs.keys()[1]].data), (1, 2, 0)) # output 1 is heatmaps
- heatmap = np.transpose(np.squeeze(Mconv7_stage6_L2), (1, 2, 0)) # output 1 is heatmaps
- heatmap = cv2.resize(heatmap, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC)
- heatmap = heatmap[:imageToTest_padded.shape[0] - pad[2], :imageToTest_padded.shape[1] - pad[3], :]
- heatmap = cv2.resize(heatmap, (oriImg.shape[1], oriImg.shape[0]), interpolation=cv2.INTER_CUBIC)
-
- # paf = np.transpose(np.squeeze(net.blobs[output_blobs.keys()[0]].data), (1, 2, 0)) # output 0 is PAFs
- paf = np.transpose(np.squeeze(Mconv7_stage6_L1), (1, 2, 0)) # output 0 is PAFs
- paf = cv2.resize(paf, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC)
- paf = paf[:imageToTest_padded.shape[0] - pad[2], :imageToTest_padded.shape[1] - pad[3], :]
- paf = cv2.resize(paf, (oriImg.shape[1], oriImg.shape[0]), interpolation=cv2.INTER_CUBIC)
-
- heatmap_avg = heatmap_avg + heatmap / len(multiplier)
- paf_avg = paf_avg + paf / len(multiplier)
-
- all_peaks = []
- peak_counter = 0
-
- for part in range(18):
- map_ori = heatmap_avg[:, :, part]
- one_heatmap = gaussian_filter(map_ori, sigma=3)
-
- map_left = np.zeros(one_heatmap.shape)
- map_left[1:, :] = one_heatmap[:-1, :]
- map_right = np.zeros(one_heatmap.shape)
- map_right[:-1, :] = one_heatmap[1:, :]
- map_up = np.zeros(one_heatmap.shape)
- map_up[:, 1:] = one_heatmap[:, :-1]
- map_down = np.zeros(one_heatmap.shape)
- map_down[:, :-1] = one_heatmap[:, 1:]
-
- peaks_binary = np.logical_and.reduce(
- (one_heatmap >= map_left, one_heatmap >= map_right, one_heatmap >= map_up, one_heatmap >= map_down, one_heatmap > thre1))
- peaks = list(zip(np.nonzero(peaks_binary)[1], np.nonzero(peaks_binary)[0])) # note reverse
- peaks_with_score = [x + (map_ori[x[1], x[0]],) for x in peaks]
- peak_id = range(peak_counter, peak_counter + len(peaks))
- peaks_with_score_and_id = [peaks_with_score[i] + (peak_id[i],) for i in range(len(peak_id))]
-
- all_peaks.append(peaks_with_score_and_id)
- peak_counter += len(peaks)
-
- # find connection in the specified sequence, center 29 is in the position 15
- limbSeq = [[2, 3], [2, 6], [3, 4], [4, 5], [6, 7], [7, 8], [2, 9], [9, 10], \
- [10, 11], [2, 12], [12, 13], [13, 14], [2, 1], [1, 15], [15, 17], \
- [1, 16], [16, 18], [3, 17], [6, 18]]
- # the middle joints heatmap correspondence
- mapIdx = [[31, 32], [39, 40], [33, 34], [35, 36], [41, 42], [43, 44], [19, 20], [21, 22], \
- [23, 24], [25, 26], [27, 28], [29, 30], [47, 48], [49, 50], [53, 54], [51, 52], \
- [55, 56], [37, 38], [45, 46]]
-
- connection_all = []
- special_k = []
- mid_num = 10
-
- for k in range(len(mapIdx)):
- score_mid = paf_avg[:, :, [x - 19 for x in mapIdx[k]]]
- candA = all_peaks[limbSeq[k][0] - 1]
- candB = all_peaks[limbSeq[k][1] - 1]
- nA = len(candA)
- nB = len(candB)
- indexA, indexB = limbSeq[k]
- if (nA != 0 and nB != 0):
- connection_candidate = []
- for i in range(nA):
- for j in range(nB):
- vec = np.subtract(candB[j][:2], candA[i][:2])
- norm = math.sqrt(vec[0] * vec[0] + vec[1] * vec[1])
- norm = max(0.001, norm)
- vec = np.divide(vec, norm)
-
- startend = list(zip(np.linspace(candA[i][0], candB[j][0], num=mid_num), \
- np.linspace(candA[i][1], candB[j][1], num=mid_num)))
-
- vec_x = np.array([score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 0] \
- for I in range(len(startend))])
- vec_y = np.array([score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 1] \
- for I in range(len(startend))])
-
- score_midpts = np.multiply(vec_x, vec[0]) + np.multiply(vec_y, vec[1])
- score_with_dist_prior = sum(score_midpts) / len(score_midpts) + min(
- 0.5 * oriImg.shape[0] / norm - 1, 0)
- criterion1 = len(np.nonzero(score_midpts > thre2)[0]) > 0.8 * len(score_midpts)
- criterion2 = score_with_dist_prior > 0
- if criterion1 and criterion2:
- connection_candidate.append(
- [i, j, score_with_dist_prior, score_with_dist_prior + candA[i][2] + candB[j][2]])
-
- connection_candidate = sorted(connection_candidate, key=lambda x: x[2], reverse=True)
- connection = np.zeros((0, 5))
- for c in range(len(connection_candidate)):
- i, j, s = connection_candidate[c][0:3]
- if (i not in connection[:, 3] and j not in connection[:, 4]):
- connection = np.vstack([connection, [candA[i][3], candB[j][3], s, i, j]])
- if (len(connection) >= min(nA, nB)):
- break
-
- connection_all.append(connection)
- else:
- special_k.append(k)
- connection_all.append([])
-
- # last number in each row is the total parts number of that person
- # the second last number in each row is the score of the overall configuration
- subset = -1 * np.ones((0, 20))
- candidate = np.array([item for sublist in all_peaks for item in sublist])
-
- for k in range(len(mapIdx)):
- if k not in special_k:
- partAs = connection_all[k][:, 0]
- partBs = connection_all[k][:, 1]
- indexA, indexB = np.array(limbSeq[k]) - 1
-
- for i in range(len(connection_all[k])): # = 1:size(temp,1)
- found = 0
- subset_idx = [-1, -1]
- for j in range(len(subset)): # 1:size(subset,1):
- if subset[j][indexA] == partAs[i] or subset[j][indexB] == partBs[i]:
- subset_idx[found] = j
- found += 1
-
- if found == 1:
- j = subset_idx[0]
- if subset[j][indexB] != partBs[i]:
- subset[j][indexB] = partBs[i]
- subset[j][-1] += 1
- subset[j][-2] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]
- elif found == 2: # if found 2 and disjoint, merge them
- j1, j2 = subset_idx
- membership = ((subset[j1] >= 0).astype(int) + (subset[j2] >= 0).astype(int))[:-2]
- if len(np.nonzero(membership == 2)[0]) == 0: # merge
- subset[j1][:-2] += (subset[j2][:-2] + 1)
- subset[j1][-2:] += subset[j2][-2:]
- subset[j1][-2] += connection_all[k][i][2]
- subset = np.delete(subset, j2, 0)
- else: # as like found == 1
- subset[j1][indexB] = partBs[i]
- subset[j1][-1] += 1
- subset[j1][-2] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]
-
- # if find no partA in the subset, create a new subset
- elif not found and k < 17:
- row = -1 * np.ones(20)
- row[indexA] = partAs[i]
- row[indexB] = partBs[i]
- row[-1] = 2
- row[-2] = sum(candidate[connection_all[k][i, :2].astype(int), 2]) + connection_all[k][i][2]
- subset = np.vstack([subset, row])
- # delete some rows of subset which has few parts occur
- deleteIdx = []
- for i in range(len(subset)):
- if subset[i][-1] < 4 or subset[i][-2] / subset[i][-1] < 0.4:
- deleteIdx.append(i)
- subset = np.delete(subset, deleteIdx, axis=0)
-
- # subset: n*20 array, 0-17 is the index in candidate, 18 is the total score, 19 is the total parts
- # candidate: x, y, score, id
- return candidate, subset
-
-if __name__ == "__main__":
- body_estimation = Body('../model/body_pose_model.pth')
-
- test_image = '../images/ski.jpg'
- oriImg = cv2.imread(test_image) # B,G,R order
- candidate, subset = body_estimation(oriImg)
- canvas = util.draw_bodypose(oriImg, candidate, subset)
- plt.imshow(canvas[:, :, [2, 1, 0]])
- plt.show()
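The peak-finding step inside `Body.__call__` marks a heatmap pixel as a keypoint candidate when it is at least as large as its four shifted neighbours and above `thre1`. A toy numpy/scipy sketch of just that step, on a synthetic 7x7 heatmap:

```python
# Toy sketch of the local-maximum peak detection used inside Body.__call__:
# a pixel is a peak if it is >= each of its four shifted neighbours and
# above the threshold thre1. Standalone numpy/scipy only.
import numpy as np
from scipy.ndimage import gaussian_filter

thre1 = 0.1
map_ori = np.zeros((7, 7), dtype=np.float32)
map_ori[3, 4] = 1.0                      # one synthetic keypoint
one_heatmap = gaussian_filter(map_ori, sigma=1)

map_left = np.zeros_like(one_heatmap)
map_left[1:, :] = one_heatmap[:-1, :]
map_right = np.zeros_like(one_heatmap)
map_right[:-1, :] = one_heatmap[1:, :]
map_up = np.zeros_like(one_heatmap)
map_up[:, 1:] = one_heatmap[:, :-1]
map_down = np.zeros_like(one_heatmap)
map_down[:, :-1] = one_heatmap[:, 1:]

peaks_binary = np.logical_and.reduce(
    (one_heatmap >= map_left, one_heatmap >= map_right,
     one_heatmap >= map_up, one_heatmap >= map_down, one_heatmap > thre1))
peaks = list(zip(np.nonzero(peaks_binary)[1], np.nonzero(peaks_binary)[0]))  # (x, y) order
print(peaks)  # [(4, 3)]
```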
diff --git a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/tools/lazyconfig_train_net.py b/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/tools/lazyconfig_train_net.py
deleted file mode 100644
index bb62d36c0c171b0391453afafc2828ebab1b0da1..0000000000000000000000000000000000000000
--- a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/tools/lazyconfig_train_net.py
+++ /dev/null
@@ -1,131 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) Facebook, Inc. and its affiliates.
-"""
-Training script using the new "LazyConfig" python config files.
-
- This script reads a given python config file and runs the training or evaluation.
-It can be used to train any models or dataset as long as they can be
-instantiated by the recursive construction defined in the given config file.
-
- Besides lazy construction of models, dataloaders, etc., this script expects a
-few common configuration parameters currently defined in "configs/common/train.py".
-To add more complicated training logic, you can easily add other configs
-in the config file and implement a new train_net.py to handle them.
-"""
-import logging
-
-from detectron2.checkpoint import DetectionCheckpointer
-from detectron2.config import LazyConfig, instantiate
-from detectron2.engine import (
- AMPTrainer,
- SimpleTrainer,
- default_argument_parser,
- default_setup,
- default_writers,
- hooks,
- launch,
-)
-from detectron2.engine.defaults import create_ddp_model
-from detectron2.evaluation import inference_on_dataset, print_csv_format
-from detectron2.utils import comm
-
-logger = logging.getLogger("detectron2")
-
-
-def do_test(cfg, model):
- if "evaluator" in cfg.dataloader:
- ret = inference_on_dataset(
- model, instantiate(cfg.dataloader.test), instantiate(cfg.dataloader.evaluator)
- )
- print_csv_format(ret)
- return ret
-
-
-def do_train(args, cfg):
- """
- Args:
- cfg: an object with the following attributes:
- model: instantiate to a module
- dataloader.{train,test}: instantiate to dataloaders
- dataloader.evaluator: instantiate to evaluator for test set
- optimizer: instantiate to an optimizer
- lr_multiplier: instantiate to a fvcore scheduler
- train: other misc config defined in `configs/common/train.py`, including:
- output_dir (str)
- init_checkpoint (str)
- amp.enabled (bool)
- max_iter (int)
- eval_period, log_period (int)
- device (str)
- checkpointer (dict)
- ddp (dict)
- """
- model = instantiate(cfg.model)
- logger = logging.getLogger("detectron2")
- logger.info("Model:\n{}".format(model))
- model.to(cfg.train.device)
-
- cfg.optimizer.params.model = model
- optim = instantiate(cfg.optimizer)
-
- train_loader = instantiate(cfg.dataloader.train)
-
- model = create_ddp_model(model, **cfg.train.ddp)
- trainer = (AMPTrainer if cfg.train.amp.enabled else SimpleTrainer)(model, train_loader, optim)
- checkpointer = DetectionCheckpointer(
- model,
- cfg.train.output_dir,
- trainer=trainer,
- )
- trainer.register_hooks(
- [
- hooks.IterationTimer(),
- hooks.LRScheduler(scheduler=instantiate(cfg.lr_multiplier)),
- hooks.PeriodicCheckpointer(checkpointer, **cfg.train.checkpointer)
- if comm.is_main_process()
- else None,
- hooks.EvalHook(cfg.train.eval_period, lambda: do_test(cfg, model)),
- hooks.PeriodicWriter(
- default_writers(cfg.train.output_dir, cfg.train.max_iter),
- period=cfg.train.log_period,
- )
- if comm.is_main_process()
- else None,
- ]
- )
-
- checkpointer.resume_or_load(cfg.train.init_checkpoint, resume=args.resume)
- if args.resume and checkpointer.has_checkpoint():
- # The checkpoint stores the training iteration that just finished, thus we start
- # at the next iteration
- start_iter = trainer.iter + 1
- else:
- start_iter = 0
- trainer.train(start_iter, cfg.train.max_iter)
-
-
-def main(args):
- cfg = LazyConfig.load(args.config_file)
- cfg = LazyConfig.apply_overrides(cfg, args.opts)
- default_setup(cfg, args)
-
- if args.eval_only:
- model = instantiate(cfg.model)
- model.to(cfg.train.device)
- model = create_ddp_model(model)
- DetectionCheckpointer(model).load(cfg.train.init_checkpoint)
- print(do_test(cfg, model))
- else:
- do_train(args, cfg)
-
-
-if __name__ == "__main__":
- args = default_argument_parser().parse_args()
- launch(
- main,
- args.num_gpus,
- num_machines=args.num_machines,
- machine_rank=args.machine_rank,
- dist_url=args.dist_url,
- args=(args,),
- )
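For reference, a hedged sketch of the lazy config shape this script consumes (all values are illustrative; `LazyCall` and `get_default_optimizer_params` are assumed from the detectron2 API rather than taken from this repository):

```python
# Hedged sketch, not from this repo: the rough shape of a lazy config that a
# lazyconfig-style train_net script consumes.  All values are illustrative.
import torch
from detectron2.config import LazyCall as L
from detectron2.solver.build import get_default_optimizer_params  # assumed API

# cfg.train.* holds the misc options read by do_train()/main().
train = dict(
    output_dir="./output",
    init_checkpoint="",
    max_iter=90_000,
    amp=dict(enabled=False),                    # enable to use AMPTrainer
    ddp=dict(broadcast_buffers=False),
    checkpointer=dict(period=5_000, max_to_keep=100),
    eval_period=5_000,
    log_period=20,
    device="cuda",
)

# cfg.optimizer stays un-built until instantiate(cfg.optimizer) is called;
# do_train() first injects the model via cfg.optimizer.params.model = model.
optimizer = L(torch.optim.SGD)(
    params=L(get_default_optimizer_params)(weight_decay_norm=0.0),
    lr=0.02,
    momentum=0.9,
)

# model, dataloader.{train,test,evaluator} and lr_multiplier would be defined the
# same way, each as an L(...) call that instantiate() resolves recursively.
```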
diff --git a/spaces/Trangluna2002/AI_Cover_Gen/src/infer_pack/attentions.py b/spaces/Trangluna2002/AI_Cover_Gen/src/infer_pack/attentions.py
deleted file mode 100644
index 77cb63ffccf3e33badf22d50862a64ba517b487f..0000000000000000000000000000000000000000
--- a/spaces/Trangluna2002/AI_Cover_Gen/src/infer_pack/attentions.py
+++ /dev/null
@@ -1,417 +0,0 @@
-import copy
-import math
-import numpy as np
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-from infer_pack import commons
-from infer_pack import modules
-from infer_pack.modules import LayerNorm
-
-
-class Encoder(nn.Module):
- def __init__(
- self,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size=1,
- p_dropout=0.0,
- window_size=10,
- **kwargs
- ):
- super().__init__()
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.window_size = window_size
-
- self.drop = nn.Dropout(p_dropout)
- self.attn_layers = nn.ModuleList()
- self.norm_layers_1 = nn.ModuleList()
- self.ffn_layers = nn.ModuleList()
- self.norm_layers_2 = nn.ModuleList()
- for i in range(self.n_layers):
- self.attn_layers.append(
- MultiHeadAttention(
- hidden_channels,
- hidden_channels,
- n_heads,
- p_dropout=p_dropout,
- window_size=window_size,
- )
- )
- self.norm_layers_1.append(LayerNorm(hidden_channels))
- self.ffn_layers.append(
- FFN(
- hidden_channels,
- hidden_channels,
- filter_channels,
- kernel_size,
- p_dropout=p_dropout,
- )
- )
- self.norm_layers_2.append(LayerNorm(hidden_channels))
-
- def forward(self, x, x_mask):
- attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
- x = x * x_mask
- for i in range(self.n_layers):
- y = self.attn_layers[i](x, x, attn_mask)
- y = self.drop(y)
- x = self.norm_layers_1[i](x + y)
-
- y = self.ffn_layers[i](x, x_mask)
- y = self.drop(y)
- x = self.norm_layers_2[i](x + y)
- x = x * x_mask
- return x
-
-
-class Decoder(nn.Module):
- def __init__(
- self,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size=1,
- p_dropout=0.0,
- proximal_bias=False,
- proximal_init=True,
- **kwargs
- ):
- super().__init__()
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.proximal_bias = proximal_bias
- self.proximal_init = proximal_init
-
- self.drop = nn.Dropout(p_dropout)
- self.self_attn_layers = nn.ModuleList()
- self.norm_layers_0 = nn.ModuleList()
- self.encdec_attn_layers = nn.ModuleList()
- self.norm_layers_1 = nn.ModuleList()
- self.ffn_layers = nn.ModuleList()
- self.norm_layers_2 = nn.ModuleList()
- for i in range(self.n_layers):
- self.self_attn_layers.append(
- MultiHeadAttention(
- hidden_channels,
- hidden_channels,
- n_heads,
- p_dropout=p_dropout,
- proximal_bias=proximal_bias,
- proximal_init=proximal_init,
- )
- )
- self.norm_layers_0.append(LayerNorm(hidden_channels))
- self.encdec_attn_layers.append(
- MultiHeadAttention(
- hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout
- )
- )
- self.norm_layers_1.append(LayerNorm(hidden_channels))
- self.ffn_layers.append(
- FFN(
- hidden_channels,
- hidden_channels,
- filter_channels,
- kernel_size,
- p_dropout=p_dropout,
- causal=True,
- )
- )
- self.norm_layers_2.append(LayerNorm(hidden_channels))
-
- def forward(self, x, x_mask, h, h_mask):
- """
- x: decoder input
- h: encoder output
- """
- self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(
- device=x.device, dtype=x.dtype
- )
- encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
- x = x * x_mask
- for i in range(self.n_layers):
- y = self.self_attn_layers[i](x, x, self_attn_mask)
- y = self.drop(y)
- x = self.norm_layers_0[i](x + y)
-
- y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
- y = self.drop(y)
- x = self.norm_layers_1[i](x + y)
-
- y = self.ffn_layers[i](x, x_mask)
- y = self.drop(y)
- x = self.norm_layers_2[i](x + y)
- x = x * x_mask
- return x
-
-
-class MultiHeadAttention(nn.Module):
- def __init__(
- self,
- channels,
- out_channels,
- n_heads,
- p_dropout=0.0,
- window_size=None,
- heads_share=True,
- block_length=None,
- proximal_bias=False,
- proximal_init=False,
- ):
- super().__init__()
- assert channels % n_heads == 0
-
- self.channels = channels
- self.out_channels = out_channels
- self.n_heads = n_heads
- self.p_dropout = p_dropout
- self.window_size = window_size
- self.heads_share = heads_share
- self.block_length = block_length
- self.proximal_bias = proximal_bias
- self.proximal_init = proximal_init
- self.attn = None
-
- self.k_channels = channels // n_heads
- self.conv_q = nn.Conv1d(channels, channels, 1)
- self.conv_k = nn.Conv1d(channels, channels, 1)
- self.conv_v = nn.Conv1d(channels, channels, 1)
- self.conv_o = nn.Conv1d(channels, out_channels, 1)
- self.drop = nn.Dropout(p_dropout)
-
- if window_size is not None:
- n_heads_rel = 1 if heads_share else n_heads
- rel_stddev = self.k_channels**-0.5
- self.emb_rel_k = nn.Parameter(
- torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
- * rel_stddev
- )
- self.emb_rel_v = nn.Parameter(
- torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
- * rel_stddev
- )
-
- nn.init.xavier_uniform_(self.conv_q.weight)
- nn.init.xavier_uniform_(self.conv_k.weight)
- nn.init.xavier_uniform_(self.conv_v.weight)
- if proximal_init:
- with torch.no_grad():
- self.conv_k.weight.copy_(self.conv_q.weight)
- self.conv_k.bias.copy_(self.conv_q.bias)
-
- def forward(self, x, c, attn_mask=None):
- q = self.conv_q(x)
- k = self.conv_k(c)
- v = self.conv_v(c)
-
- x, self.attn = self.attention(q, k, v, mask=attn_mask)
-
- x = self.conv_o(x)
- return x
-
- def attention(self, query, key, value, mask=None):
- # reshape [b, d, t] -> [b, n_h, t, d_k]
- b, d, t_s, t_t = (*key.size(), query.size(2))
- query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
- key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
- value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
-
- scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
- if self.window_size is not None:
- assert (
- t_s == t_t
- ), "Relative attention is only available for self-attention."
- key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
- rel_logits = self._matmul_with_relative_keys(
- query / math.sqrt(self.k_channels), key_relative_embeddings
- )
- scores_local = self._relative_position_to_absolute_position(rel_logits)
- scores = scores + scores_local
- if self.proximal_bias:
- assert t_s == t_t, "Proximal bias is only available for self-attention."
- scores = scores + self._attention_bias_proximal(t_s).to(
- device=scores.device, dtype=scores.dtype
- )
- if mask is not None:
- scores = scores.masked_fill(mask == 0, -1e4)
- if self.block_length is not None:
- assert (
- t_s == t_t
- ), "Local attention is only available for self-attention."
- block_mask = (
- torch.ones_like(scores)
- .triu(-self.block_length)
- .tril(self.block_length)
- )
- scores = scores.masked_fill(block_mask == 0, -1e4)
- p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s]
- p_attn = self.drop(p_attn)
- output = torch.matmul(p_attn, value)
- if self.window_size is not None:
- relative_weights = self._absolute_position_to_relative_position(p_attn)
- value_relative_embeddings = self._get_relative_embeddings(
- self.emb_rel_v, t_s
- )
- output = output + self._matmul_with_relative_values(
- relative_weights, value_relative_embeddings
- )
- output = (
- output.transpose(2, 3).contiguous().view(b, d, t_t)
- ) # [b, n_h, t_t, d_k] -> [b, d, t_t]
- return output, p_attn
-
- def _matmul_with_relative_values(self, x, y):
- """
- x: [b, h, l, m]
- y: [h or 1, m, d]
- ret: [b, h, l, d]
- """
- ret = torch.matmul(x, y.unsqueeze(0))
- return ret
-
- def _matmul_with_relative_keys(self, x, y):
- """
- x: [b, h, l, d]
- y: [h or 1, m, d]
- ret: [b, h, l, m]
- """
- ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
- return ret
-
- def _get_relative_embeddings(self, relative_embeddings, length):
- max_relative_position = 2 * self.window_size + 1
- # Pad first before slice to avoid using cond ops.
- pad_length = max(length - (self.window_size + 1), 0)
- slice_start_position = max((self.window_size + 1) - length, 0)
- slice_end_position = slice_start_position + 2 * length - 1
- if pad_length > 0:
- padded_relative_embeddings = F.pad(
- relative_embeddings,
- commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]),
- )
- else:
- padded_relative_embeddings = relative_embeddings
- used_relative_embeddings = padded_relative_embeddings[
- :, slice_start_position:slice_end_position
- ]
- return used_relative_embeddings
-
- def _relative_position_to_absolute_position(self, x):
- """
- x: [b, h, l, 2*l-1]
- ret: [b, h, l, l]
- """
- batch, heads, length, _ = x.size()
- # Concat columns of pad to shift from relative to absolute indexing.
- x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]))
-
- # Concat extra elements so to add up to shape (len+1, 2*len-1).
- x_flat = x.view([batch, heads, length * 2 * length])
- x_flat = F.pad(
- x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]])
- )
-
- # Reshape and slice out the padded elements.
- x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[
- :, :, :length, length - 1 :
- ]
- return x_final
-
- def _absolute_position_to_relative_position(self, x):
- """
- x: [b, h, l, l]
- ret: [b, h, l, 2*l-1]
- """
- batch, heads, length, _ = x.size()
- # pad along column
- x = F.pad(
- x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]])
- )
- x_flat = x.view([batch, heads, length**2 + length * (length - 1)])
- # add 0's in the beginning that will skew the elements after reshape
- x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
- x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
- return x_final
-
- def _attention_bias_proximal(self, length):
- """Bias for self-attention to encourage attention to close positions.
- Args:
- length: an integer scalar.
- Returns:
- a Tensor with shape [1, 1, length, length]
- """
- r = torch.arange(length, dtype=torch.float32)
- diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
- return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
-
-
-class FFN(nn.Module):
- def __init__(
- self,
- in_channels,
- out_channels,
- filter_channels,
- kernel_size,
- p_dropout=0.0,
- activation=None,
- causal=False,
- ):
- super().__init__()
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.filter_channels = filter_channels
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.activation = activation
- self.causal = causal
-
- if causal:
- self.padding = self._causal_padding
- else:
- self.padding = self._same_padding
-
- self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
- self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
- self.drop = nn.Dropout(p_dropout)
-
- def forward(self, x, x_mask):
- x = self.conv_1(self.padding(x * x_mask))
- if self.activation == "gelu":
- x = x * torch.sigmoid(1.702 * x)
- else:
- x = torch.relu(x)
- x = self.drop(x)
- x = self.conv_2(self.padding(x * x_mask))
- return x * x_mask
-
- def _causal_padding(self, x):
- if self.kernel_size == 1:
- return x
- pad_l = self.kernel_size - 1
- pad_r = 0
- padding = [[0, 0], [0, 0], [pad_l, pad_r]]
- x = F.pad(x, commons.convert_pad_shape(padding))
- return x
-
- def _same_padding(self, x):
- if self.kernel_size == 1:
- return x
- pad_l = (self.kernel_size - 1) // 2
- pad_r = self.kernel_size // 2
- padding = [[0, 0], [0, 0], [pad_l, pad_r]]
- x = F.pad(x, commons.convert_pad_shape(padding))
- return x
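The relative-attention helpers above avoid explicit index gathering by padding and reshaping. A small standalone check of the `_relative_position_to_absolute_position` trick, on toy sizes (shapes only, no model code):

```python
# Toy reproduction of the pad-and-reshape trick used in
# _relative_position_to_absolute_position: [b, h, l, 2l-1] -> [b, h, l, l].
import torch
import torch.nn.functional as F

b, h, l = 1, 1, 3
x = torch.arange(b * h * l * (2 * l - 1), dtype=torch.float32).view(b, h, l, 2 * l - 1)

x = F.pad(x, (0, 1))                                  # pad one column -> [b, h, l, 2l]
x_flat = x.view(b, h, l * 2 * l)                      # flatten the last two dims
x_flat = F.pad(x_flat, (0, l - 1))                    # extra elements so the reshape lines up
x_final = x_flat.view(b, h, l + 1, 2 * l - 1)[:, :, :l, l - 1:]
print(x_final.shape)                                  # torch.Size([1, 1, 3, 3])
```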
diff --git a/spaces/Xeaser/rvc-tes/vc_infer_pipeline.py b/spaces/Xeaser/rvc-tes/vc_infer_pipeline.py
deleted file mode 100644
index c26d45068f9b6bf2b194b13c3c89f8a06347c124..0000000000000000000000000000000000000000
--- a/spaces/Xeaser/rvc-tes/vc_infer_pipeline.py
+++ /dev/null
@@ -1,306 +0,0 @@
-import numpy as np, parselmouth, torch, pdb
-from time import time as ttime
-import torch.nn.functional as F
-from config import x_pad, x_query, x_center, x_max
-import scipy.signal as signal
-import pyworld, os, traceback, faiss
-from scipy import signal
-
-bh, ah = signal.butter(N=5, Wn=48, btype="high", fs=16000)
-
-
-class VC(object):
- def __init__(self, tgt_sr, device, is_half):
- self.sr = 16000  # HuBERT input sampling rate
- self.window = 160  # samples per frame
- self.t_pad = self.sr * x_pad  # padding time added before/after each clip
- self.t_pad_tgt = tgt_sr * x_pad
- self.t_pad2 = self.t_pad * 2
- self.t_query = self.sr * x_query  # query window around each candidate cut point
- self.t_center = self.sr * x_center  # spacing between cut-point queries
- self.t_max = self.sr * x_max  # duration threshold below which no cut-point query is needed
- self.device = device
- self.is_half = is_half
-
- def get_f0(self, x, p_len, f0_up_key, f0_method, inp_f0=None):
- time_step = self.window / self.sr * 1000
- f0_min = 50
- f0_max = 1100
- f0_mel_min = 1127 * np.log(1 + f0_min / 700)
- f0_mel_max = 1127 * np.log(1 + f0_max / 700)
- if f0_method == "pm":
- f0 = (
- parselmouth.Sound(x, self.sr)
- .to_pitch_ac(
- time_step=time_step / 1000,
- voicing_threshold=0.6,
- pitch_floor=f0_min,
- pitch_ceiling=f0_max,
- )
- .selected_array["frequency"]
- )
- pad_size = (p_len - len(f0) + 1) // 2
- if pad_size > 0 or p_len - len(f0) - pad_size > 0:
- f0 = np.pad(
- f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant"
- )
- elif f0_method == "harvest":
- f0, t = pyworld.harvest(
- x.astype(np.double),
- fs=self.sr,
- f0_ceil=f0_max,
- f0_floor=f0_min,
- frame_period=10,
- )
- f0 = pyworld.stonemask(x.astype(np.double), f0, t, self.sr)
- f0 = signal.medfilt(f0, 3)
- f0 *= pow(2, f0_up_key / 12)
- # with open("test.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()]))
- tf0 = self.sr // self.window  # number of f0 points per second
- if inp_f0 is not None:
- delta_t = np.round(
- (inp_f0[:, 0].max() - inp_f0[:, 0].min()) * tf0 + 1
- ).astype("int16")
- replace_f0 = np.interp(
- list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1]
- )
- shape = f0[x_pad * tf0 : x_pad * tf0 + len(replace_f0)].shape[0]
- f0[x_pad * tf0 : x_pad * tf0 + len(replace_f0)] = replace_f0[:shape]
- # with open("test_opt.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()]))
- f0bak = f0.copy()
- f0_mel = 1127 * np.log(1 + f0 / 700)
- f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (
- f0_mel_max - f0_mel_min
- ) + 1
- f0_mel[f0_mel <= 1] = 1
- f0_mel[f0_mel > 255] = 255
- f0_coarse = np.rint(f0_mel).astype(int)  # np.int was removed in NumPy >= 1.24
- return f0_coarse, f0bak # 1-0
-
- def vc(
- self,
- model,
- net_g,
- sid,
- audio0,
- pitch,
- pitchf,
- times,
- index,
- big_npy,
- index_rate,
- ): # ,file_index,file_big_npy
- feats = torch.from_numpy(audio0)
- if self.is_half:
- feats = feats.half()
- else:
- feats = feats.float()
- if feats.dim() == 2: # double channels
- feats = feats.mean(-1)
- assert feats.dim() == 1, feats.dim()
- feats = feats.view(1, -1)
- padding_mask = torch.BoolTensor(feats.shape).to(self.device).fill_(False)
-
- inputs = {
- "source": feats.to(self.device),
- "padding_mask": padding_mask,
- "output_layer": 9, # layer 9
- }
- t0 = ttime()
- with torch.no_grad():
- logits = model.extract_features(**inputs)
- feats = model.final_proj(logits[0])
-
- if (
- index is not None
- and big_npy is not None
- and index_rate != 0
- ):
- npy = feats[0].cpu().numpy()
- if self.is_half:
- npy = npy.astype("float32")
- _, I = index.search(npy, 1)
- npy = big_npy[I.squeeze()]
- if self.is_half:
- npy = npy.astype("float16")
- feats = (
- torch.from_numpy(npy).unsqueeze(0).to(self.device) * index_rate
- + (1 - index_rate) * feats
- )
-
- feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1)
- t1 = ttime()
- p_len = audio0.shape[0] // self.window
- if feats.shape[1] < p_len:
- p_len = feats.shape[1]
- if pitch is not None and pitchf is not None:
- pitch = pitch[:, :p_len]
- pitchf = pitchf[:, :p_len]
- p_len = torch.tensor([p_len], device=self.device).long()
- with torch.no_grad():
- if pitch is not None and pitchf is not None:
- audio1 = (
- (net_g.infer(feats, p_len, pitch, pitchf, sid)[0][0, 0] * 32768)
- .data.cpu()
- .float()
- .numpy()
- .astype(np.int16)
- )
- else:
- audio1 = (
- (net_g.infer(feats, p_len, sid)[0][0, 0] * 32768)
- .data.cpu()
- .float()
- .numpy()
- .astype(np.int16)
- )
- del feats, p_len, padding_mask
- if torch.cuda.is_available():
- torch.cuda.empty_cache()
- t2 = ttime()
- times[0] += t1 - t0
- times[2] += t2 - t1
- return audio1
-
- def pipeline(
- self,
- model,
- net_g,
- sid,
- audio,
- times,
- f0_up_key,
- f0_method,
- file_index,
- file_big_npy,
- index_rate,
- if_f0,
- f0_file=None,
- ):
- if (
- file_big_npy != ""
- and file_index != ""
- and os.path.exists(file_big_npy)
- and os.path.exists(file_index)
- and index_rate != 0
- ):
- try:
- index = faiss.read_index(file_index)
- big_npy = np.load(file_big_npy)
- except:
- traceback.print_exc()
- index = big_npy = None
- else:
- index = big_npy = None
- print("Feature retrieval library doesn't exist or ratio is 0")
- audio = signal.filtfilt(bh, ah, audio)
- audio_pad = np.pad(audio, (self.window // 2, self.window // 2), mode="reflect")
- opt_ts = []
- if audio_pad.shape[0] > self.t_max:
- audio_sum = np.zeros_like(audio)
- for i in range(self.window):
- audio_sum += audio_pad[i : i - self.window]
- for t in range(self.t_center, audio.shape[0], self.t_center):
- opt_ts.append(
- t
- - self.t_query
- + np.where(
- np.abs(audio_sum[t - self.t_query : t + self.t_query])
- == np.abs(audio_sum[t - self.t_query : t + self.t_query]).min()
- )[0][0]
- )
- s = 0
- audio_opt = []
- t = None
- t1 = ttime()
- audio_pad = np.pad(audio, (self.t_pad, self.t_pad), mode="reflect")
- p_len = audio_pad.shape[0] // self.window
- inp_f0 = None
- if hasattr(f0_file, "name"):
- try:
- with open(f0_file.name, "r") as f:
- lines = f.read().strip("\n").split("\n")
- inp_f0 = []
- for line in lines:
- inp_f0.append([float(i) for i in line.split(",")])
- inp_f0 = np.array(inp_f0, dtype="float32")
- except:
- traceback.print_exc()
- sid = torch.tensor(sid, device=self.device).unsqueeze(0).long()
- pitch, pitchf = None, None
- if if_f0 == 1:
- pitch, pitchf = self.get_f0(audio_pad, p_len, f0_up_key, f0_method, inp_f0)
- pitch = pitch[:p_len]
- pitchf = pitchf[:p_len]
- pitch = torch.tensor(pitch, device=self.device).unsqueeze(0).long()
- pitchf = torch.tensor(pitchf, device=self.device).unsqueeze(0).float()
- t2 = ttime()
- times[1] += t2 - t1
- for t in opt_ts:
- t = t // self.window * self.window
- if if_f0 == 1:
- audio_opt.append(
- self.vc(
- model,
- net_g,
- sid,
- audio_pad[s : t + self.t_pad2 + self.window],
- pitch[:, s // self.window : (t + self.t_pad2) // self.window],
- pitchf[:, s // self.window : (t + self.t_pad2) // self.window],
- times,
- index,
- big_npy,
- index_rate,
- )[self.t_pad_tgt : -self.t_pad_tgt]
- )
- else:
- audio_opt.append(
- self.vc(
- model,
- net_g,
- sid,
- audio_pad[s : t + self.t_pad2 + self.window],
- None,
- None,
- times,
- index,
- big_npy,
- index_rate,
- )[self.t_pad_tgt : -self.t_pad_tgt]
- )
- s = t
- if if_f0 == 1:
- audio_opt.append(
- self.vc(
- model,
- net_g,
- sid,
- audio_pad[t:],
- pitch[:, t // self.window :] if t is not None else pitch,
- pitchf[:, t // self.window :] if t is not None else pitchf,
- times,
- index,
- big_npy,
- index_rate,
- )[self.t_pad_tgt : -self.t_pad_tgt]
- )
- else:
- audio_opt.append(
- self.vc(
- model,
- net_g,
- sid,
- audio_pad[t:],
- None,
- None,
- times,
- index,
- big_npy,
- index_rate,
- )[self.t_pad_tgt : -self.t_pad_tgt]
- )
- audio_opt = np.concatenate(audio_opt)
- del pitch, pitchf, sid
- if torch.cuda.is_available():
- torch.cuda.empty_cache()
- return audio_opt
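The `get_f0` method above maps raw F0 in Hz onto 255 coarse mel-spaced bins before handing it to the model. A self-contained sketch of that quantization with dummy values:

```python
# Isolated sketch of the coarse-F0 quantization in VC.get_f0(): Hz -> mel ->
# integer bins 1..255, with unvoiced (0 Hz) frames collapsing to bin 1.
import numpy as np

f0 = np.array([0.0, 110.0, 220.0, 440.0, 880.0])      # dummy pitch track
f0_min, f0_max = 50.0, 1100.0
f0_mel_min = 1127 * np.log(1 + f0_min / 700)
f0_mel_max = 1127 * np.log(1 + f0_max / 700)

f0_mel = 1127 * np.log(1 + f0 / 700)
f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (f0_mel_max - f0_mel_min) + 1
f0_mel = np.clip(f0_mel, 1, 255)                      # same effect as the two masked clamps
f0_coarse = np.rint(f0_mel).astype(int)               # plain int: np.int is gone in NumPy >= 1.24
print(f0_coarse)
```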
diff --git a/spaces/XuebaoDingZhen/YOLOv50.0.1/classify/val.py b/spaces/XuebaoDingZhen/YOLOv50.0.1/classify/val.py
deleted file mode 100644
index 4b92e9f105db9e7af6521b6689c279e948153a11..0000000000000000000000000000000000000000
--- a/spaces/XuebaoDingZhen/YOLOv50.0.1/classify/val.py
+++ /dev/null
@@ -1,170 +0,0 @@
-# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
-"""
-Validate a trained YOLOv5 classification model on a classification dataset
-
-Usage:
- $ bash data/scripts/get_imagenet.sh --val # download ImageNet val split (6.3G, 50000 images)
- $ python classify/val.py --weights yolov5m-cls.pt --data ../datasets/imagenet --img 224 # validate ImageNet
-
-Usage - formats:
- $ python classify/val.py --weights yolov5s-cls.pt # PyTorch
- yolov5s-cls.torchscript # TorchScript
- yolov5s-cls.onnx # ONNX Runtime or OpenCV DNN with --dnn
- yolov5s-cls_openvino_model # OpenVINO
- yolov5s-cls.engine # TensorRT
- yolov5s-cls.mlmodel # CoreML (macOS-only)
- yolov5s-cls_saved_model # TensorFlow SavedModel
- yolov5s-cls.pb # TensorFlow GraphDef
- yolov5s-cls.tflite # TensorFlow Lite
- yolov5s-cls_edgetpu.tflite # TensorFlow Edge TPU
- yolov5s-cls_paddle_model # PaddlePaddle
-"""
-
-import argparse
-import os
-import sys
-from pathlib import Path
-
-import torch
-from tqdm import tqdm
-
-FILE = Path(__file__).resolve()
-ROOT = FILE.parents[1] # YOLOv5 root directory
-if str(ROOT) not in sys.path:
- sys.path.append(str(ROOT)) # add ROOT to PATH
-ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
-
-from models.common import DetectMultiBackend
-from utils.dataloaders import create_classification_dataloader
-from utils.general import (LOGGER, TQDM_BAR_FORMAT, Profile, check_img_size, check_requirements, colorstr,
- increment_path, print_args)
-from utils.torch_utils import select_device, smart_inference_mode
-
-
-@smart_inference_mode()
-def run(
- data=ROOT / '../datasets/mnist', # dataset dir
- weights=ROOT / 'yolov5s-cls.pt', # model.pt path(s)
- batch_size=128, # batch size
- imgsz=224, # inference size (pixels)
- device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu
- workers=8, # max dataloader workers (per RANK in DDP mode)
- verbose=False, # verbose output
- project=ROOT / 'runs/val-cls', # save to project/name
- name='exp', # save to project/name
- exist_ok=False, # existing project/name ok, do not increment
- half=False, # use FP16 half-precision inference
- dnn=False, # use OpenCV DNN for ONNX inference
- model=None,
- dataloader=None,
- criterion=None,
- pbar=None,
-):
- # Initialize/load model and set device
- training = model is not None
- if training: # called by train.py
- device, pt, jit, engine = next(model.parameters()).device, True, False, False # get model device, PyTorch model
- half &= device.type != 'cpu' # half precision only supported on CUDA
- model.half() if half else model.float()
- else: # called directly
- device = select_device(device, batch_size=batch_size)
-
- # Directories
- save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run
- save_dir.mkdir(parents=True, exist_ok=True) # make dir
-
- # Load model
- model = DetectMultiBackend(weights, device=device, dnn=dnn, fp16=half)
- stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine
- imgsz = check_img_size(imgsz, s=stride) # check image size
- half = model.fp16 # FP16 supported on limited backends with CUDA
- if engine:
- batch_size = model.batch_size
- else:
- device = model.device
- if not (pt or jit):
- batch_size = 1 # export.py models default to batch-size 1
- LOGGER.info(f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models')
-
- # Dataloader
- data = Path(data)
- test_dir = data / 'test' if (data / 'test').exists() else data / 'val' # data/test or data/val
- dataloader = create_classification_dataloader(path=test_dir,
- imgsz=imgsz,
- batch_size=batch_size,
- augment=False,
- rank=-1,
- workers=workers)
-
- model.eval()
- pred, targets, loss, dt = [], [], 0, (Profile(), Profile(), Profile())
- n = len(dataloader) # number of batches
- action = 'validating' if dataloader.dataset.root.stem == 'val' else 'testing'
- desc = f'{pbar.desc[:-36]}{action:>36}' if pbar else f'{action}'
- bar = tqdm(dataloader, desc, n, not training, bar_format=TQDM_BAR_FORMAT, position=0)
- with torch.cuda.amp.autocast(enabled=device.type != 'cpu'):
- for images, labels in bar:
- with dt[0]:
- images, labels = images.to(device, non_blocking=True), labels.to(device)
-
- with dt[1]:
- y = model(images)
-
- with dt[2]:
- pred.append(y.argsort(1, descending=True)[:, :5])
- targets.append(labels)
- if criterion:
- loss += criterion(y, labels)
-
- loss /= n
- pred, targets = torch.cat(pred), torch.cat(targets)
- correct = (targets[:, None] == pred).float()
- acc = torch.stack((correct[:, 0], correct.max(1).values), dim=1) # (top1, top5) accuracy
- top1, top5 = acc.mean(0).tolist()
-
- if pbar:
- pbar.desc = f'{pbar.desc[:-36]}{loss:>12.3g}{top1:>12.3g}{top5:>12.3g}'
- if verbose: # all classes
- LOGGER.info(f"{'Class':>24}{'Images':>12}{'top1_acc':>12}{'top5_acc':>12}")
- LOGGER.info(f"{'all':>24}{targets.shape[0]:>12}{top1:>12.3g}{top5:>12.3g}")
- for i, c in model.names.items():
- acc_i = acc[targets == i]
- top1i, top5i = acc_i.mean(0).tolist()
- LOGGER.info(f'{c:>24}{acc_i.shape[0]:>12}{top1i:>12.3g}{top5i:>12.3g}')
-
- # Print results
- t = tuple(x.t / len(dataloader.dataset.samples) * 1E3 for x in dt) # speeds per image
- shape = (1, 3, imgsz, imgsz)
- LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms post-process per image at shape {shape}' % t)
- LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}")
-
- return top1, top5, loss
-
-
-def parse_opt():
- parser = argparse.ArgumentParser()
- parser.add_argument('--data', type=str, default=ROOT / '../datasets/mnist', help='dataset path')
- parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s-cls.pt', help='model.pt path(s)')
- parser.add_argument('--batch-size', type=int, default=128, help='batch size')
- parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=224, help='inference size (pixels)')
- parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
- parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)')
- parser.add_argument('--verbose', nargs='?', const=True, default=True, help='verbose output')
- parser.add_argument('--project', default=ROOT / 'runs/val-cls', help='save to project/name')
- parser.add_argument('--name', default='exp', help='save to project/name')
- parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
- parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
- parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')
- opt = parser.parse_args()
- print_args(vars(opt))
- return opt
-
-
-def main(opt):
- check_requirements(ROOT / 'requirements.txt', exclude=('tensorboard', 'thop'))
- run(**vars(opt))
-
-
-if __name__ == '__main__':
- opt = parse_opt()
- main(opt)
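For reference, the top-1/top-5 accuracy bookkeeping used in `run()` above, isolated on dummy logits:

```python
# Minimal sketch of the top-1 / top-5 computation in classify/val.py's run():
# keep the 5 highest-scoring class indices per image, then check the label.
import torch

logits = torch.randn(8, 10)                            # 8 images, 10 classes (dummy values)
targets = torch.randint(0, 10, (8,))

pred = logits.argsort(1, descending=True)[:, :5]       # [8, 5] top-5 class indices
correct = (targets[:, None] == pred).float()           # 1.0 wherever the label appears
acc = torch.stack((correct[:, 0], correct.max(1).values), dim=1)
top1, top5 = acc.mean(0).tolist()
print(f"top1={top1:.3f} top5={top5:.3f}")
```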
diff --git a/spaces/XzJosh/Azuma-Bert-VITS2/bert/chinese-roberta-wwm-ext-large/README.md b/spaces/XzJosh/Azuma-Bert-VITS2/bert/chinese-roberta-wwm-ext-large/README.md
deleted file mode 100644
index 7bce039b7f81ee328fdf8efe3f14409200aacbef..0000000000000000000000000000000000000000
--- a/spaces/XzJosh/Azuma-Bert-VITS2/bert/chinese-roberta-wwm-ext-large/README.md
+++ /dev/null
@@ -1,57 +0,0 @@
----
-language:
-- zh
-tags:
-- bert
-license: "apache-2.0"
----
-
-# Please use 'Bert' related functions to load this model!
-
-## Chinese BERT with Whole Word Masking
-To further accelerate Chinese natural language processing, we provide **Chinese pre-trained BERT with Whole Word Masking**.
-
-**[Pre-Training with Whole Word Masking for Chinese BERT](https://arxiv.org/abs/1906.08101)**
-Yiming Cui, Wanxiang Che, Ting Liu, Bing Qin, Ziqing Yang, Shijin Wang, Guoping Hu
-
-This repository is developed based on: https://github.com/google-research/bert
-
-You may also be interested in:
-- Chinese BERT series: https://github.com/ymcui/Chinese-BERT-wwm
-- Chinese MacBERT: https://github.com/ymcui/MacBERT
-- Chinese ELECTRA: https://github.com/ymcui/Chinese-ELECTRA
-- Chinese XLNet: https://github.com/ymcui/Chinese-XLNet
-- Knowledge Distillation Toolkit - TextBrewer: https://github.com/airaria/TextBrewer
-
-More resources by HFL: https://github.com/ymcui/HFL-Anthology
-
-## Citation
-If you find the technical report or resource is useful, please cite the following technical report in your paper.
-- Primary: https://arxiv.org/abs/2004.13922
-```
-@inproceedings{cui-etal-2020-revisiting,
- title = "Revisiting Pre-Trained Models for {C}hinese Natural Language Processing",
- author = "Cui, Yiming and
- Che, Wanxiang and
- Liu, Ting and
- Qin, Bing and
- Wang, Shijin and
- Hu, Guoping",
- booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: Findings",
- month = nov,
- year = "2020",
- address = "Online",
- publisher = "Association for Computational Linguistics",
- url = "https://www.aclweb.org/anthology/2020.findings-emnlp.58",
- pages = "657--668",
-}
-```
-- Secondary: https://arxiv.org/abs/1906.08101
-```
-@article{chinese-bert-wwm,
- title={Pre-Training with Whole Word Masking for Chinese BERT},
- author={Cui, Yiming and Che, Wanxiang and Liu, Ting and Qin, Bing and Yang, Ziqing and Wang, Shijin and Hu, Guoping},
- journal={arXiv preprint arXiv:1906.08101},
- year={2019}
- }
-```
\ No newline at end of file
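A hedged loading sketch consistent with the note above about using BERT-related classes; the hub id below is an assumption (the upstream HFL release of this checkpoint), not something stated in this README:

```python
# Hedged example: load the checkpoint with BERT classes, as the README asks.
# The model id is assumed to be the upstream HFL release of this model.
from transformers import BertTokenizer, BertModel

tokenizer = BertTokenizer.from_pretrained("hfl/chinese-roberta-wwm-ext-large")
model = BertModel.from_pretrained("hfl/chinese-roberta-wwm-ext-large")

inputs = tokenizer("使用整词掩码的中文预训练模型。", return_tensors="pt")
outputs = model(**inputs)
print(outputs.last_hidden_state.shape)   # [1, seq_len, 1024] for the large model
```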
diff --git a/spaces/XzJosh/Diana-Bert-VITS2/monotonic_align/__init__.py b/spaces/XzJosh/Diana-Bert-VITS2/monotonic_align/__init__.py
deleted file mode 100644
index 75603d26cf2b8d6196f5a68a89f9e49d8e519bc8..0000000000000000000000000000000000000000
--- a/spaces/XzJosh/Diana-Bert-VITS2/monotonic_align/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-from numpy import zeros, int32, float32
-from torch import from_numpy
-
-from .core import maximum_path_jit
-
-def maximum_path(neg_cent, mask):
- device = neg_cent.device
- dtype = neg_cent.dtype
- neg_cent = neg_cent.data.cpu().numpy().astype(float32)
- path = zeros(neg_cent.shape, dtype=int32)
-
- t_t_max = mask.sum(1)[:, 0].data.cpu().numpy().astype(int32)
- t_s_max = mask.sum(2)[:, 0].data.cpu().numpy().astype(int32)
- maximum_path_jit(path, neg_cent, t_t_max, t_s_max)
- return from_numpy(path).to(device=device, dtype=dtype)
diff --git a/spaces/XzJosh/maimai-Bert-VITS2/text/japanese.py b/spaces/XzJosh/maimai-Bert-VITS2/text/japanese.py
deleted file mode 100644
index ddedafa0c5b7986068dc6c91637a86febc3923a9..0000000000000000000000000000000000000000
--- a/spaces/XzJosh/maimai-Bert-VITS2/text/japanese.py
+++ /dev/null
@@ -1,104 +0,0 @@
-# modified from https://github.com/CjangCjengh/vits/blob/main/text/japanese.py
-import re
-import sys
-
-import pyopenjtalk
-
-from text import symbols
-
-# Regular expression matching Japanese without punctuation marks:
-_japanese_characters = re.compile(
- r'[A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]')
-
-# Regular expression matching non-Japanese characters or punctuation marks:
-_japanese_marks = re.compile(
- r'[^A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]')
-
-# List of (symbol, Japanese) pairs for marks:
-_symbols_to_japanese = [(re.compile('%s' % x[0]), x[1]) for x in [
- ('%', 'パーセント')
-]]
-
-
-# List of (consonant, sokuon) pairs:
-_real_sokuon = [(re.compile('%s' % x[0]), x[1]) for x in [
- (r'Q([↑↓]*[kg])', r'k#\1'),
- (r'Q([↑↓]*[tdjʧ])', r't#\1'),
- (r'Q([↑↓]*[sʃ])', r's\1'),
- (r'Q([↑↓]*[pb])', r'p#\1')
-]]
-
-# List of (consonant, hatsuon) pairs:
-_real_hatsuon = [(re.compile('%s' % x[0]), x[1]) for x in [
- (r'N([↑↓]*[pbm])', r'm\1'),
- (r'N([↑↓]*[ʧʥj])', r'n^\1'),
- (r'N([↑↓]*[tdn])', r'n\1'),
- (r'N([↑↓]*[kg])', r'ŋ\1')
-]]
-
-
-
-def post_replace_ph(ph):
- rep_map = {
- ':': ',',
- ';': ',',
- ',': ',',
- '。': '.',
- '!': '!',
- '?': '?',
- '\n': '.',
- "·": ",",
- '、': ",",
- '...': '…',
- 'v': "V"
- }
- if ph in rep_map.keys():
- ph = rep_map[ph]
- if ph in symbols:
- return ph
- if ph not in symbols:
- ph = 'UNK'
- return ph
-
-def symbols_to_japanese(text):
- for regex, replacement in _symbols_to_japanese:
- text = re.sub(regex, replacement, text)
- return text
-
-
-def preprocess_jap(text):
- '''Reference https://r9y9.github.io/ttslearn/latest/notebooks/ch10_Recipe-Tacotron.html'''
- text = symbols_to_japanese(text)
- sentences = re.split(_japanese_marks, text)
- marks = re.findall(_japanese_marks, text)
- text = []
- for i, sentence in enumerate(sentences):
- if re.match(_japanese_characters, sentence):
- p = pyopenjtalk.g2p(sentence)
- text += p.split(" ")
-
- if i < len(marks):
- text += [marks[i].replace(' ', '')]
- return text
-
-def text_normalize(text):
- # TODO: Japanese text normalization
- return text
-
-def g2p(norm_text):
- phones = preprocess_jap(norm_text)
- phones = [post_replace_ph(i) for i in phones]
- # todo: implement tones and word2ph
- tones = [0 for i in phones]
- word2ph = [1 for i in phones]
- return phones, tones, word2ph
-
-
-if __name__ == '__main__':
- for line in open("../../../Downloads/transcript_utf8.txt").readlines():
- text = line.split(":")[1]
- phones, tones, word2ph = g2p(text)
- for p in phones:
- if p == "z":
- print(text, phones)
- sys.exit(0)
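A usage sketch of the `g2p` pipeline above (requires `pyopenjtalk` and the project's `text.symbols`; the sample sentence and the printed phonemes are illustrative, not taken from this repository):

```python
# Illustrative only: exact phoneme output depends on the pyopenjtalk version.
phones, tones, word2ph = g2p("こんにちは、世界。")
print(phones)       # e.g. ['k', 'o', 'N', 'n', 'i', 'ch', 'i', 'w', 'a', ',', ...]
print(tones[:5])    # all zeros until tone prediction is implemented
print(word2ph[:5])  # all ones: one phone per placeholder "word"
```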
diff --git a/spaces/YeOldHermit/Super-Resolution-Anime-Diffusion/diffusers/experimental/__init__.py b/spaces/YeOldHermit/Super-Resolution-Anime-Diffusion/diffusers/experimental/__init__.py
deleted file mode 100644
index ebc8155403016dfd8ad7fb78d246f9da9098ac50..0000000000000000000000000000000000000000
--- a/spaces/YeOldHermit/Super-Resolution-Anime-Diffusion/diffusers/experimental/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from .rl import ValueGuidedRLPipeline
diff --git a/spaces/Zaixi/ICLR_FLAG/utils/data.py b/spaces/Zaixi/ICLR_FLAG/utils/data.py
deleted file mode 100644
index b1b62c321dec4424dc78714ff9550f446804e273..0000000000000000000000000000000000000000
--- a/spaces/Zaixi/ICLR_FLAG/utils/data.py
+++ /dev/null
@@ -1,127 +0,0 @@
-import copy
-import torch
-import numpy as np
-from torch_geometric.data import Data, Batch
-# from torch_geometric.loader import DataLoader
-from torch.utils.data import Dataset
-
-FOLLOW_BATCH = ['protein_element', 'ligand_context_element', 'pos_real', 'pos_fake']
-
-
-class ProteinLigandData(Data):  # needs Data's __setitem__ for the instance[...] assignments below
-
- def __init__(self, *args, **kwargs):
- super().__init__(*args, **kwargs)
-
- @staticmethod
- def from_protein_ligand_dicts(protein_dict=None, ligand_dict=None, **kwargs):
- instance = ProteinLigandData(**kwargs)
-
- if protein_dict is not None:
- for key, item in protein_dict.items():
- instance['protein_' + key] = item
-
- if ligand_dict is not None:
- for key, item in ligand_dict.items():
- if key == 'moltree':
- instance['moltree'] = item
- else:
- instance['ligand_' + key] = item
-
- # instance['ligand_nbh_list'] = {i.item():[j.item() for k, j in enumerate(instance.ligand_bond_index[1]) if instance.ligand_bond_index[0, k].item() == i] for i in instance.ligand_bond_index[0]}
- return instance
-
-
-def batch_from_data_list(data_list):
- return Batch.from_data_list(data_list, follow_batch=['ligand_element', 'protein_element'])
-
-
-def torchify_dict(data):
- output = {}
- for k, v in data.items():
- if isinstance(v, np.ndarray):
- output[k] = torch.from_numpy(v)
- else:
- output[k] = v
- return output
-
-
-def collate_mols(mol_dicts):
- data_batch = {}
- batch_size = len(mol_dicts)
- for key in ['protein_pos', 'protein_atom_feature', 'ligand_context_pos', 'ligand_context_feature_full',
- 'ligand_frontier', 'num_atoms', 'next_wid', 'current_wid', 'current_atoms', 'cand_labels',
- 'ligand_pos_torsion', 'ligand_feature_torsion', 'true_sin', 'true_cos', 'true_three_hop',
- 'dihedral_mask', 'protein_contact', 'true_dm', 'alpha_carbon_indicator']:
- data_batch[key] = torch.cat([mol_dict[key] for mol_dict in mol_dicts], dim=0)
- # unsqueeze dim0
- for key in ['xn_pos', 'yn_pos', 'ligand_torsion_xy_index', 'y_pos']:
- cat_list = [mol_dict[key].unsqueeze(0) for mol_dict in mol_dicts if len(mol_dict[key]) > 0]
- if len(cat_list) > 0:
- data_batch[key] = torch.cat(cat_list, dim=0)
- else:
- data_batch[key] = torch.tensor([])
- # follow batch
- for key in ['protein_element', 'ligand_context_element', 'current_atoms']:
- repeats = torch.tensor([len(mol_dict[key]) for mol_dict in mol_dicts])
- data_batch[key + '_batch'] = torch.repeat_interleave(torch.arange(batch_size), repeats)
- for key in ['ligand_element_torsion']:
- repeats = torch.tensor([len(mol_dict[key]) for mol_dict in mol_dicts if len(mol_dict[key]) > 0])
- if len(repeats) > 0:
- data_batch[key + '_batch'] = torch.repeat_interleave(torch.arange(len(repeats)), repeats)
- else:
- data_batch[key + '_batch'] = torch.tensor([])
- # distance matrix prediction
- p_idx, q_idx = torch.cartesian_prod(torch.arange(4), torch.arange(2)).chunk(2, dim=-1)
- p_idx, q_idx = p_idx.squeeze(-1), q_idx.squeeze(-1)
- protein_offsets = torch.cumsum(data_batch['protein_element_batch'].bincount(), dim=0)
- ligand_offsets = torch.cumsum(data_batch['ligand_context_element_batch'].bincount(), dim=0)
- protein_offsets, ligand_offsets = torch.cat([torch.tensor([0]), protein_offsets]), torch.cat([torch.tensor([0]), ligand_offsets])
- ligand_idx, protein_idx = [], []
- for i, mol_dict in enumerate(mol_dicts):
- if len(mol_dict['true_dm']) > 0:
- protein_idx.append(mol_dict['dm_protein_idx'][p_idx] + protein_offsets[i])
- ligand_idx.append(mol_dict['dm_ligand_idx'][q_idx] + ligand_offsets[i])
- if len(ligand_idx) > 0:
- data_batch['dm_ligand_idx'], data_batch['dm_protein_idx'] = torch.cat(ligand_idx), torch.cat(protein_idx)
-
- # structure refinement (alpha carbon - ligand atom)
- sr_ligand_idx, sr_protein_idx = [], []
- for i, mol_dict in enumerate(mol_dicts):
- if len(mol_dict['true_dm']) > 0:
- ligand_atom_index = torch.arange(len(mol_dict['ligand_context_pos']))
- p_idx, q_idx = torch.cartesian_prod(torch.arange(len(mol_dict['ligand_context_pos'])), torch.arange(len(mol_dict['protein_alpha_carbon_index']))).chunk(2, dim=-1)
- p_idx, q_idx = p_idx.squeeze(-1), q_idx.squeeze(-1)
- sr_ligand_idx.append(ligand_atom_index[p_idx] + ligand_offsets[i])
- sr_protein_idx.append(mol_dict['protein_alpha_carbon_index'][q_idx] + protein_offsets[i])
- if len(sr_ligand_idx) > 0:
- data_batch['sr_ligand_idx'], data_batch['sr_protein_idx'] = torch.cat(sr_ligand_idx).long(), torch.cat(sr_protein_idx).long()
-
- # structure refinement (ligand atom - ligand atom)
- sr_ligand_idx0, sr_ligand_idx1 = [], []
- for i, mol_dict in enumerate(mol_dicts):
- if len(mol_dict['true_dm']) > 0:
- ligand_atom_index = torch.arange(len(mol_dict['ligand_context_pos']))
- p_idx, q_idx = torch.cartesian_prod(torch.arange(len(mol_dict['ligand_context_pos'])), torch.arange(len(mol_dict['ligand_context_pos']))).chunk(2, dim=-1)
- p_idx, q_idx = p_idx.squeeze(-1), q_idx.squeeze(-1)
- sr_ligand_idx0.append(ligand_atom_index[p_idx] + ligand_offsets[i])
- sr_ligand_idx1.append(ligand_atom_index[q_idx] + ligand_offsets[i])
- if len(ligand_idx) > 0:
- data_batch['sr_ligand_idx0'], data_batch['sr_ligand_idx1'] = torch.cat(sr_ligand_idx0).long(), torch.cat(sr_ligand_idx1).long()
- # index
- if len(data_batch['y_pos']) > 0:
- repeats = torch.tensor([len(mol_dict['ligand_element_torsion']) for mol_dict in mol_dicts if len(mol_dict['ligand_element_torsion']) > 0])
- offsets = torch.cat([torch.tensor([0]), torch.cumsum(repeats, dim=0)])[:-1]
- data_batch['ligand_torsion_xy_index'] += offsets.unsqueeze(1)
-
- offsets1 = torch.cat([torch.tensor([0]), torch.cumsum(data_batch['num_atoms'], dim=0)])[:-1]
- data_batch['current_atoms'] += torch.repeat_interleave(offsets1, data_batch['current_atoms_batch'].bincount())
- # cand mols: torch geometric Data
- cand_mol_list = []
- for data in mol_dicts:
- if len(data['cand_labels']) > 0:
- cand_mol_list.extend(data['cand_mols'])
- if len(cand_mol_list) > 0:
- data_batch['cand_mols'] = Batch.from_data_list(cand_mol_list)
- return data_batch
-
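The batching in `collate_mols` above leans on two idioms: `repeat_interleave` to build follow-batch vectors and cumulative sums to offset per-graph indices. A tiny standalone illustration:

```python
# Tiny sketch of the "follow batch" bookkeeping in collate_mols(): per-graph
# element counts become a batch-index vector, and their cumulative sums give
# the offsets added to per-graph atom indices after concatenation.
import torch

counts = torch.tensor([3, 2, 4])                        # atoms per molecule
batch = torch.repeat_interleave(torch.arange(3), counts)
offsets = torch.cat([torch.tensor([0]), torch.cumsum(counts, dim=0)])[:-1]
print(batch)     # tensor([0, 0, 0, 1, 1, 2, 2, 2, 2])
print(offsets)   # tensor([0, 3, 5])
```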
diff --git a/spaces/ZhaoYoujia/ImageRecognition/README.md b/spaces/ZhaoYoujia/ImageRecognition/README.md
deleted file mode 100644
index 567137bdf26a6316c222a5f4407843488b831086..0000000000000000000000000000000000000000
--- a/spaces/ZhaoYoujia/ImageRecognition/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: ImageRecognition
-emoji: 👁
-colorFrom: yellow
-colorTo: green
-sdk: gradio
-sdk_version: 3.35.2
-app_file: app.py
-pinned: false
-license: apache-2.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/abdalrahmanshahrour/Summarization/summarize.py b/spaces/abdalrahmanshahrour/Summarization/summarize.py
deleted file mode 100644
index 7a019f47bd0ba6c67b3cf269a5dc6cc1061424f0..0000000000000000000000000000000000000000
--- a/spaces/abdalrahmanshahrour/Summarization/summarize.py
+++ /dev/null
@@ -1,177 +0,0 @@
-import logging
-import os
-import re
-from functools import lru_cache
-from urllib.parse import unquote
-
-import streamlit as st
-from codetiming import Timer
-from transformers import pipeline
-from arabert.preprocess import ArabertPreprocessor
-from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, AutoModelForCausalLM
-import tokenizers
-import re
-import heapq
-from string import punctuation
-import nltk
-from nltk.corpus import stopwords
-import download
-nltk.download('punkt')
-nltk.download('stopwords')
-nltk.download('wordnet')
-nltk.download('omw-1.4')
-
-
-punctuation = punctuation + '\n'
-logger = logging.getLogger(__name__)
-os.environ["TOKENIZERS_PARALLELISM"] = "false"
-
-logger.info("Loading models...")
-reader_time = Timer("loading", text="Time: {:.2f}", logger=logging.info)
-reader_time.start()
-
-
-reader_time.stop()
-
-
-logger.info("Finished loading the models...")
-logger.info(f"Time spent loading: {reader_time.last}")
-
-@lru_cache(maxsize=200)
-def get_results(text, model_selected, num_beams, length_penalty,number_of_sentence):
- logger.info("\n=================================================================")
- logger.info(f"Text: {text}")
- logger.info(f"model_selected: {model_selected}")
- logger.info(f"length_penalty: {length_penalty}")
- reader_time = Timer("summarize", text="Time: {:.2f}", logger=logging.info)
- reader_time.start()
- if model_selected == 'GPT-2':
- number_of_tokens_limit = 80
- else:
- number_of_tokens_limit = 150
- logger.info(f"input length: {len(text.split())}")
-
- if model_selected == 'arabartsummarization':
- model_name="abdalrahmanshahrour/arabartsummarization"
- preprocessor = ArabertPreprocessor(model_name="")
-
- tokenizer = AutoTokenizer.from_pretrained(model_name)
- model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
- pipeline1 = pipeline("text2text-generation",model=model,tokenizer=tokenizer)
- result = pipeline1(text,
- pad_token_id= tokenizer.eos_token_id,
- num_beams=num_beams,
- repetition_penalty=3.0,
- max_length=200,
- length_penalty=length_penalty,
- no_repeat_ngram_size = 3)[0]['generated_text']
- logger.info('arabartsummarization')
- elif model_selected == 'AraBART':
-
- model_name= "abdalrahmanshahrour/AraBART-summ"
- preprocessor = ArabertPreprocessor(model_name="")
-
- tokenizer = AutoTokenizer.from_pretrained(model_name)
- model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
- pipeline1 = pipeline("text2text-generation",model=model,tokenizer=tokenizer)
- result = pipeline1(text,
- pad_token_id= tokenizer.eos_token_id,
- num_beams=num_beams,
- repetition_penalty=3.0,
- max_length=200,
- length_penalty=length_penalty,
- no_repeat_ngram_size = 3)[0]['generated_text']
- logger.info('AraBART')
-
- elif model_selected == "auto-arabic-summarization":
-
- model_name="abdalrahmanshahrour/auto-arabic-summarization"
- preprocessor = ArabertPreprocessor(model_name="")
-
- tokenizer = AutoTokenizer.from_pretrained(model_name)
- model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
- pipeline1 = pipeline("text2text-generation",model=model,tokenizer=tokenizer)
- result = pipeline1(text,
- pad_token_id= tokenizer.eos_token_id,
- num_beams=num_beams,
- repetition_penalty=3.0,
- max_length=200,
- length_penalty=length_penalty,
- no_repeat_ngram_size = 3)[0]['generated_text']
- logger.info('auto-arabic-summarization')
-
- elif model_selected == 'BERT2BERT':
-
- model_name="malmarjeh/bert2bert"
- preprocessor = ArabertPreprocessor(model_name="")
-
- tokenizer = AutoTokenizer.from_pretrained(model_name)
- model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
- pipeline1 = pipeline("text2text-generation",model=model,tokenizer=tokenizer)
- result = pipeline1(text,
- pad_token_id= tokenizer.eos_token_id,
- num_beams=num_beams,
- repetition_penalty=3.0,
- max_length=200,
- length_penalty=length_penalty,
- no_repeat_ngram_size = 3)[0]['generated_text']
- logger.info('BERT2BERT')
-
- elif model_selected == "xlmroberta2xlmroberta":
- model_name="ahmeddbahaa/xlmroberta2xlmroberta-finetune-summarization-ar"
- preprocessor = ArabertPreprocessor(model_name="")
-
- tokenizer = AutoTokenizer.from_pretrained(model_name)
- model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
- pipeline1 = pipeline("text2text-generation",model=model,tokenizer=tokenizer)
- result = pipeline1(text,
- pad_token_id= tokenizer.eos_token_id,
- num_beams=num_beams,
- repetition_penalty=3.0,
- max_length=200,
- length_penalty=length_penalty,
- no_repeat_ngram_size = 3)[0]['generated_text']
- logger.info('xlmroberta2xlmroberta')
-
- elif model_selected == "nltk_summarizer":
- # number_of_sentence = 3
- stopWords = set(nltk.corpus.stopwords.words("arabic") + nltk.corpus.stopwords.words("english"))
- word_frequencies = {}
- for word in nltk.word_tokenize(text):
- if word not in stopWords:
- if word not in punctuation:
- if word not in word_frequencies.keys():
- word_frequencies[word] = 1
- else:
- word_frequencies[word] += 1
-
- maximum_frequncy = max(list(word_frequencies.values()),default=3)
-
- for word in word_frequencies.keys():
- word_frequencies[word] = (word_frequencies[word]/maximum_frequncy)
-
- sentence_list = nltk.sent_tokenize(text)
- sentence_scores = {}
- for sent in sentence_list:
- for word in nltk.word_tokenize(sent.lower()):
- if word in word_frequencies.keys():
- if len(sent.split(' ')) < 30:
- if sent not in sentence_scores.keys():
- sentence_scores[sent] = word_frequencies[word]
- else:
- sentence_scores[sent] += word_frequencies[word]
-
- summary_sentences = heapq.nlargest(number_of_sentence, sentence_scores, key=sentence_scores.get)
-
- result = ' '.join(summary_sentences)
- else:
- result = "الرجاء اختيار نموذج"
-
- reader_time.stop()
- logger.info(f"Time spent summarizing: {reader_time.last}")
-
- return result
-
-
-if __name__ == "__main__":
- results_dict = ""
\ No newline at end of file
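The `nltk_summarizer` branch above is a classic frequency-based extractive summarizer. A compact standalone version of the same idea (English stopwords only, for brevity; not part of the original app):

```python
# Standalone sketch of the nltk_summarizer branch: score sentences by
# normalized word frequency and keep the top-N highest-scoring ones.
import heapq
from string import punctuation

import nltk

nltk.download("punkt", quiet=True)
nltk.download("stopwords", quiet=True)

def freq_summary(text, n_sentences=2):
    stop = set(nltk.corpus.stopwords.words("english")) | set(punctuation)
    freqs = {}
    for w in nltk.word_tokenize(text.lower()):
        if w not in stop:
            freqs[w] = freqs.get(w, 0) + 1
    peak = max(freqs.values(), default=1)
    scores = {}
    for sent in nltk.sent_tokenize(text):
        for w in nltk.word_tokenize(sent.lower()):
            if w in freqs and len(sent.split()) < 30:
                scores[sent] = scores.get(sent, 0) + freqs[w] / peak
    return " ".join(heapq.nlargest(n_sentences, scores, key=scores.get))
```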
diff --git a/spaces/abdvl/datahub_qa_bot/docs/actions/README.md b/spaces/abdvl/datahub_qa_bot/docs/actions/README.md
deleted file mode 100644
index 23596ec67514e5bd32bb3ef78fcfa3843ce925e8..0000000000000000000000000000000000000000
--- a/spaces/abdvl/datahub_qa_bot/docs/actions/README.md
+++ /dev/null
@@ -1,255 +0,0 @@
-# ⚡ DataHub Actions Framework
-
-Welcome to DataHub Actions! The Actions framework makes responding to realtime changes in your Metadata Graph easy, enabling you to seamlessly integrate [DataHub](https://github.com/datahub-project/datahub) into a broader events-based architecture.
-
-For a detailed introduction, check out the [original announcement](https://www.youtube.com/watch?v=7iwNxHgqxtg&t=2189s) of the DataHub Actions Framework at the DataHub April 2022 Town Hall. For a more in-depth look at use cases and concepts, check out [DataHub Actions Concepts](concepts.md).
-
-## Quickstart
-
-To get started right away, check out the [DataHub Actions Quickstart](quickstart.md) Guide.
-
-
-## Prerequisites
-
-The DataHub Actions CLI commands are an extension of the base `datahub` CLI commands. We recommend
-first installing the `datahub` CLI:
-
-```shell
-python3 -m pip install --upgrade pip wheel setuptools
-python3 -m pip install --upgrade acryl-datahub
-datahub --version
-```
-
-> Note that the Actions Framework requires a version of `acryl-datahub` >= v0.8.34
-
-
-## Installation
-
-Next, simply install the `acryl-datahub-actions` package from PyPi:
-
-```shell
-python3 -m pip install --upgrade pip wheel setuptools
-python3 -m pip install --upgrade acryl-datahub-actions
-datahub actions version
-```
-
-
-## Configuring an Action
-
-Actions are configured using a YAML file, much in the same way DataHub ingestion sources are. An action configuration file consists of the following
-
-1. Action Pipeline Name (Should be unique and static)
-2. Source Configurations
-3. Transform + Filter Configurations
-4. Action Configuration
-5. Pipeline Options (Optional)
-6. DataHub API configs (Optional - required for select actions)
-
-With each component being independently pluggable and configurable.
-
-```yml
-# 1. Required: Action Pipeline Name
-name:
-
-# 2. Required: Event Source - Where to source event from.
-source:
- type:
- config:
- # Event Source specific configs (map)
-
-# 3a. Optional: Filter to run on events (map)
-filter:
- event_type:
- event:
- # Filter event fields by exact-match
-
-
-# 3b. Optional: Custom Transformers to run on events (array)
-transform:
- - type:
- config:
- # Transformer-specific configs (map)
-
-# 4. Required: Action - What action to take on events.
-action:
- type:
- config:
- # Action-specific configs (map)
-
-# 5. Optional: Additional pipeline options (error handling, etc)
-options:
- retry_count: 0 # The number of times to retry an Action with the same event. (If an exception is thrown). 0 by default.
- failure_mode: "CONTINUE" # What to do when an event fails to be processed. Either 'CONTINUE' to make progress or 'THROW' to stop the pipeline. Either way, the failed event will be logged to a failed_events.log file.
- failed_events_dir: "/tmp/datahub/actions" # The directory in which to write a failed_events.log file that tracks events which fail to be processed. Defaults to "/tmp/logs/datahub/actions".
-
-# 6. Optional: DataHub API configuration
-datahub:
- server: "http://localhost:8080" # Location of DataHub API
- # token: # Required if Metadata Service Auth enabled
-```
-
-### Example: Hello World
-
-A simple configuration file for a "Hello World" action, which simply prints every event it receives, is:
-
-```yml
-# 1. Action Pipeline Name
-name: "hello_world"
-# 2. Event Source: Where to source event from.
-source:
- type: "kafka"
- config:
- connection:
- bootstrap: ${KAFKA_BOOTSTRAP_SERVER:-localhost:9092}
- schema_registry_url: ${SCHEMA_REGISTRY_URL:-http://localhost:8081}
-# 3. Action: What action to take on events.
-action:
- type: "hello_world"
-```
-
-We can modify this configuration further to filter for specific events, by adding a "filter" block.
-
-```yml
-# 1. Action Pipeline Name
-name: "hello_world"
-
-# 2. Event Source - Where to source event from.
-source:
- type: "kafka"
- config:
- connection:
- bootstrap: ${KAFKA_BOOTSTRAP_SERVER:-localhost:9092}
- schema_registry_url: ${SCHEMA_REGISTRY_URL:-http://localhost:8081}
-
-# 3. Filter - Filter events that reach the Action
-filter:
- event_type: "EntityChangeEvent_v1"
- event:
- category: "TAG"
- operation: "ADD"
- modifier: "urn:li:tag:pii"
-
-# 4. Action - What action to take on events.
-action:
- type: "hello_world"
-```
-
-
-## Running an Action
-
-To run a new Action, just use the `actions` CLI command
-
-```
-datahub actions -c <path/to/config.yaml>
-```
-
-Once the Action is running, you will see
-
-```
-Action Pipeline with name '<action-pipeline-name>' is now running.
-```
-
-### Running multiple Actions
-
-You can run multiple action pipelines within the same command. Simply provide multiple
-config files by restating the "-c" command line argument.
-
-For example,
-
-```
-datahub actions -c <config-1.yaml> -c <config-2.yaml>
-```
-
-### Running in debug mode
-
-Simply append the `--debug` flag to the CLI to run your action in debug mode.
-
-```
-datahub actions -c <path/to/config.yaml> --debug
-```
-
-### Stopping an Action
-
-Just issue a Control-C as usual. You should see the Actions Pipeline shut down gracefully, with a small
-summary of processing results.
-
-```
-```
-Actions Pipeline with name '<action-pipeline-name>' has been stopped.
-```
-
----
-description: >-
-  This page describes the steps required to configure a remote ingestion
-  executor, which allows you to ingest metadata from private metadata sources
-  using private credentials via the DataHub UI.
----
-import FeatureAvailability from '@site/src/components/FeatureAvailability';
-
-# Setting up Remote Ingestion Executor on AWS
-
-
-## Overview
-
-UI-based Metadata Ingestion reduces the overhead associated with operating DataHub. It allows you to create, schedule, and run batch metadata ingestion on demand in just a few clicks, without requiring custom orchestration. Behind the scenes, a simple ingestion "executor" abstraction makes this possible.
-
-Acryl DataHub comes packaged with an Acryl-managed ingestion executor, which is hosted inside of Acryl's environment on your behalf. However, there are certain scenarios in which an Acryl-hosted executor is not sufficient to cover all of an organization's ingestion sources.
-
-For example, if an ingestion source is not publicly accessible via the internet, e.g. hosted privately within a specific AWS account, then the Acryl executor will be unable to extract metadata from it.
-
-
-To accommodate these cases, Acryl supports configuring a remote ingestion executor which can be deployed inside of your AWS account. This setup allows you to continue leveraging the Acryl DataHub console to create, schedule, and run metadata ingestion, all while retaining network and credential isolation.
-
-
-## Deploying a Remote Ingestion Executor
-1. **Provide AWS Account Id**: Provide the Acryl team with the id of the AWS account in which the remote executor will be hosted. This will be used to grant access to private Acryl containers and to create a unique SQS queue which your remote agent will subscribe to. The account id can be provided to your Acryl representative via email or [One Time Secret](https://onetimesecret.com/).
-
-2. **Provision an Acryl Executor (ECS):** The Acryl team will provide a [CloudFormation Template](https://github.com/acryldata/datahub-cloudformation/blob/master/Ingestion/templates/python.ecs.template.yaml) that you can run to provision an ECS cluster with a single remote ingestion task. It will also provision an AWS role for the task which grants the permissions necessary to read from and delete from the private SQS queue created for you, along with reading the secrets you've specified. At minimum, the template requires the following parameters:
- 1. **Deployment Location:** The AWS VPC + subnet in which the Acryl Executor task is to be provisioned.
- 2. **SQS Queue ARN**: Reference to your private SQS command queue. This is provided by Acryl and is used to configure IAM policies enabling the Task role to read from the shared queue.
- 3. **SQS Queue URL**: The URL referring to your private SQS command queue. This is provided by Acryl and is used to read messages.
- 4. **DataHub Personal Access Token**: A valid DataHub PAT. This can be generated inside of **Settings > Access Tokens** of DataHub web application. You can alternatively create a secret in AWS Secrets Manager and refer to that by ARN.
- 5. **Acryl DataHub URL**: The URL for your DataHub instance, e.g. `.acryl.io/gms`. Note that you MUST enter the trailing /gms when configuring the executor.
- 6. **Acryl Remote Executor Version:** The version of the remote executor to deploy. This is converted into a container image tag. It will be set to the latest version of the executor by default.
- 7. **Ingestion Source Secrets:** The template accepts up to 10 named secrets which live inside your environment. Secrets are specified using the **OptionalSecrets** parameter in the following form: `SECRET_NAME=SECRET_ARN`, with multiple entries separated by commas, e.g. `SECRET_NAME_1=SECRET_ARN_1,SECRET_NAME_2=SECRET_ARN_2`.
- 8. **Environment Variables:** The template accepts up to 10 arbitrary environment variables. These can be used to inject properties into your ingestion recipe from within your environment. Environment variables are specified using the **OptionalEnvVars** parameter in the following form: `ENV_VAR_NAME=ENV_VAR_VALUE`, with multiple entries separated by commas, e.g. `ENV_VAR_NAME_1=ENV_VAR_VALUE_1,ENV_VAR_NAME_2=ENV_VAR_VALUE_2`.
-
- Providing secrets enables you to manage ingestion sources from the DataHub UI without storing credentials inside DataHub. Once defined, secrets can be referenced by name inside of your DataHub Ingestion Source configurations using the usual convention: `${SECRET_NAME}`.
-
- Note that the only external secret provider that is currently supported is AWS Secrets Manager.
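-
- If you prefer to provision the stack programmatically rather than through the CloudFormation console, the sketch below shows one possible approach using boto3. The stack name, region, and every parameter key other than **OptionalSecrets** are illustrative assumptions; use the parameter names defined in the template you receive from Acryl.
-
- ```python
- # Hypothetical sketch: provision the remote executor stack with boto3.
- # All names and parameter keys below (except OptionalSecrets) are placeholders;
- # consult the Acryl-provided CloudFormation template for the real ones.
- import boto3
-
- cf = boto3.client("cloudformation", region_name="us-east-1")
-
- cf.create_stack(
-     StackName="acryl-remote-executor",
-     TemplateBody=open("python.ecs.template.yaml").read(),  # local copy of the Acryl template
-     Capabilities=["CAPABILITY_NAMED_IAM"],  # the template provisions an IAM role for the task
-     Parameters=[
-         {"ParameterKey": "SubnetId", "ParameterValue": "subnet-0123456789abcdef0"},
-         {"ParameterKey": "SqsQueueArn", "ParameterValue": "arn:aws:sqs:us-east-1:123456789012:acryl-commands"},
-         {"ParameterKey": "SqsQueueUrl", "ParameterValue": "https://sqs.us-east-1.amazonaws.com/123456789012/acryl-commands"},
-         {"ParameterKey": "DataHubAccessToken", "ParameterValue": "<datahub-pat-or-secret-arn>"},
-         {"ParameterKey": "DataHubGmsUrl", "ParameterValue": "https://<your-instance>.acryl.io/gms"},
-         # Up to 10 named secrets, as NAME=ARN pairs separated by commas:
-         {"ParameterKey": "OptionalSecrets", "ParameterValue": "SNOWFLAKE_PASSWORD=arn:aws:secretsmanager:us-east-1:123456789012:secret:snowflake-pw"},
-     ],
- )
- cf.get_waiter("stack_create_complete").wait(StackName="acryl-remote-executor")
- ```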
-
-
-
-
-
-3. **Test the Executor:** To test your remote executor:
-
- 1. Create a new Ingestion Source by clicking '**Create new Source**' on the '**Ingestion**' tab of the DataHub console. Configure your Ingestion Recipe as though you were running it from inside of your environment.
- 2. When working with "secret" fields (passwords, keys, etc.), you can refer to any "self-managed" secrets by name: `${SECRET_NAME}`
-
- 
- 3. In the 'Finish Up' step, click '**Advanced**'.
- 4. Update the '**Executor Id**' form field to be '**remote**'. This indicates that you'd like to use the remote executor.
- 5. Click '**Done**'.
-
- Now, simply click '**Execute**' to test out the remote executor. If your remote executor is configured properly, you should promptly see the ingestion task state change to 'Running'.
-
-
-## Updating a Remote Ingestion Executor
-To update the executor, i.e. to deploy a new container version, you'll need to update the CloudFormation Stack by re-deploying the CloudFormation template with a new set of parameters.
-### Steps - AWS Console
-1. Navigate to CloudFormation in AWS Console
-2. Select the stack dedicated to the remote executor
-3. Click **Update**
-4. Select **Replace Current Template**
-5. Select **Upload a template file**
-6. Upload a copy of the Acryl Remote Executor [CloudFormation Template](https://raw.githubusercontent.com/acryldata/datahub-cloudformation/master/Ingestion/templates/python.ecs.template.yaml)
-
-7. Click **Next**
-8. Change parameters based on your modifications (e.g. **ImageTag**)
-9. Click **Next**
-10. Confirm your parameter changes, and update. This should perform the necessary upgrades.
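-
-If you prefer to script the update instead of clicking through the console, the sketch below shows one possible approach with boto3. The stack name is a placeholder, and **ImageTag** is assumed to be the parameter controlling the container version (as suggested by step 8 above); substitute the actual parameter names from your stack.
-
-```python
-# Hypothetical sketch: re-deploy the template with a new executor image tag.
-import boto3
-
-cf = boto3.client("cloudformation", region_name="us-east-1")
-
-cf.update_stack(
-    StackName="acryl-remote-executor",                     # assumed stack name
-    TemplateBody=open("python.ecs.template.yaml").read(),  # fresh copy of the Acryl template
-    Capabilities=["CAPABILITY_NAMED_IAM"],
-    Parameters=[
-        # Change only what you need; keep previous values for everything else.
-        {"ParameterKey": "ImageTag", "ParameterValue": "v0.0.3.9"},
-        {"ParameterKey": "OptionalSecrets", "UsePreviousValue": True},
-        # ...list the remaining template parameters with UsePreviousValue=True...
-    ],
-)
-cf.get_waiter("stack_update_complete").wait(StackName="acryl-remote-executor")
-```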
-
-## FAQ
-
-### If I need to change (or add) a secret that is stored in AWS Secrets Manager, e.g. for rotation, will the new secret automatically get picked up by Acryl's executor?
-
-Unfortunately, no. Secrets are wired into the executor container at deployment time, via environment variables. Therefore, the ECS Task will need to be restarted (either manually or via a stack parameter update) whenever your secrets change.
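-
-As a sketch, one way to perform that restart without changing any stack parameters is to force a new deployment of the ECS service with boto3. The cluster, service, and region names below are placeholders; use the ones created by your CloudFormation stack.
-
-```python
-# Hypothetical sketch: restart the executor task so it picks up rotated secrets.
-import boto3
-
-ecs = boto3.client("ecs", region_name="us-east-1")
-ecs.update_service(
-    cluster="acryl-remote-executor",      # assumed ECS cluster name
-    service="acryl-remote-executor-svc",  # assumed ECS service name
-    forceNewDeployment=True,              # relaunches the task with freshly resolved secrets
-)
-```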
-
-### I want to deploy multiple Acryl Executors. Is this currently possible?
-
-This is possible, but requires that a separate SQS queue be maintained (one per executor). Please contact your Acryl representative for more information.
-
-### I've run the CloudFormation Template, how can I tell that the container was successfully deployed?
-
-We recommend verifying in the AWS Console by navigating to **ECS > Cluster > Stack Name > Services > Logs**.
-When you first deploy the executor, you should see a single log line indicating success:
-```
-Starting AWS executor consumer..
-```
-This indicates that the remote executor has established a successful connection to your DataHub instance and is ready to execute ingestion runs.
-If you DO NOT see this log line, but instead see something else, please contact your Acryl representative for support.
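-
-If you'd rather check programmatically, a sketch along the following lines can search the executor's CloudWatch log group for the success line. The log group name and region are placeholders; use whatever log group your stack created.
-
-```python
-# Hypothetical sketch: look for the startup success line in CloudWatch Logs.
-import boto3
-
-logs = boto3.client("logs", region_name="us-east-1")
-resp = logs.filter_log_events(
-    logGroupName="/ecs/acryl-remote-executor",         # assumed log group name
-    filterPattern='"Starting AWS executor consumer"',  # quoted phrase match
-)
-if resp["events"]:
-    print("Remote executor started successfully.")
-else:
-    print("Success line not found yet; check the ECS service events or contact Acryl support.")
-```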
-
-## Release Notes
-This is where release notes for the Acryl Remote Executor Container will live.
-
-### v0.0.3.9
-Bumping to the latest version of acryl-executor, which includes smarter messaging around OOM errors.
diff --git a/spaces/abhishek/first-order-motion-model/app.py b/spaces/abhishek/first-order-motion-model/app.py
deleted file mode 100644
index 277cbd32a94297eeba071f051d75468af3a208a2..0000000000000000000000000000000000000000
--- a/spaces/abhishek/first-order-motion-model/app.py
+++ /dev/null
@@ -1,144 +0,0 @@
-import gradio as gr
-import subprocess
-
-import yaml
-from tqdm import tqdm
-
-import imageio
-import numpy as np
-from skimage.transform import resize
-from skimage import img_as_ubyte
-import torch
-from sync_batchnorm import DataParallelWithCallback
-
-from modules.generator import OcclusionAwareGenerator
-from modules.keypoint_detector import KPDetector
-from animate import normalize_kp
-
-
-def load_checkpoints(config_path, checkpoint_path, cpu=False):
-
- with open(config_path) as f:
- config = yaml.load(f, Loader=yaml.FullLoader)
-
- generator = OcclusionAwareGenerator(
- **config["model_params"]["generator_params"], **config["model_params"]["common_params"]
- )
- if not cpu:
- generator.cuda()
-
- kp_detector = KPDetector(**config["model_params"]["kp_detector_params"], **config["model_params"]["common_params"])
- if not cpu:
- kp_detector.cuda()
-
- if cpu:
- checkpoint = torch.load(checkpoint_path, map_location=torch.device("cpu"))
- else:
- checkpoint = torch.load(checkpoint_path)
-
- generator.load_state_dict(checkpoint["generator"])
- kp_detector.load_state_dict(checkpoint["kp_detector"])
-
- if not cpu:
- generator = DataParallelWithCallback(generator)
- kp_detector = DataParallelWithCallback(kp_detector)
-
- generator.eval()
- kp_detector.eval()
-
- return generator, kp_detector
-
-
-def make_animation(
- source_image, driving_video, generator, kp_detector, relative=True, adapt_movement_scale=True, cpu=False
-):
- with torch.no_grad():
- predictions = []
- source = torch.tensor(source_image[np.newaxis].astype(np.float32)).permute(0, 3, 1, 2)
- if not cpu:
- source = source.cuda()
- driving = torch.tensor(np.array(driving_video)[np.newaxis].astype(np.float32)).permute(0, 4, 1, 2, 3)
- kp_source = kp_detector(source)
- kp_driving_initial = kp_detector(driving[:, :, 0])
-
- for frame_idx in tqdm(range(driving.shape[2])):
- driving_frame = driving[:, :, frame_idx]
- if not cpu:
- driving_frame = driving_frame.cuda()
- kp_driving = kp_detector(driving_frame)
- kp_norm = normalize_kp(
- kp_source=kp_source,
- kp_driving=kp_driving,
- kp_driving_initial=kp_driving_initial,
- use_relative_movement=relative,
- use_relative_jacobian=relative,
- adapt_movement_scale=adapt_movement_scale,
- )
- out = generator(source, kp_source=kp_source, kp_driving=kp_norm)
-
- predictions.append(np.transpose(out["prediction"].data.cpu().numpy(), [0, 2, 3, 1])[0])
- return predictions
-
-
-def inference(video, image):
- # trim video to 8 seconds
- cmd = f"ffmpeg -y -ss 00:00:00 -i {video} -to 00:00:08 -c copy video_input.mp4"
- subprocess.run(cmd.split())
- video = "video_input.mp4"
-
- source_image = imageio.imread(image)
- reader = imageio.get_reader(video)
- fps = reader.get_meta_data()["fps"]
- driving_video = []
- try:
- for im in reader:
- driving_video.append(im)
- except RuntimeError:
- pass
- reader.close()
-
- source_image = resize(source_image, (256, 256))[..., :3]
- driving_video = [resize(frame, (256, 256))[..., :3] for frame in driving_video]
-
- predictions = make_animation(
- source_image,
- driving_video,
- generator,
- kp_detector,
- relative=True,
- adapt_movement_scale=True,
- cpu=True,
- )
- imageio.mimsave("result.mp4", [img_as_ubyte(frame) for frame in predictions], fps=fps)
- imageio.mimsave("driving.mp4", [img_as_ubyte(frame) for frame in driving_video], fps=fps)
- cmd = f"ffmpeg -y -i result.mp4 -i {video} -c copy -map 0:0 -map 1:1 -shortest out.mp4"
- subprocess.run(cmd.split())
- cmd = "ffmpeg -y -i driving.mp4 -i out.mp4 -filter_complex hstack=inputs=2 final.mp4"
- subprocess.run(cmd.split())
- return "final.mp4"
-
-
-title = "First Order Motion Model"
-description = "Gradio demo for First Order Motion Model. Read more at the links below. Upload a video file (cropped to a face) and a facial image, and have fun :D. Please note that your video will be trimmed to the first 8 seconds."
-article = "First Order Motion Model for Image Animation | Github Repo"
-examples = [["bella_porch.mp4", "julien.png"]]
-generator, kp_detector = load_checkpoints(
- config_path="config/vox-256.yaml",
- checkpoint_path="weights/vox-adv-cpk.pth.tar",
- cpu=True,
-)
-
-iface = gr.Interface(
- inference,
- [
- gr.inputs.Video(type="mp4"),
- gr.inputs.Image(type="filepath"),
- ],
- outputs=gr.outputs.Video(label="Output Video"),
- examples=examples,
- enable_queue=True,
- title=title,
- article=article,
- description=description,
-)
-iface.launch(debug=True)
diff --git a/spaces/abhishek/first-order-motion-model/modules/keypoint_detector.py b/spaces/abhishek/first-order-motion-model/modules/keypoint_detector.py
deleted file mode 100644
index 33f9f1d50f2de3997c246067d947666f4dd728c5..0000000000000000000000000000000000000000
--- a/spaces/abhishek/first-order-motion-model/modules/keypoint_detector.py
+++ /dev/null
@@ -1,75 +0,0 @@
-from torch import nn
-import torch
-import torch.nn.functional as F
-from modules.util import Hourglass, make_coordinate_grid, AntiAliasInterpolation2d
-
-
-class KPDetector(nn.Module):
- """
- Detecting keypoints. Returns keypoint position and jacobian near each keypoint.
- """
-
- def __init__(self, block_expansion, num_kp, num_channels, max_features,
- num_blocks, temperature, estimate_jacobian=False, scale_factor=1,
- single_jacobian_map=False, pad=0):
- super(KPDetector, self).__init__()
-
- self.predictor = Hourglass(block_expansion, in_features=num_channels,
- max_features=max_features, num_blocks=num_blocks)
-
- self.kp = nn.Conv2d(in_channels=self.predictor.out_filters, out_channels=num_kp, kernel_size=(7, 7),
- padding=pad)
-
- if estimate_jacobian:
- self.num_jacobian_maps = 1 if single_jacobian_map else num_kp
- self.jacobian = nn.Conv2d(in_channels=self.predictor.out_filters,
- out_channels=4 * self.num_jacobian_maps, kernel_size=(7, 7), padding=pad)
- self.jacobian.weight.data.zero_()
- self.jacobian.bias.data.copy_(torch.tensor([1, 0, 0, 1] * self.num_jacobian_maps, dtype=torch.float))
- else:
- self.jacobian = None
-
- self.temperature = temperature
- self.scale_factor = scale_factor
- if self.scale_factor != 1:
- self.down = AntiAliasInterpolation2d(num_channels, self.scale_factor)
-
- def gaussian2kp(self, heatmap):
- """
- Extract the mean keypoint coordinates from a heatmap
- """
- shape = heatmap.shape
- heatmap = heatmap.unsqueeze(-1)
- grid = make_coordinate_grid(shape[2:], heatmap.type()).unsqueeze_(0).unsqueeze_(0)
- value = (heatmap * grid).sum(dim=(2, 3))
- kp = {'value': value}
-
- return kp
-
- def forward(self, x):
- if self.scale_factor != 1:
- x = self.down(x)
-
- feature_map = self.predictor(x)
- prediction = self.kp(feature_map)
-
- final_shape = prediction.shape
- heatmap = prediction.view(final_shape[0], final_shape[1], -1)
- heatmap = F.softmax(heatmap / self.temperature, dim=2)
- heatmap = heatmap.view(*final_shape)
-
- out = self.gaussian2kp(heatmap)
-
- if self.jacobian is not None:
- jacobian_map = self.jacobian(feature_map)
- jacobian_map = jacobian_map.reshape(final_shape[0], self.num_jacobian_maps, 4, final_shape[2],
- final_shape[3])
- heatmap = heatmap.unsqueeze(2)
-
- jacobian = heatmap * jacobian_map
- jacobian = jacobian.view(final_shape[0], final_shape[1], 4, -1)
- jacobian = jacobian.sum(dim=-1)
- jacobian = jacobian.view(jacobian.shape[0], jacobian.shape[1], 2, 2)
- out['jacobian'] = jacobian
-
- return out
diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/cnn/bricks/generalized_attention.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/cnn/bricks/generalized_attention.py
deleted file mode 100644
index 988d9adf2f289ef223bd1c680a5ae1d3387f0269..0000000000000000000000000000000000000000
--- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/cnn/bricks/generalized_attention.py
+++ /dev/null
@@ -1,412 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import math
-
-import numpy as np
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-from ..utils import kaiming_init
-from .registry import PLUGIN_LAYERS
-
-
-@PLUGIN_LAYERS.register_module()
-class GeneralizedAttention(nn.Module):
- """GeneralizedAttention module.
-
- See 'An Empirical Study of Spatial Attention Mechanisms in Deep Networks'
- (https://arxiv.org/abs/1711.07971) for details.
-
- Args:
- in_channels (int): Channels of the input feature map.
- spatial_range (int): The spatial range. -1 indicates no spatial range
- constraint. Default: -1.
- num_heads (int): The head number of empirical_attention module.
- Default: 9.
- position_embedding_dim (int): The position embedding dimension.
- Default: -1.
- position_magnitude (int): A multiplier acting on coord difference.
- Default: 1.
- kv_stride (int): The feature stride acting on key/value feature map.
- Default: 2.
- q_stride (int): The feature stride acting on query feature map.
- Default: 1.
- attention_type (str): A binary indicator string for indicating which
- items in generalized empirical_attention module are used.
- Default: '1111'.
-
- - '1000' indicates 'query and key content' (appr - appr) item,
- - '0100' indicates 'query content and relative position'
- (appr - position) item,
- - '0010' indicates 'key content only' (bias - appr) item,
- - '0001' indicates 'relative position only' (bias - position) item.
- """
-
- _abbr_ = 'gen_attention_block'
-
- def __init__(self,
- in_channels,
- spatial_range=-1,
- num_heads=9,
- position_embedding_dim=-1,
- position_magnitude=1,
- kv_stride=2,
- q_stride=1,
- attention_type='1111'):
-
- super(GeneralizedAttention, self).__init__()
-
- # hard range means local range for non-local operation
- self.position_embedding_dim = (
- position_embedding_dim
- if position_embedding_dim > 0 else in_channels)
-
- self.position_magnitude = position_magnitude
- self.num_heads = num_heads
- self.in_channels = in_channels
- self.spatial_range = spatial_range
- self.kv_stride = kv_stride
- self.q_stride = q_stride
- self.attention_type = [bool(int(_)) for _ in attention_type]
- self.qk_embed_dim = in_channels // num_heads
- out_c = self.qk_embed_dim * num_heads
-
- if self.attention_type[0] or self.attention_type[1]:
- self.query_conv = nn.Conv2d(
- in_channels=in_channels,
- out_channels=out_c,
- kernel_size=1,
- bias=False)
- self.query_conv.kaiming_init = True
-
- if self.attention_type[0] or self.attention_type[2]:
- self.key_conv = nn.Conv2d(
- in_channels=in_channels,
- out_channels=out_c,
- kernel_size=1,
- bias=False)
- self.key_conv.kaiming_init = True
-
- self.v_dim = in_channels // num_heads
- self.value_conv = nn.Conv2d(
- in_channels=in_channels,
- out_channels=self.v_dim * num_heads,
- kernel_size=1,
- bias=False)
- self.value_conv.kaiming_init = True
-
- if self.attention_type[1] or self.attention_type[3]:
- self.appr_geom_fc_x = nn.Linear(
- self.position_embedding_dim // 2, out_c, bias=False)
- self.appr_geom_fc_x.kaiming_init = True
-
- self.appr_geom_fc_y = nn.Linear(
- self.position_embedding_dim // 2, out_c, bias=False)
- self.appr_geom_fc_y.kaiming_init = True
-
- if self.attention_type[2]:
- stdv = 1.0 / math.sqrt(self.qk_embed_dim * 2)
- appr_bias_value = -2 * stdv * torch.rand(out_c) + stdv
- self.appr_bias = nn.Parameter(appr_bias_value)
-
- if self.attention_type[3]:
- stdv = 1.0 / math.sqrt(self.qk_embed_dim * 2)
- geom_bias_value = -2 * stdv * torch.rand(out_c) + stdv
- self.geom_bias = nn.Parameter(geom_bias_value)
-
- self.proj_conv = nn.Conv2d(
- in_channels=self.v_dim * num_heads,
- out_channels=in_channels,
- kernel_size=1,
- bias=True)
- self.proj_conv.kaiming_init = True
- self.gamma = nn.Parameter(torch.zeros(1))
-
- if self.spatial_range >= 0:
- # only works when non local is after 3*3 conv
- if in_channels == 256:
- max_len = 84
- elif in_channels == 512:
- max_len = 42
-
- max_len_kv = int((max_len - 1.0) / self.kv_stride + 1)
- local_constraint_map = np.ones(
- (max_len, max_len, max_len_kv, max_len_kv), dtype=int)
- for iy in range(max_len):
- for ix in range(max_len):
- local_constraint_map[
- iy, ix,
- max((iy - self.spatial_range) //
- self.kv_stride, 0):min((iy + self.spatial_range +
- 1) // self.kv_stride +
- 1, max_len),
- max((ix - self.spatial_range) //
- self.kv_stride, 0):min((ix + self.spatial_range +
- 1) // self.kv_stride +
- 1, max_len)] = 0
-
- self.local_constraint_map = nn.Parameter(
- torch.from_numpy(local_constraint_map).byte(),
- requires_grad=False)
-
- if self.q_stride > 1:
- self.q_downsample = nn.AvgPool2d(
- kernel_size=1, stride=self.q_stride)
- else:
- self.q_downsample = None
-
- if self.kv_stride > 1:
- self.kv_downsample = nn.AvgPool2d(
- kernel_size=1, stride=self.kv_stride)
- else:
- self.kv_downsample = None
-
- self.init_weights()
-
- def get_position_embedding(self,
- h,
- w,
- h_kv,
- w_kv,
- q_stride,
- kv_stride,
- device,
- dtype,
- feat_dim,
- wave_length=1000):
- # the default type of Tensor is float32, leading to type mismatch
- # in fp16 mode. Cast it to support fp16 mode.
- h_idxs = torch.linspace(0, h - 1, h).to(device=device, dtype=dtype)
- h_idxs = h_idxs.view((h, 1)) * q_stride
-
- w_idxs = torch.linspace(0, w - 1, w).to(device=device, dtype=dtype)
- w_idxs = w_idxs.view((w, 1)) * q_stride
-
- h_kv_idxs = torch.linspace(0, h_kv - 1, h_kv).to(
- device=device, dtype=dtype)
- h_kv_idxs = h_kv_idxs.view((h_kv, 1)) * kv_stride
-
- w_kv_idxs = torch.linspace(0, w_kv - 1, w_kv).to(
- device=device, dtype=dtype)
- w_kv_idxs = w_kv_idxs.view((w_kv, 1)) * kv_stride
-
- # (h, h_kv, 1)
- h_diff = h_idxs.unsqueeze(1) - h_kv_idxs.unsqueeze(0)
- h_diff *= self.position_magnitude
-
- # (w, w_kv, 1)
- w_diff = w_idxs.unsqueeze(1) - w_kv_idxs.unsqueeze(0)
- w_diff *= self.position_magnitude
-
- feat_range = torch.arange(0, feat_dim / 4).to(
- device=device, dtype=dtype)
-
- dim_mat = torch.Tensor([wave_length]).to(device=device, dtype=dtype)
- dim_mat = dim_mat**((4. / feat_dim) * feat_range)
- dim_mat = dim_mat.view((1, 1, -1))
-
- embedding_x = torch.cat(
- ((w_diff / dim_mat).sin(), (w_diff / dim_mat).cos()), dim=2)
-
- embedding_y = torch.cat(
- ((h_diff / dim_mat).sin(), (h_diff / dim_mat).cos()), dim=2)
-
- return embedding_x, embedding_y
-
- def forward(self, x_input):
- num_heads = self.num_heads
-
- # use empirical_attention
- if self.q_downsample is not None:
- x_q = self.q_downsample(x_input)
- else:
- x_q = x_input
- n, _, h, w = x_q.shape
-
- if self.kv_downsample is not None:
- x_kv = self.kv_downsample(x_input)
- else:
- x_kv = x_input
- _, _, h_kv, w_kv = x_kv.shape
-
- if self.attention_type[0] or self.attention_type[1]:
- proj_query = self.query_conv(x_q).view(
- (n, num_heads, self.qk_embed_dim, h * w))
- proj_query = proj_query.permute(0, 1, 3, 2)
-
- if self.attention_type[0] or self.attention_type[2]:
- proj_key = self.key_conv(x_kv).view(
- (n, num_heads, self.qk_embed_dim, h_kv * w_kv))
-
- if self.attention_type[1] or self.attention_type[3]:
- position_embed_x, position_embed_y = self.get_position_embedding(
- h, w, h_kv, w_kv, self.q_stride, self.kv_stride,
- x_input.device, x_input.dtype, self.position_embedding_dim)
- # (n, num_heads, w, w_kv, dim)
- position_feat_x = self.appr_geom_fc_x(position_embed_x).\
- view(1, w, w_kv, num_heads, self.qk_embed_dim).\
- permute(0, 3, 1, 2, 4).\
- repeat(n, 1, 1, 1, 1)
-
- # (n, num_heads, h, h_kv, dim)
- position_feat_y = self.appr_geom_fc_y(position_embed_y).\
- view(1, h, h_kv, num_heads, self.qk_embed_dim).\
- permute(0, 3, 1, 2, 4).\
- repeat(n, 1, 1, 1, 1)
-
- position_feat_x /= math.sqrt(2)
- position_feat_y /= math.sqrt(2)
-
- # accelerate for saliency only
- if (np.sum(self.attention_type) == 1) and self.attention_type[2]:
- appr_bias = self.appr_bias.\
- view(1, num_heads, 1, self.qk_embed_dim).\
- repeat(n, 1, 1, 1)
-
- energy = torch.matmul(appr_bias, proj_key).\
- view(n, num_heads, 1, h_kv * w_kv)
-
- h = 1
- w = 1
- else:
- # (n, num_heads, h*w, h_kv*w_kv), query before key, 540mb for
- if not self.attention_type[0]:
- energy = torch.zeros(
- n,
- num_heads,
- h,
- w,
- h_kv,
- w_kv,
- dtype=x_input.dtype,
- device=x_input.device)
-
- # attention_type[0]: appr - appr
- # attention_type[1]: appr - position
- # attention_type[2]: bias - appr
- # attention_type[3]: bias - position
- if self.attention_type[0] or self.attention_type[2]:
- if self.attention_type[0] and self.attention_type[2]:
- appr_bias = self.appr_bias.\
- view(1, num_heads, 1, self.qk_embed_dim)
- energy = torch.matmul(proj_query + appr_bias, proj_key).\
- view(n, num_heads, h, w, h_kv, w_kv)
-
- elif self.attention_type[0]:
- energy = torch.matmul(proj_query, proj_key).\
- view(n, num_heads, h, w, h_kv, w_kv)
-
- elif self.attention_type[2]:
- appr_bias = self.appr_bias.\
- view(1, num_heads, 1, self.qk_embed_dim).\
- repeat(n, 1, 1, 1)
-
- energy += torch.matmul(appr_bias, proj_key).\
- view(n, num_heads, 1, 1, h_kv, w_kv)
-
- if self.attention_type[1] or self.attention_type[3]:
- if self.attention_type[1] and self.attention_type[3]:
- geom_bias = self.geom_bias.\
- view(1, num_heads, 1, self.qk_embed_dim)
-
- proj_query_reshape = (proj_query + geom_bias).\
- view(n, num_heads, h, w, self.qk_embed_dim)
-
- energy_x = torch.matmul(
- proj_query_reshape.permute(0, 1, 3, 2, 4),
- position_feat_x.permute(0, 1, 2, 4, 3))
- energy_x = energy_x.\
- permute(0, 1, 3, 2, 4).unsqueeze(4)
-
- energy_y = torch.matmul(
- proj_query_reshape,
- position_feat_y.permute(0, 1, 2, 4, 3))
- energy_y = energy_y.unsqueeze(5)
-
- energy += energy_x + energy_y
-
- elif self.attention_type[1]:
- proj_query_reshape = proj_query.\
- view(n, num_heads, h, w, self.qk_embed_dim)
- proj_query_reshape = proj_query_reshape.\
- permute(0, 1, 3, 2, 4)
- position_feat_x_reshape = position_feat_x.\
- permute(0, 1, 2, 4, 3)
- position_feat_y_reshape = position_feat_y.\
- permute(0, 1, 2, 4, 3)
-
- energy_x = torch.matmul(proj_query_reshape,
- position_feat_x_reshape)
- energy_x = energy_x.permute(0, 1, 3, 2, 4).unsqueeze(4)
-
- energy_y = torch.matmul(proj_query_reshape,
- position_feat_y_reshape)
- energy_y = energy_y.unsqueeze(5)
-
- energy += energy_x + energy_y
-
- elif self.attention_type[3]:
- geom_bias = self.geom_bias.\
- view(1, num_heads, self.qk_embed_dim, 1).\
- repeat(n, 1, 1, 1)
-
- position_feat_x_reshape = position_feat_x.\
- view(n, num_heads, w*w_kv, self.qk_embed_dim)
-
- position_feat_y_reshape = position_feat_y.\
- view(n, num_heads, h * h_kv, self.qk_embed_dim)
-
- energy_x = torch.matmul(position_feat_x_reshape, geom_bias)
- energy_x = energy_x.view(n, num_heads, 1, w, 1, w_kv)
-
- energy_y = torch.matmul(position_feat_y_reshape, geom_bias)
- energy_y = energy_y.view(n, num_heads, h, 1, h_kv, 1)
-
- energy += energy_x + energy_y
-
- energy = energy.view(n, num_heads, h * w, h_kv * w_kv)
-
- if self.spatial_range >= 0:
- cur_local_constraint_map = \
- self.local_constraint_map[:h, :w, :h_kv, :w_kv].\
- contiguous().\
- view(1, 1, h*w, h_kv*w_kv)
-
- energy = energy.masked_fill_(cur_local_constraint_map,
- float('-inf'))
-
- attention = F.softmax(energy, 3)
-
- proj_value = self.value_conv(x_kv)
- proj_value_reshape = proj_value.\
- view((n, num_heads, self.v_dim, h_kv * w_kv)).\
- permute(0, 1, 3, 2)
-
- out = torch.matmul(attention, proj_value_reshape).\
- permute(0, 1, 3, 2).\
- contiguous().\
- view(n, self.v_dim * self.num_heads, h, w)
-
- out = self.proj_conv(out)
-
- # output is downsampled, upsample back to input size
- if self.q_downsample is not None:
- out = F.interpolate(
- out,
- size=x_input.shape[2:],
- mode='bilinear',
- align_corners=False)
-
- out = self.gamma * out + x_input
- return out
-
- def init_weights(self):
- for m in self.modules():
- if hasattr(m, 'kaiming_init') and m.kaiming_init:
- kaiming_init(
- m,
- mode='fan_in',
- nonlinearity='leaky_relu',
- bias=0,
- distribution='uniform',
- a=1)
diff --git a/spaces/agunes/ChatGPT4/README.md b/spaces/agunes/ChatGPT4/README.md
deleted file mode 100644
index 7938de14e5355209aaae713f289ca469181bbb17..0000000000000000000000000000000000000000
--- a/spaces/agunes/ChatGPT4/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-title: Chat-with-GPT4
-emoji: 🚀
-colorFrom: red
-colorTo: indigo
-sdk: gradio
-sdk_version: 3.21.0
-app_file: app.py
-pinned: false
-license: mit
-duplicated_from: ysharma/ChatGPT4
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/ahmedghani/Editing-Tools/image_variations.py b/spaces/ahmedghani/Editing-Tools/image_variations.py
deleted file mode 100644
index 89e41a9266fcadcaf78c221d084a3e7649c3f9f5..0000000000000000000000000000000000000000
--- a/spaces/ahmedghani/Editing-Tools/image_variations.py
+++ /dev/null
@@ -1,62 +0,0 @@
-import os
-import cv2
-from PIL import Image
-import numpy as np
-from diffusers import AutoencoderKL
-from diffusers import UniPCMultistepScheduler
-from diffusers import StableDiffusionControlNetPipeline, ControlNetModel
-import torch
-from transformers import BlipProcessor, BlipForConditionalGeneration
-
-device = "cuda:0" if torch.cuda.is_available() else "cpu"
-
-# Blip for Image Captioning
-processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
-model = BlipForConditionalGeneration.from_pretrained(
- "Salesforce/blip-image-captioning-base",
- torch_dtype=torch.float16).to(device)
-
-# ControlNet for Image Variation Generation based on Canny Edge Detection
-pipe = StableDiffusionControlNetPipeline.from_pretrained(
- "stabilityai/stable-diffusion-2-1-base",
- controlnet=ControlNetModel.from_pretrained(
- "thibaud/controlnet-sd21-canny-diffusers",
- torch_dtype=torch.float16),
- torch_dtype=torch.float16,
- revision="fp16",
- vae=AutoencoderKL.from_pretrained(
- "stabilityai/sd-vae-ft-mse",
- torch_dtype=torch.float16
- ).to(device)
-).to(device)
-
-pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
-pipe.enable_xformers_memory_efficient_attention()
-
-def pre_process_image(image):
- image = np.array(image)
- low_threshold = 100
- high_threshold = 200
- image = cv2.Canny(image, low_threshold, high_threshold)
- image = image[:, :, None]
- image = np.concatenate([image, image, image], axis=2)
- return Image.fromarray(image)
-
-def image_variations(image, input_prompt):
- canny_image = pre_process_image(image)
- if input_prompt:
- prompt = input_prompt
- else:
- inputs = processor(image, return_tensors="pt").to(device, torch.float16)
- out = model.generate(**inputs)
- prompt = processor.decode(out[0], skip_special_tokens=True)
- print(f"Blip Captioning: {prompt}")
-
- output_images = pipe(
- [prompt]*4,
- canny_image,
- negative_prompt=["distorted, noisy, lowres, bad anatomy, worst quality, low quality, bad eyes, rough face, unclear face"] * 4,
- num_inference_steps=25,
- ).images
-
- return output_images, canny_image
diff --git a/spaces/aidealab/interior-ai/models.py b/spaces/aidealab/interior-ai/models.py
deleted file mode 100644
index 9a241544e7dab9a1f6e7f3a1ecf464a59490e82e..0000000000000000000000000000000000000000
--- a/spaces/aidealab/interior-ai/models.py
+++ /dev/null
@@ -1,118 +0,0 @@
-"""This file contains methods for inference and image generation."""
-import logging
-from typing import List, Tuple, Dict
-
-import streamlit as st
-import torch
-import gc
-import time
-import numpy as np
-from PIL import Image
-from PIL import ImageFilter
-
-from diffusers import ControlNetModel, UniPCMultistepScheduler
-
-from config import WIDTH, HEIGHT
-from palette import ade_palette
-from stable_diffusion_controlnet_inpaint_img2img import (
- StableDiffusionControlNetInpaintImg2ImgPipeline,
-)
-from helpers import flush, postprocess_image_masking, convolution
-from pipelines import (
- ControlNetPipeline,
- SDPipeline,
- get_inpainting_pipeline,
- get_controlnet,
-)
-
-LOGGING = logging.getLogger(__name__)
-
-
-@torch.inference_mode()
-def make_image_controlnet(
- image: np.ndarray,
- mask_image: np.ndarray,
- controlnet_conditioning_image: np.ndarray,
- positive_prompt: str,
- negative_prompt: str,
- seed: int = 2356132,
-) -> List[Image.Image]:
- """Method to make image using controlnet
- Args:
- image (np.ndarray): input image
- mask_image (np.ndarray): mask image
- controlnet_conditioning_image (np.ndarray): conditioning image
- positive_prompt (str): positive prompt string
- negative_prompt (str): negative prompt string
- seed (int, optional): seed. Defaults to 2356132.
- Returns:
- List[Image.Image]: list of generated images
- """
-
- pipe = get_controlnet()
- flush()
-
- image = Image.fromarray(image).convert("RGB")
- controlnet_conditioning_image = Image.fromarray(
- controlnet_conditioning_image
- ).convert(
- "RGB"
- ) # .filter(ImageFilter.GaussianBlur(radius = 9))
- mask_image = Image.fromarray((mask_image * 255).astype(np.uint8)).convert("RGB")
- mask_image_postproc = convolution(mask_image)
-
- # st.success(f"{pipe.queue_size} images in the queue, can take up to {(pipe.queue_size+1) * 10} seconds")
- generated_image = pipe(
- prompt=positive_prompt,
- negative_prompt=negative_prompt,
- num_inference_steps=50,
- strength=1.00,
- guidance_scale=7.0,
- generator=torch.Generator(device="cuda").manual_seed(seed),
- image=image,
- mask_image=mask_image,
- controlnet_conditioning_image=controlnet_conditioning_image
- ).images[0]
- generated_image = postprocess_image_masking(generated_image, image, mask_image_postproc)
-
- return generated_image
-
-
-@torch.inference_mode()
-def make_inpainting(
- positive_prompt: str,
- image: Image,
- mask_image: np.ndarray,
- negative_prompt: str = "",
-) -> List[Image.Image]:
- """Method to make inpainting
- Args:
- positive_prompt (str): positive prompt string
- image (Image): input image
- mask_image (np.ndarray): mask image
- negative_prompt (str, optional): negative prompt string. Defaults to "".
- Returns:
- List[Image.Image]: list of generated images
- """
- pipe = get_inpainting_pipeline()
- mask_image = Image.fromarray((mask_image * 255).astype(np.uint8))
- mask_image_postproc = convolution(mask_image)
-
- flush()
- st.success(
- f"{pipe.queue_size} images in the queue, can take up to {(pipe.queue_size+1) * 10} seconds"
- )
- generated_image = pipe(
- image=image,
- mask_image=mask_image,
- prompt=positive_prompt,
- negative_prompt=negative_prompt,
- num_inference_steps=50,
- height=HEIGHT,
- width=WIDTH,
- ).images[0]
- generated_image = postprocess_image_masking(
- generated_image, image, mask_image_postproc
- )
-
- return generated_image
diff --git a/spaces/akhaliq/JoJoGAN/e4e/models/stylegan2/model.py b/spaces/akhaliq/JoJoGAN/e4e/models/stylegan2/model.py
deleted file mode 100644
index fcb12af85669ab6fd7f79cb14ddbdf80b2fbd83d..0000000000000000000000000000000000000000
--- a/spaces/akhaliq/JoJoGAN/e4e/models/stylegan2/model.py
+++ /dev/null
@@ -1,678 +0,0 @@
-import math
-import random
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-if torch.cuda.is_available():
- from op.fused_act import FusedLeakyReLU, fused_leaky_relu
- from op.upfirdn2d import upfirdn2d
-else:
- from op.fused_act_cpu import FusedLeakyReLU, fused_leaky_relu
- from op.upfirdn2d_cpu import upfirdn2d
-
-
-class PixelNorm(nn.Module):
- def __init__(self):
- super().__init__()
-
- def forward(self, input):
- return input * torch.rsqrt(torch.mean(input ** 2, dim=1, keepdim=True) + 1e-8)
-
-
-def make_kernel(k):
- k = torch.tensor(k, dtype=torch.float32)
-
- if k.ndim == 1:
- k = k[None, :] * k[:, None]
-
- k /= k.sum()
-
- return k
-
-
-class Upsample(nn.Module):
- def __init__(self, kernel, factor=2):
- super().__init__()
-
- self.factor = factor
- kernel = make_kernel(kernel) * (factor ** 2)
- self.register_buffer('kernel', kernel)
-
- p = kernel.shape[0] - factor
-
- pad0 = (p + 1) // 2 + factor - 1
- pad1 = p // 2
-
- self.pad = (pad0, pad1)
-
- def forward(self, input):
- out = upfirdn2d(input, self.kernel, up=self.factor, down=1, pad=self.pad)
-
- return out
-
-
-class Downsample(nn.Module):
- def __init__(self, kernel, factor=2):
- super().__init__()
-
- self.factor = factor
- kernel = make_kernel(kernel)
- self.register_buffer('kernel', kernel)
-
- p = kernel.shape[0] - factor
-
- pad0 = (p + 1) // 2
- pad1 = p // 2
-
- self.pad = (pad0, pad1)
-
- def forward(self, input):
- out = upfirdn2d(input, self.kernel, up=1, down=self.factor, pad=self.pad)
-
- return out
-
-
-class Blur(nn.Module):
- def __init__(self, kernel, pad, upsample_factor=1):
- super().__init__()
-
- kernel = make_kernel(kernel)
-
- if upsample_factor > 1:
- kernel = kernel * (upsample_factor ** 2)
-
- self.register_buffer('kernel', kernel)
-
- self.pad = pad
-
- def forward(self, input):
- out = upfirdn2d(input, self.kernel, pad=self.pad)
-
- return out
-
-
-class EqualConv2d(nn.Module):
- def __init__(
- self, in_channel, out_channel, kernel_size, stride=1, padding=0, bias=True
- ):
- super().__init__()
-
- self.weight = nn.Parameter(
- torch.randn(out_channel, in_channel, kernel_size, kernel_size)
- )
- self.scale = 1 / math.sqrt(in_channel * kernel_size ** 2)
-
- self.stride = stride
- self.padding = padding
-
- if bias:
- self.bias = nn.Parameter(torch.zeros(out_channel))
-
- else:
- self.bias = None
-
- def forward(self, input):
- out = F.conv2d(
- input,
- self.weight * self.scale,
- bias=self.bias,
- stride=self.stride,
- padding=self.padding,
- )
-
- return out
-
- def __repr__(self):
- return (
- f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]},'
- f' {self.weight.shape[2]}, stride={self.stride}, padding={self.padding})'
- )
-
-
-class EqualLinear(nn.Module):
- def __init__(
- self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None
- ):
- super().__init__()
-
- self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul))
-
- if bias:
- self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init))
-
- else:
- self.bias = None
-
- self.activation = activation
-
- self.scale = (1 / math.sqrt(in_dim)) * lr_mul
- self.lr_mul = lr_mul
-
- def forward(self, input):
- if self.activation:
- out = F.linear(input, self.weight * self.scale)
- out = fused_leaky_relu(out, self.bias * self.lr_mul)
-
- else:
- out = F.linear(
- input, self.weight * self.scale, bias=self.bias * self.lr_mul
- )
-
- return out
-
- def __repr__(self):
- return (
- f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})'
- )
-
-
-class ScaledLeakyReLU(nn.Module):
- def __init__(self, negative_slope=0.2):
- super().__init__()
-
- self.negative_slope = negative_slope
-
- def forward(self, input):
- out = F.leaky_relu(input, negative_slope=self.negative_slope)
-
- return out * math.sqrt(2)
-
-
-class ModulatedConv2d(nn.Module):
- def __init__(
- self,
- in_channel,
- out_channel,
- kernel_size,
- style_dim,
- demodulate=True,
- upsample=False,
- downsample=False,
- blur_kernel=[1, 3, 3, 1],
- ):
- super().__init__()
-
- self.eps = 1e-8
- self.kernel_size = kernel_size
- self.in_channel = in_channel
- self.out_channel = out_channel
- self.upsample = upsample
- self.downsample = downsample
-
- if upsample:
- factor = 2
- p = (len(blur_kernel) - factor) - (kernel_size - 1)
- pad0 = (p + 1) // 2 + factor - 1
- pad1 = p // 2 + 1
-
- self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor=factor)
-
- if downsample:
- factor = 2
- p = (len(blur_kernel) - factor) + (kernel_size - 1)
- pad0 = (p + 1) // 2
- pad1 = p // 2
-
- self.blur = Blur(blur_kernel, pad=(pad0, pad1))
-
- fan_in = in_channel * kernel_size ** 2
- self.scale = 1 / math.sqrt(fan_in)
- self.padding = kernel_size // 2
-
- self.weight = nn.Parameter(
- torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)
- )
-
- self.modulation = EqualLinear(style_dim, in_channel, bias_init=1)
-
- self.demodulate = demodulate
-
- def __repr__(self):
- return (
- f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, '
- f'upsample={self.upsample}, downsample={self.downsample})'
- )
-
- def forward(self, input, style):
- batch, in_channel, height, width = input.shape
-
- style = self.modulation(style).view(batch, 1, in_channel, 1, 1)
- weight = self.scale * self.weight * style
-
- if self.demodulate:
- demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-8)
- weight = weight * demod.view(batch, self.out_channel, 1, 1, 1)
-
- weight = weight.view(
- batch * self.out_channel, in_channel, self.kernel_size, self.kernel_size
- )
-
- if self.upsample:
- input = input.view(1, batch * in_channel, height, width)
- weight = weight.view(
- batch, self.out_channel, in_channel, self.kernel_size, self.kernel_size
- )
- weight = weight.transpose(1, 2).reshape(
- batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size
- )
- out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch)
- _, _, height, width = out.shape
- out = out.view(batch, self.out_channel, height, width)
- out = self.blur(out)
-
- elif self.downsample:
- input = self.blur(input)
- _, _, height, width = input.shape
- input = input.view(1, batch * in_channel, height, width)
- out = F.conv2d(input, weight, padding=0, stride=2, groups=batch)
- _, _, height, width = out.shape
- out = out.view(batch, self.out_channel, height, width)
-
- else:
- input = input.view(1, batch * in_channel, height, width)
- out = F.conv2d(input, weight, padding=self.padding, groups=batch)
- _, _, height, width = out.shape
- out = out.view(batch, self.out_channel, height, width)
-
- return out
-
-
-class NoiseInjection(nn.Module):
- def __init__(self):
- super().__init__()
-
- self.weight = nn.Parameter(torch.zeros(1))
-
- def forward(self, image, noise=None):
- if noise is None:
- batch, _, height, width = image.shape
- noise = image.new_empty(batch, 1, height, width).normal_()
-
- return image + self.weight * noise
-
-
-class ConstantInput(nn.Module):
- def __init__(self, channel, size=4):
- super().__init__()
-
- self.input = nn.Parameter(torch.randn(1, channel, size, size))
-
- def forward(self, input):
- batch = input.shape[0]
- out = self.input.repeat(batch, 1, 1, 1)
-
- return out
-
-
-class StyledConv(nn.Module):
- def __init__(
- self,
- in_channel,
- out_channel,
- kernel_size,
- style_dim,
- upsample=False,
- blur_kernel=[1, 3, 3, 1],
- demodulate=True,
- ):
- super().__init__()
-
- self.conv = ModulatedConv2d(
- in_channel,
- out_channel,
- kernel_size,
- style_dim,
- upsample=upsample,
- blur_kernel=blur_kernel,
- demodulate=demodulate,
- )
-
- self.noise = NoiseInjection()
- # self.bias = nn.Parameter(torch.zeros(1, out_channel, 1, 1))
- # self.activate = ScaledLeakyReLU(0.2)
- self.activate = FusedLeakyReLU(out_channel)
-
- def forward(self, input, style, noise=None):
- out = self.conv(input, style)
- out = self.noise(out, noise=noise)
- # out = out + self.bias
- out = self.activate(out)
-
- return out
-
-
-class ToRGB(nn.Module):
- def __init__(self, in_channel, style_dim, upsample=True, blur_kernel=[1, 3, 3, 1]):
- super().__init__()
-
- if upsample:
- self.upsample = Upsample(blur_kernel)
-
- self.conv = ModulatedConv2d(in_channel, 3, 1, style_dim, demodulate=False)
- self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1))
-
- def forward(self, input, style, skip=None):
- out = self.conv(input, style)
- out = out + self.bias
-
- if skip is not None:
- skip = self.upsample(skip)
-
- out = out + skip
-
- return out
-
-
-class Generator(nn.Module):
- def __init__(
- self,
- size,
- style_dim,
- n_mlp,
- channel_multiplier=2,
- blur_kernel=[1, 3, 3, 1],
- lr_mlp=0.01,
- ):
- super().__init__()
-
- self.size = size
-
- self.style_dim = style_dim
-
- layers = [PixelNorm()]
-
- for i in range(n_mlp):
- layers.append(
- EqualLinear(
- style_dim, style_dim, lr_mul=lr_mlp, activation='fused_lrelu'
- )
- )
-
- self.style = nn.Sequential(*layers)
-
- self.channels = {
- 4: 512,
- 8: 512,
- 16: 512,
- 32: 512,
- 64: 256 * channel_multiplier,
- 128: 128 * channel_multiplier,
- 256: 64 * channel_multiplier,
- 512: 32 * channel_multiplier,
- 1024: 16 * channel_multiplier,
- }
-
- self.input = ConstantInput(self.channels[4])
- self.conv1 = StyledConv(
- self.channels[4], self.channels[4], 3, style_dim, blur_kernel=blur_kernel
- )
- self.to_rgb1 = ToRGB(self.channels[4], style_dim, upsample=False)
-
- self.log_size = int(math.log(size, 2))
- self.num_layers = (self.log_size - 2) * 2 + 1
-
- self.convs = nn.ModuleList()
- self.upsamples = nn.ModuleList()
- self.to_rgbs = nn.ModuleList()
- self.noises = nn.Module()
-
- in_channel = self.channels[4]
-
- for layer_idx in range(self.num_layers):
- res = (layer_idx + 5) // 2
- shape = [1, 1, 2 ** res, 2 ** res]
- self.noises.register_buffer(f'noise_{layer_idx}', torch.randn(*shape))
-
- for i in range(3, self.log_size + 1):
- out_channel = self.channels[2 ** i]
-
- self.convs.append(
- StyledConv(
- in_channel,
- out_channel,
- 3,
- style_dim,
- upsample=True,
- blur_kernel=blur_kernel,
- )
- )
-
- self.convs.append(
- StyledConv(
- out_channel, out_channel, 3, style_dim, blur_kernel=blur_kernel
- )
- )
-
- self.to_rgbs.append(ToRGB(out_channel, style_dim))
-
- in_channel = out_channel
-
- self.n_latent = self.log_size * 2 - 2
-
- def make_noise(self):
- device = self.input.input.device
-
- noises = [torch.randn(1, 1, 2 ** 2, 2 ** 2, device=device)]
-
- for i in range(3, self.log_size + 1):
- for _ in range(2):
- noises.append(torch.randn(1, 1, 2 ** i, 2 ** i, device=device))
-
- return noises
-
- def mean_latent(self, n_latent):
- latent_in = torch.randn(
- n_latent, self.style_dim, device=self.input.input.device
- )
- latent = self.style(latent_in).mean(0, keepdim=True)
-
- return latent
-
- def get_latent(self, input):
- return self.style(input)
-
- def forward(
- self,
- styles,
- return_latents=False,
- return_features=False,
- inject_index=None,
- truncation=1,
- truncation_latent=None,
- input_is_latent=False,
- noise=None,
- randomize_noise=True,
- ):
- if not input_is_latent:
- styles = [self.style(s) for s in styles]
-
- if noise is None:
- if randomize_noise:
- noise = [None] * self.num_layers
- else:
- noise = [
- getattr(self.noises, f'noise_{i}') for i in range(self.num_layers)
- ]
-
- if truncation < 1:
- style_t = []
-
- for style in styles:
- style_t.append(
- truncation_latent + truncation * (style - truncation_latent)
- )
-
- styles = style_t
-
- if len(styles) < 2:
- inject_index = self.n_latent
-
- if styles[0].ndim < 3:
- latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
- else:
- latent = styles[0]
-
- else:
- if inject_index is None:
- inject_index = random.randint(1, self.n_latent - 1)
-
- latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
- latent2 = styles[1].unsqueeze(1).repeat(1, self.n_latent - inject_index, 1)
-
- latent = torch.cat([latent, latent2], 1)
-
- out = self.input(latent)
- out = self.conv1(out, latent[:, 0], noise=noise[0])
-
- skip = self.to_rgb1(out, latent[:, 1])
-
- i = 1
- for conv1, conv2, noise1, noise2, to_rgb in zip(
- self.convs[::2], self.convs[1::2], noise[1::2], noise[2::2], self.to_rgbs
- ):
- out = conv1(out, latent[:, i], noise=noise1)
- out = conv2(out, latent[:, i + 1], noise=noise2)
- skip = to_rgb(out, latent[:, i + 2], skip)
-
- i += 2
-
- image = skip
-
- if return_latents:
- return image, latent
- elif return_features:
- return image, out
- else:
- return image, None
-
-
-class ConvLayer(nn.Sequential):
- def __init__(
- self,
- in_channel,
- out_channel,
- kernel_size,
- downsample=False,
- blur_kernel=[1, 3, 3, 1],
- bias=True,
- activate=True,
- ):
- layers = []
-
- if downsample:
- factor = 2
- p = (len(blur_kernel) - factor) + (kernel_size - 1)
- pad0 = (p + 1) // 2
- pad1 = p // 2
-
- layers.append(Blur(blur_kernel, pad=(pad0, pad1)))
-
- stride = 2
- self.padding = 0
-
- else:
- stride = 1
- self.padding = kernel_size // 2
-
- layers.append(
- EqualConv2d(
- in_channel,
- out_channel,
- kernel_size,
- padding=self.padding,
- stride=stride,
- bias=bias and not activate,
- )
- )
-
- if activate:
- if bias:
- layers.append(FusedLeakyReLU(out_channel))
-
- else:
- layers.append(ScaledLeakyReLU(0.2))
-
- super().__init__(*layers)
-
-
-class ResBlock(nn.Module):
- def __init__(self, in_channel, out_channel, blur_kernel=[1, 3, 3, 1]):
- super().__init__()
-
- self.conv1 = ConvLayer(in_channel, in_channel, 3)
- self.conv2 = ConvLayer(in_channel, out_channel, 3, downsample=True)
-
- self.skip = ConvLayer(
- in_channel, out_channel, 1, downsample=True, activate=False, bias=False
- )
-
- def forward(self, input):
- out = self.conv1(input)
- out = self.conv2(out)
-
- skip = self.skip(input)
- out = (out + skip) / math.sqrt(2)
-
- return out
-
-
-class Discriminator(nn.Module):
- def __init__(self, size, channel_multiplier=2, blur_kernel=[1, 3, 3, 1]):
- super().__init__()
-
- channels = {
- 4: 512,
- 8: 512,
- 16: 512,
- 32: 512,
- 64: 256 * channel_multiplier,
- 128: 128 * channel_multiplier,
- 256: 64 * channel_multiplier,
- 512: 32 * channel_multiplier,
- 1024: 16 * channel_multiplier,
- }
-
- convs = [ConvLayer(3, channels[size], 1)]
-
- log_size = int(math.log(size, 2))
-
- in_channel = channels[size]
-
- for i in range(log_size, 2, -1):
- out_channel = channels[2 ** (i - 1)]
-
- convs.append(ResBlock(in_channel, out_channel, blur_kernel))
-
- in_channel = out_channel
-
- self.convs = nn.Sequential(*convs)
-
- self.stddev_group = 4
- self.stddev_feat = 1
-
- self.final_conv = ConvLayer(in_channel + 1, channels[4], 3)
- self.final_linear = nn.Sequential(
- EqualLinear(channels[4] * 4 * 4, channels[4], activation='fused_lrelu'),
- EqualLinear(channels[4], 1),
- )
-
- def forward(self, input):
- out = self.convs(input)
-
- batch, channel, height, width = out.shape
- group = min(batch, self.stddev_group)
- stddev = out.view(
- group, -1, self.stddev_feat, channel // self.stddev_feat, height, width
- )
- stddev = torch.sqrt(stddev.var(0, unbiased=False) + 1e-8)
- stddev = stddev.mean([2, 3, 4], keepdims=True).squeeze(2)
- stddev = stddev.repeat(group, 1, height, width)
- out = torch.cat([out, stddev], 1)
-
- out = self.final_conv(out)
-
- out = out.view(batch, -1)
- out = self.final_linear(out)
-
- return out
diff --git a/spaces/akuysal/demo-app-streamlit/README.md b/spaces/akuysal/demo-app-streamlit/README.md
deleted file mode 100644
index 590ddbf6cb09f089f517d9a916dd1b891845c5f5..0000000000000000000000000000000000000000
--- a/spaces/akuysal/demo-app-streamlit/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Streamlit Demo App Sentiment English
-emoji: 🚀
-colorFrom: red
-colorTo: red
-sdk: streamlit
-sdk_version: 1.17.0
-app_file: app.py
-pinned: false
-license: openrail
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
\ No newline at end of file
diff --git a/spaces/alamin655/websurfx/public/templates/general_tab.html b/spaces/alamin655/websurfx/public/templates/general_tab.html
deleted file mode 100644
index b83935a061f47dd160380172ee749183247d156d..0000000000000000000000000000000000000000
--- a/spaces/alamin655/websurfx/public/templates/general_tab.html
+++ /dev/null
@@ -1,4 +0,0 @@
-
- General
- Coming soon!!
-
diff --git a/spaces/aliabd/SummerTime/model/third_party/HMNet/ThirdParty/ROUGE/ROUGE-1.5.5/XML/RegExp.pm b/spaces/aliabd/SummerTime/model/third_party/HMNet/ThirdParty/ROUGE/ROUGE-1.5.5/XML/RegExp.pm
deleted file mode 100644
index f121fc46172d4a0178dc8cc0dde67e1866b6ff6b..0000000000000000000000000000000000000000
--- a/spaces/aliabd/SummerTime/model/third_party/HMNet/ThirdParty/ROUGE/ROUGE-1.5.5/XML/RegExp.pm
+++ /dev/null
@@ -1,82 +0,0 @@
-package XML::RegExp;
-
-use vars qw( $BaseChar $Ideographic $Letter $Digit $Extender
- $CombiningChar $NameChar
- $EntityRef $CharRef $Reference
- $Name $NmToken $AttValue
- $NCNameChar $NCName $Prefix $LocalPart $QName
- $VERSION );
-
-$VERSION = '0.04';
-
-$BaseChar = '(?:[a-zA-Z]|\xC3[\x80-\x96\x98-\xB6\xB8-\xBF]|\xC4[\x80-\xB1\xB4-\xBE]|\xC5[\x81-\x88\x8A-\xBE]|\xC6[\x80-\xBF]|\xC7[\x80-\x83\x8D-\xB0\xB4\xB5\xBA-\xBF]|\xC8[\x80-\x97]|\xC9[\x90-\xBF]|\xCA[\x80-\xA8\xBB-\xBF]|\xCB[\x80\x81]|\xCE[\x86\x88-\x8A\x8C\x8E-\xA1\xA3-\xBF]|\xCF[\x80-\x8E\x90-\x96\x9A\x9C\x9E\xA0\xA2-\xB3]|\xD0[\x81-\x8C\x8E-\xBF]|\xD1[\x80-\x8F\x91-\x9C\x9E-\xBF]|\xD2[\x80\x81\x90-\xBF]|\xD3[\x80-\x84\x87\x88\x8B\x8C\x90-\xAB\xAE-\xB5\xB8\xB9]|\xD4[\xB1-\xBF]|\xD5[\x80-\x96\x99\xA1-\xBF]|\xD6[\x80-\x86]|\xD7[\x90-\xAA\xB0-\xB2]|\xD8[\xA1-\xBA]|\xD9[\x81-\x8A\xB1-\xBF]|\xDA[\x80-\xB7\xBA-\xBE]|\xDB[\x80-\x8E\x90-\x93\x95\xA5\xA6]|\xE0(?:\xA4[\x85-\xB9\xBD]|\xA5[\x98-\xA1]|\xA6[\x85-\x8C\x8F\x90\x93-\xA8\xAA-\xB0\xB2\xB6-\xB9]|\xA7[\x9C\x9D\x9F-\xA1\xB0\xB1]|\xA8[\x85-\x8A\x8F\x90\x93-\xA8\xAA-\xB0\xB2\xB3\xB5\xB6\xB8\xB9]|\xA9[\x99-\x9C\x9E\xB2-\xB4]|\xAA[\x85-\x8B\x8D\x8F-\x91\x93-\xA8\xAA-\xB0\xB2\xB3\xB5-\xB9\xBD]|\xAB\xA0|\xAC[\x85-\x8C\x8F\x90\x93-\xA8\xAA-\xB0\xB2\xB3\xB6-\xB9\xBD]|\xAD[\x9C\x9D\x9F-\xA1]|\xAE[\x85-\x8A\x8E-\x90\x92-\x95\x99\x9A\x9C\x9E\x9F\xA3\xA4\xA8-\xAA\xAE-\xB5\xB7-\xB9]|\xB0[\x85-\x8C\x8E-\x90\x92-\xA8\xAA-\xB3\xB5-\xB9]|\xB1[\xA0\xA1]|\xB2[\x85-\x8C\x8E-\x90\x92-\xA8\xAA-\xB3\xB5-\xB9]|\xB3[\x9E\xA0\xA1]|\xB4[\x85-\x8C\x8E-\x90\x92-\xA8\xAA-\xB9]|\xB5[\xA0\xA1]|\xB8[\x81-\xAE\xB0\xB2\xB3]|\xB9[\x80-\x85]|\xBA[\x81\x82\x84\x87\x88\x8A\x8D\x94-\x97\x99-\x9F\xA1-\xA3\xA5\xA7\xAA\xAB\xAD\xAE\xB0\xB2\xB3\xBD]|\xBB[\x80-\x84]|\xBD[\x80-\x87\x89-\xA9])|\xE1(?:\x82[\xA0-\xBF]|\x83[\x80-\x85\x90-\xB6]|\x84[\x80\x82\x83\x85-\x87\x89\x8B\x8C\x8E-\x92\xBC\xBE]|\x85[\x80\x8C\x8E\x90\x94\x95\x99\x9F-\xA1\xA3\xA5\xA7\xA9\xAD\xAE\xB2\xB3\xB5]|\x86[\x9E\xA8\xAB\xAE\xAF\xB7\xB8\xBA\xBC-\xBF]|\x87[\x80-\x82\xAB\xB0\xB9]|[\xB8\xB9][\x80-\xBF]|\xBA[\x80-\x9B\xA0-\xBF]|\xBB[\x80-\xB9]|\xBC[\x80-\x95\x98-\x9D\xA0-\xBF]|\xBD[\x80-\x85\x88-\x8D\x90-\x97\x99\x9B\x9D\x9F-\xBD]|\xBE[\x80-\xB4\xB6-\xBC\xBE]|\xBF[\x82-\x84\x86-\x8C\x90-\x93\x96-\x9B\xA0-\xAC\xB2-\xB4\xB6-\xBC])|\xE2(?:\x84[\xA6\xAA\xAB\xAE]|\x86[\x80-\x82])|\xE3(?:\x81[\x81-\xBF]|\x82[\x80-\x94\xA1-\xBF]|\x83[\x80-\xBA]|\x84[\x85-\xAC])|\xEA(?:[\xB0-\xBF][\x80-\xBF])|\xEB(?:[\x80-\xBF][\x80-\xBF])|\xEC(?:[\x80-\xBF][\x80-\xBF])|\xED(?:[\x80-\x9D][\x80-\xBF]|\x9E[\x80-\xA3]))';
-
-$Ideographic = '(?:\xE3\x80[\x87\xA1-\xA9]|\xE4(?:[\xB8-\xBF][\x80-\xBF])|\xE5(?:[\x80-\xBF][\x80-\xBF])|\xE6(?:[\x80-\xBF][\x80-\xBF])|\xE7(?:[\x80-\xBF][\x80-\xBF])|\xE8(?:[\x80-\xBF][\x80-\xBF])|\xE9(?:[\x80-\xBD][\x80-\xBF]|\xBE[\x80-\xA5]))';
-
-$Digit = '(?:[0-9]|\xD9[\xA0-\xA9]|\xDB[\xB0-\xB9]|\xE0(?:\xA5[\xA6-\xAF]|\xA7[\xA6-\xAF]|\xA9[\xA6-\xAF]|\xAB[\xA6-\xAF]|\xAD[\xA6-\xAF]|\xAF[\xA7-\xAF]|\xB1[\xA6-\xAF]|\xB3[\xA6-\xAF]|\xB5[\xA6-\xAF]|\xB9[\x90-\x99]|\xBB[\x90-\x99]|\xBC[\xA0-\xA9]))';
-
-$Extender = '(?:\xC2\xB7|\xCB[\x90\x91]|\xCE\x87|\xD9\x80|\xE0(?:\xB9\x86|\xBB\x86)|\xE3(?:\x80[\x85\xB1-\xB5]|\x82[\x9D\x9E]|\x83[\xBC-\xBE]))';
-
-$CombiningChar = '(?:\xCC[\x80-\xBF]|\xCD[\x80-\x85\xA0\xA1]|\xD2[\x83-\x86]|\xD6[\x91-\xA1\xA3-\xB9\xBB-\xBD\xBF]|\xD7[\x81\x82\x84]|\xD9[\x8B-\x92\xB0]|\xDB[\x96-\xA4\xA7\xA8\xAA-\xAD]|\xE0(?:\xA4[\x81-\x83\xBC\xBE\xBF]|\xA5[\x80-\x8D\x91-\x94\xA2\xA3]|\xA6[\x81-\x83\xBC\xBE\xBF]|\xA7[\x80-\x84\x87\x88\x8B-\x8D\x97\xA2\xA3]|\xA8[\x82\xBC\xBE\xBF]|\xA9[\x80-\x82\x87\x88\x8B-\x8D\xB0\xB1]|\xAA[\x81-\x83\xBC\xBE\xBF]|\xAB[\x80-\x85\x87-\x89\x8B-\x8D]|\xAC[\x81-\x83\xBC\xBE\xBF]|\xAD[\x80-\x83\x87\x88\x8B-\x8D\x96\x97]|\xAE[\x82\x83\xBE\xBF]|\xAF[\x80-\x82\x86-\x88\x8A-\x8D\x97]|\xB0[\x81-\x83\xBE\xBF]|\xB1[\x80-\x84\x86-\x88\x8A-\x8D\x95\x96]|\xB2[\x82\x83\xBE\xBF]|\xB3[\x80-\x84\x86-\x88\x8A-\x8D\x95\x96]|\xB4[\x82\x83\xBE\xBF]|\xB5[\x80-\x83\x86-\x88\x8A-\x8D\x97]|\xB8[\xB1\xB4-\xBA]|\xB9[\x87-\x8E]|\xBA[\xB1\xB4-\xB9\xBB\xBC]|\xBB[\x88-\x8D]|\xBC[\x98\x99\xB5\xB7\xB9\xBE\xBF]|\xBD[\xB1-\xBF]|\xBE[\x80-\x84\x86-\x8B\x90-\x95\x97\x99-\xAD\xB1-\xB7\xB9])|\xE2\x83[\x90-\x9C\xA1]|\xE3(?:\x80[\xAA-\xAF]|\x82[\x99\x9A]))';
-
-$Letter = "(?:$BaseChar|$Ideographic)";
-$NameChar = "(?:[-._:]|$Letter|$Digit|$CombiningChar|$Extender)";
-
-$Name = "(?:(?:[:_]|$Letter)$NameChar*)";
-$NmToken = "(?:$NameChar+)";
-$EntityRef = "(?:\&$Name;)";
-$CharRef = "(?:\&#(?:[0-9]+|x[0-9a-fA-F]+);)";
-$Reference = "(?:$EntityRef|$CharRef)";
-
-#?? what if it contains entity references?
-$AttValue = "(?:\"(?:[^\"&<]*|$Reference)\"|'(?:[^\'&<]|$Reference)*')";
-
-#########################################################################
-# The following definitions came from the XML Namespaces spec:
-#########################################################################
-
-# Same as $NameChar without the ":"
-$NCNameChar = "(?:[-._]|$Letter|$Digit|$CombiningChar|$Extender)";
-
-# Same as $Name without the colons
-$NCName = "(?:(?:_|$Letter)$NCNameChar*)";
-
-$Prefix = $NCName;
-$LocalPart = $NCName;
-$QName = "(?:(?:$Prefix:)?$LocalPart)";
-
-return 1;
-
-__END__
-
-=head1 NAME
-
-XML::RegExp - Regular expressions for XML tokens
-
-=head1 SYNOPSIS
-
- use XML::RegExp;
-
- if ($my_name =~ /^$XML::RegExp::Name$/)
- {
- # $my_name is a valid XML 'Name'
- }
-
-=head1 DESCRIPTION
-
-This package contains regular expressions for the following XML tokens:
-BaseChar, Ideographic, Letter, Digit, Extender, CombiningChar, NameChar,
-EntityRef, CharRef, Reference, Name, NmToken, and AttValue.
-
-The definitions of these tokens were taken from the XML spec
-(Extensible Markup Language 1.0).
-
-Also contains the regular expressions for the following tokens from the
-XML Namespaces spec:
-NCNameChar, NCName, QName, Prefix and LocalPart.
-
-=head1 AUTHOR
-
-Original Author is Enno Derksen >
-
-Please send bugs, comments and suggestions to T.J. Mather >
diff --git a/spaces/aliabid94/AutoGPT/autogpt/config/config.py b/spaces/aliabid94/AutoGPT/autogpt/config/config.py
deleted file mode 100644
index 4b53df10e8d2832be7ffb321d9036aec5a47a79d..0000000000000000000000000000000000000000
--- a/spaces/aliabid94/AutoGPT/autogpt/config/config.py
+++ /dev/null
@@ -1,251 +0,0 @@
-"""Configuration class to store the state of bools for different scripts access."""
-import os
-
-import openai
-import yaml
-from colorama import Fore
-from dotenv import load_dotenv
-
-from autogpt.config.singleton import Singleton
-
-load_dotenv(verbose=True)
-
-
-class Config(metaclass=Singleton):
- """
- Configuration class to store the state of bools for different scripts access.
- """
-
- def __init__(self) -> None:
- """Initialize the Config class"""
- self.debug_mode = False
- self.continuous_mode = False
- self.continuous_limit = 0
- self.speak_mode = False
- self.skip_reprompt = False
- self.allow_downloads = False
- self.skip_news = False
-
- self.ai_settings_file = os.getenv("AI_SETTINGS_FILE", "ai_settings.yaml")
- self.fast_llm_model = os.getenv("FAST_LLM_MODEL", "gpt-3.5-turbo")
- self.smart_llm_model = os.getenv("SMART_LLM_MODEL", "gpt-4")
- self.fast_token_limit = int(os.getenv("FAST_TOKEN_LIMIT", 4000))
- self.smart_token_limit = int(os.getenv("SMART_TOKEN_LIMIT", 8000))
- self.browse_chunk_max_length = int(os.getenv("BROWSE_CHUNK_MAX_LENGTH", 8192))
-
- self.openai_api_key = os.getenv("OPENAI_API_KEY")
- self.temperature = float(os.getenv("TEMPERATURE", "1"))
- self.use_azure = os.getenv("USE_AZURE") == "True"
- self.execute_local_commands = (
- os.getenv("EXECUTE_LOCAL_COMMANDS", "False") == "True"
- )
- self.restrict_to_workspace = (
- os.getenv("RESTRICT_TO_WORKSPACE", "True") == "True"
- )
-
- if self.use_azure:
- self.load_azure_config()
- openai.api_type = self.openai_api_type
- openai.api_base = self.openai_api_base
- openai.api_version = self.openai_api_version
-
- self.elevenlabs_api_key = os.getenv("ELEVENLABS_API_KEY")
- self.elevenlabs_voice_1_id = os.getenv("ELEVENLABS_VOICE_1_ID")
- self.elevenlabs_voice_2_id = os.getenv("ELEVENLABS_VOICE_2_ID")
-
- self.use_mac_os_tts = False
- self.use_mac_os_tts = os.getenv("USE_MAC_OS_TTS")
-
- self.use_brian_tts = False
- self.use_brian_tts = os.getenv("USE_BRIAN_TTS")
-
- self.github_api_key = os.getenv("GITHUB_API_KEY")
- self.github_username = os.getenv("GITHUB_USERNAME")
-
- self.google_api_key = os.getenv("GOOGLE_API_KEY")
- self.custom_search_engine_id = os.getenv("CUSTOM_SEARCH_ENGINE_ID")
-
- self.pinecone_api_key = os.getenv("PINECONE_API_KEY")
- self.pinecone_region = os.getenv("PINECONE_ENV")
-
- self.weaviate_host = os.getenv("WEAVIATE_HOST")
- self.weaviate_port = os.getenv("WEAVIATE_PORT")
- self.weaviate_protocol = os.getenv("WEAVIATE_PROTOCOL", "http")
- self.weaviate_username = os.getenv("WEAVIATE_USERNAME", None)
- self.weaviate_password = os.getenv("WEAVIATE_PASSWORD", None)
- self.weaviate_scopes = os.getenv("WEAVIATE_SCOPES", None)
- self.weaviate_embedded_path = os.getenv("WEAVIATE_EMBEDDED_PATH")
- self.weaviate_api_key = os.getenv("WEAVIATE_API_KEY", None)
- self.use_weaviate_embedded = (
- os.getenv("USE_WEAVIATE_EMBEDDED", "False") == "True"
- )
-
- # milvus configuration, e.g., localhost:19530.
- self.milvus_addr = os.getenv("MILVUS_ADDR", "localhost:19530")
- self.milvus_collection = os.getenv("MILVUS_COLLECTION", "autogpt")
-
- self.image_provider = os.getenv("IMAGE_PROVIDER")
- self.image_size = int(os.getenv("IMAGE_SIZE", 256))
- self.huggingface_api_token = os.getenv("HUGGINGFACE_API_TOKEN")
- self.huggingface_image_model = os.getenv(
- "HUGGINGFACE_IMAGE_MODEL", "CompVis/stable-diffusion-v1-4"
- )
- self.huggingface_audio_to_text_model = os.getenv(
- "HUGGINGFACE_AUDIO_TO_TEXT_MODEL"
- )
- self.sd_webui_url = os.getenv("SD_WEBUI_URL", "http://localhost:7860")
- self.sd_webui_auth = os.getenv("SD_WEBUI_AUTH")
-
- # Selenium browser settings
- self.selenium_web_browser = os.getenv("USE_WEB_BROWSER", "chrome")
- self.selenium_headless = os.getenv("HEADLESS_BROWSER", "True") == "True"
-
- # User agent header to use when making HTTP requests
- # Some websites may reject the request outright with an error code if
- # no user agent header is present.
- self.user_agent = os.getenv(
- "USER_AGENT",
- "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36"
- " (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36",
- )
-
- self.redis_host = os.getenv("REDIS_HOST", "localhost")
- self.redis_port = os.getenv("REDIS_PORT", "6379")
- self.redis_password = os.getenv("REDIS_PASSWORD", "")
- self.wipe_redis_on_start = os.getenv("WIPE_REDIS_ON_START", "True") == "True"
- self.memory_index = os.getenv("MEMORY_INDEX", "auto-gpt")
- # Note: indexes must be created on db 0 in Redis; this is not configurable.
-
- self.memory_backend = os.getenv("MEMORY_BACKEND", "local")
- # Initialize the OpenAI API client
- openai.api_key = self.openai_api_key
-
- def get_azure_deployment_id_for_model(self, model: str) -> str:
- """
- Returns the relevant deployment id for the model specified.
-
- Parameters:
- model(str): The model to map to the deployment id.
-
- Returns:
- The matching deployment id if found, otherwise an empty string.
- """
- if model == self.fast_llm_model:
- return self.azure_model_to_deployment_id_map[
- "fast_llm_model_deployment_id"
- ] # type: ignore
- elif model == self.smart_llm_model:
- return self.azure_model_to_deployment_id_map[
- "smart_llm_model_deployment_id"
- ] # type: ignore
- elif model == "text-embedding-ada-002":
- return self.azure_model_to_deployment_id_map[
- "embedding_model_deployment_id"
- ] # type: ignore
- else:
- return ""
-
- AZURE_CONFIG_FILE = os.path.join(os.path.dirname(__file__), "..", "azure.yaml")
-
- def load_azure_config(self, config_file: str = AZURE_CONFIG_FILE) -> None:
- """
- Loads the Azure hosting configuration parameters from the specified YAML file.
-
- Parameters:
- config_file(str): The path to the config yaml file. DEFAULT: "../azure.yaml"
-
- Returns:
- None
- """
- try:
- with open(config_file) as file:
- config_params = yaml.load(file, Loader=yaml.FullLoader)
- except FileNotFoundError:
- config_params = {}
- self.openai_api_type = config_params.get("azure_api_type") or "azure"
- self.openai_api_base = config_params.get("azure_api_base") or ""
- self.openai_api_version = (
- config_params.get("azure_api_version") or "2023-03-15-preview"
- )
- # Must default to a dict: get_azure_deployment_id_for_model indexes it by string keys.
- self.azure_model_to_deployment_id_map = config_params.get("azure_model_map", {})
-
- def set_continuous_mode(self, value: bool) -> None:
- """Set the continuous mode value."""
- self.continuous_mode = value
-
- def set_continuous_limit(self, value: int) -> None:
- """Set the continuous limit value."""
- self.continuous_limit = value
-
- def set_speak_mode(self, value: bool) -> None:
- """Set the speak mode value."""
- self.speak_mode = value
-
- def set_fast_llm_model(self, value: str) -> None:
- """Set the fast LLM model value."""
- self.fast_llm_model = value
-
- def set_smart_llm_model(self, value: str) -> None:
- """Set the smart LLM model value."""
- self.smart_llm_model = value
-
- def set_fast_token_limit(self, value: int) -> None:
- """Set the fast token limit value."""
- self.fast_token_limit = value
-
- def set_smart_token_limit(self, value: int) -> None:
- """Set the smart token limit value."""
- self.smart_token_limit = value
-
- def set_browse_chunk_max_length(self, value: int) -> None:
- """Set the browse_website command chunk max length value."""
- self.browse_chunk_max_length = value
-
- def set_openai_api_key(self, value: str) -> None:
- """Set the OpenAI API key value."""
- self.openai_api_key = value
-
- def set_elevenlabs_api_key(self, value: str) -> None:
- """Set the ElevenLabs API key value."""
- self.elevenlabs_api_key = value
-
- def set_elevenlabs_voice_1_id(self, value: str) -> None:
- """Set the ElevenLabs Voice 1 ID value."""
- self.elevenlabs_voice_1_id = value
-
- def set_elevenlabs_voice_2_id(self, value: str) -> None:
- """Set the ElevenLabs Voice 2 ID value."""
- self.elevenlabs_voice_2_id = value
-
- def set_google_api_key(self, value: str) -> None:
- """Set the Google API key value."""
- self.google_api_key = value
-
- def set_custom_search_engine_id(self, value: str) -> None:
- """Set the custom search engine id value."""
- self.custom_search_engine_id = value
-
- def set_pinecone_api_key(self, value: str) -> None:
- """Set the Pinecone API key value."""
- self.pinecone_api_key = value
-
- def set_pinecone_region(self, value: str) -> None:
- """Set the Pinecone region value."""
- self.pinecone_region = value
-
- def set_debug_mode(self, value: bool) -> None:
- """Set the debug mode value."""
- self.debug_mode = value
-
-
-def check_openai_api_key() -> None:
- """Check if the OpenAI API key is set in config.py or as an environment variable."""
- cfg = Config()
- if not cfg.openai_api_key:
- print(
- Fore.RED
- + "Please set your OpenAI API key in .env or as an environment variable."
- )
- print("You can get your key from https://platform.openai.com/account/api-keys")
- exit(1)
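A minimal usage sketch of the Config singleton defined in the file above; the `config` import path and the exact call sequence are illustrative assumptions, not taken from this diff:

    from config import Config, check_openai_api_key  # illustrative import path

    check_openai_api_key()            # exits with code 1 if OPENAI_API_KEY is unset
    cfg = Config()                    # Singleton metaclass: every call yields the same instance
    cfg.set_debug_mode(True)
    cfg.set_smart_llm_model("gpt-4")
    assert Config() is cfg            # later constructions share the same state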
diff --git a/spaces/allknowingroger/Image-Models-Test114/README.md b/spaces/allknowingroger/Image-Models-Test114/README.md
deleted file mode 100644
index 2a2241f8e28017a6ba8d5c8d915362b27b22c247..0000000000000000000000000000000000000000
--- a/spaces/allknowingroger/Image-Models-Test114/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: More Image Models
-emoji: 😻
-colorFrom: red
-colorTo: gray
-sdk: gradio
-sdk_version: 3.23.0
-app_file: app.py
-duplicated_from: allknowingroger/Image-Models-Test113
----
-
-
\ No newline at end of file
diff --git a/spaces/allknowingroger/Image-Models-Test132/README.md b/spaces/allknowingroger/Image-Models-Test132/README.md
deleted file mode 100644
index 300d39de521455149d01b006a31ce0748698af81..0000000000000000000000000000000000000000
--- a/spaces/allknowingroger/Image-Models-Test132/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: More Image Models
-emoji: 😻
-colorFrom: red
-colorTo: gray
-sdk: gradio
-sdk_version: 3.23.0
-app_file: app.py
-duplicated_from: allknowingroger/Image-Models-Test131
----
-
-
\ No newline at end of file
diff --git a/spaces/almakedon/faster-whisper-webui/src/prompts/abstractPromptStrategy.py b/spaces/almakedon/faster-whisper-webui/src/prompts/abstractPromptStrategy.py
deleted file mode 100644
index 41e8cba49fdbcc294ea216fffcafee89b07ed4df..0000000000000000000000000000000000000000
--- a/spaces/almakedon/faster-whisper-webui/src/prompts/abstractPromptStrategy.py
+++ /dev/null
@@ -1,73 +0,0 @@
-import abc
-
-
-class AbstractPromptStrategy:
- """
- Represents a strategy for generating prompts for a given audio segment.
-
- Note that the strategy must be picklable, as it will be serialized and sent to the workers.
- """
-
- @abc.abstractmethod
- def get_segment_prompt(self, segment_index: int, whisper_prompt: str, detected_language: str) -> str:
- """
- Retrieves the prompt for a given segment.
-
- Parameters
- ----------
- segment_index: int
- The index of the segment.
- whisper_prompt: str
- The prompt for the segment generated by Whisper. This is typically concatenated with the initial prompt.
- detected_language: str
- The language detected for the segment.
- """
- pass
-
- @abc.abstractmethod
- def on_segment_finished(self, segment_index: int, whisper_prompt: str, detected_language: str, result: dict):
- """
- Called when a segment has finished processing.
-
- Parameters
- ----------
- segment_index: int
- The index of the segment.
- whisper_prompt: str
- The prompt for the segment generated by Whisper. This is typically concatenated with the initial prompt.
- detected_language: str
- The language detected for the segment.
- result: dict
- The result of the segment. It has the following format:
- {
- "text": str,
- "segments": [
- {
- "text": str,
- "start": float,
- "end": float,
- "words": [words],
- }
- ],
- "language": str,
- }
- """
- pass
-
- def _concat_prompt(self, prompt1, prompt2):
- """
- Concatenates two prompts.
-
- Parameters
- ----------
- prompt1: str
- The first prompt.
- prompt2: str
- The second prompt.
- """
- if prompt1 is None:
- return prompt2
- elif prompt2 is None:
- return prompt1
- else:
- return prompt1 + " " + prompt2
\ No newline at end of file
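A hedged sketch of a concrete strategy built on the abstract base above; the class name and the fixed-prompt behaviour are illustrative, and the import path merely mirrors the deleted file's location:

    from src.prompts.abstractPromptStrategy import AbstractPromptStrategy  # illustrative path

    class StaticPromptStrategy(AbstractPromptStrategy):
        """Illustrative strategy: prepend one fixed initial prompt to every segment."""

        def __init__(self, initial_prompt: str):
            self.initial_prompt = initial_prompt

        def get_segment_prompt(self, segment_index: int, whisper_prompt: str, detected_language: str) -> str:
            # Combine the fixed prompt with Whisper's own prompt via the base-class helper.
            return self._concat_prompt(self.initial_prompt, whisper_prompt)

        def on_segment_finished(self, segment_index: int, whisper_prompt: str, detected_language: str, result: dict):
            # This simple strategy keeps no per-segment state.
            pass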
diff --git a/spaces/amarchheda/ChordDuplicate/portaudio/src/hostapi/wasapi/mingw-include/endpointvolume.h b/spaces/amarchheda/ChordDuplicate/portaudio/src/hostapi/wasapi/mingw-include/endpointvolume.h
deleted file mode 100644
index 81155d7a92ad4ab0597888f88c542d1544187168..0000000000000000000000000000000000000000
--- a/spaces/amarchheda/ChordDuplicate/portaudio/src/hostapi/wasapi/mingw-include/endpointvolume.h
+++ /dev/null
@@ -1,620 +0,0 @@
-
-
-/* this ALWAYS GENERATED file contains the definitions for the interfaces */
-
-
- /* File created by MIDL compiler version 7.00.0499 */
-/* Compiler settings for endpointvolume.idl:
- Oicf, W1, Zp8, env=Win32 (32b run)
- protocol : dce , ms_ext, c_ext, robust
- error checks: allocation ref bounds_check enum stub_data
- VC __declspec() decoration level:
- __declspec(uuid()), __declspec(selectany), __declspec(novtable)
- DECLSPEC_UUID(), MIDL_INTERFACE()
-*/
-//@@MIDL_FILE_HEADING( )
-
-#pragma warning( disable: 4049 ) /* more than 64k source lines */
-
-
-/* verify that the version is high enough to compile this file*/
-#ifndef __REQUIRED_RPCNDR_H_VERSION__
-#define __REQUIRED_RPCNDR_H_VERSION__ 500
-#endif
-
-/* verify that the version is high enough to compile this file*/
-#ifndef __REQUIRED_RPCSAL_H_VERSION__
-#define __REQUIRED_RPCSAL_H_VERSION__ 100
-#endif
-
-#include "rpc.h"
-#include "rpcndr.h"
-
-#ifndef __RPCNDR_H_VERSION__
-#error this stub requires an updated version of <rpcndr.h>
-#endif // __RPCNDR_H_VERSION__
-
-#ifndef COM_NO_WINDOWS_H
-#include "windows.h"
-#include "ole2.h"
-#endif /*COM_NO_WINDOWS_H*/
-
-#ifndef __endpointvolume_h__
-#define __endpointvolume_h__
-
-#if defined(_MSC_VER) && (_MSC_VER >= 1020)
-#pragma once
-#endif
-
-/* Forward Declarations */
-
-#ifndef __IAudioEndpointVolumeCallback_FWD_DEFINED__
-#define __IAudioEndpointVolumeCallback_FWD_DEFINED__
-typedef interface IAudioEndpointVolumeCallback IAudioEndpointVolumeCallback;
-#endif /* __IAudioEndpointVolumeCallback_FWD_DEFINED__ */
-
-
-#ifndef __IAudioEndpointVolume_FWD_DEFINED__
-#define __IAudioEndpointVolume_FWD_DEFINED__
-typedef interface IAudioEndpointVolume IAudioEndpointVolume;
-#endif /* __IAudioEndpointVolume_FWD_DEFINED__ */
-
-
-#ifndef __IAudioMeterInformation_FWD_DEFINED__
-#define __IAudioMeterInformation_FWD_DEFINED__
-typedef interface IAudioMeterInformation IAudioMeterInformation;
-#endif /* __IAudioMeterInformation_FWD_DEFINED__ */
-
-
-/* header files for imported files */
-#include "unknwn.h"
-#include "devicetopology.h"
-
-#ifdef __cplusplus
-extern "C"{
-#endif
-
-
-/* interface __MIDL_itf_endpointvolume_0000_0000 */
-/* [local] */
-
-typedef struct AUDIO_VOLUME_NOTIFICATION_DATA
- {
- GUID guidEventContext;
- BOOL bMuted;
- float fMasterVolume;
- UINT nChannels;
- float afChannelVolumes[ 1 ];
- } AUDIO_VOLUME_NOTIFICATION_DATA;
-
-typedef struct AUDIO_VOLUME_NOTIFICATION_DATA *PAUDIO_VOLUME_NOTIFICATION_DATA;
-
-#define ENDPOINT_HARDWARE_SUPPORT_VOLUME 0x00000001
-#define ENDPOINT_HARDWARE_SUPPORT_MUTE 0x00000002
-#define ENDPOINT_HARDWARE_SUPPORT_METER 0x00000004
-
-
-extern RPC_IF_HANDLE __MIDL_itf_endpointvolume_0000_0000_v0_0_c_ifspec;
-extern RPC_IF_HANDLE __MIDL_itf_endpointvolume_0000_0000_v0_0_s_ifspec;
-
-#ifndef __IAudioEndpointVolumeCallback_INTERFACE_DEFINED__
-#define __IAudioEndpointVolumeCallback_INTERFACE_DEFINED__
-
-/* interface IAudioEndpointVolumeCallback */
-/* [unique][helpstring][nonextensible][uuid][local][object] */
-
-
-EXTERN_C const IID IID_IAudioEndpointVolumeCallback;
-
-#if defined(__cplusplus) && !defined(CINTERFACE)
-
- MIDL_INTERFACE("657804FA-D6AD-4496-8A60-352752AF4F89")
- IAudioEndpointVolumeCallback : public IUnknown
- {
- public:
- virtual HRESULT STDMETHODCALLTYPE OnNotify(
- PAUDIO_VOLUME_NOTIFICATION_DATA pNotify) = 0;
-
- };
-
-#else /* C style interface */
-
- typedef struct IAudioEndpointVolumeCallbackVtbl
- {
- BEGIN_INTERFACE
-
- HRESULT ( STDMETHODCALLTYPE *QueryInterface )(
- IAudioEndpointVolumeCallback * This,
- /* [in] */ REFIID riid,
- /* [iid_is][out] */
- __RPC__deref_out void **ppvObject);
-
- ULONG ( STDMETHODCALLTYPE *AddRef )(
- IAudioEndpointVolumeCallback * This);
-
- ULONG ( STDMETHODCALLTYPE *Release )(
- IAudioEndpointVolumeCallback * This);
-
- HRESULT ( STDMETHODCALLTYPE *OnNotify )(
- IAudioEndpointVolumeCallback * This,
- PAUDIO_VOLUME_NOTIFICATION_DATA pNotify);
-
- END_INTERFACE
- } IAudioEndpointVolumeCallbackVtbl;
-
- interface IAudioEndpointVolumeCallback
- {
- CONST_VTBL struct IAudioEndpointVolumeCallbackVtbl *lpVtbl;
- };
-
-
-
-#ifdef COBJMACROS
-
-
-#define IAudioEndpointVolumeCallback_QueryInterface(This,riid,ppvObject) \
- ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) )
-
-#define IAudioEndpointVolumeCallback_AddRef(This) \
- ( (This)->lpVtbl -> AddRef(This) )
-
-#define IAudioEndpointVolumeCallback_Release(This) \
- ( (This)->lpVtbl -> Release(This) )
-
-
-#define IAudioEndpointVolumeCallback_OnNotify(This,pNotify) \
- ( (This)->lpVtbl -> OnNotify(This,pNotify) )
-
-#endif /* COBJMACROS */
-
-
-#endif /* C style interface */
-
-
-
-
-#endif /* __IAudioEndpointVolumeCallback_INTERFACE_DEFINED__ */
-
-
-#ifndef __IAudioEndpointVolume_INTERFACE_DEFINED__
-#define __IAudioEndpointVolume_INTERFACE_DEFINED__
-
-/* interface IAudioEndpointVolume */
-/* [unique][helpstring][nonextensible][uuid][local][object] */
-
-
-EXTERN_C const IID IID_IAudioEndpointVolume;
-
-#if defined(__cplusplus) && !defined(CINTERFACE)
-
- MIDL_INTERFACE("5CDF2C82-841E-4546-9722-0CF74078229A")
- IAudioEndpointVolume : public IUnknown
- {
- public:
- virtual /* [helpstring] */ HRESULT STDMETHODCALLTYPE RegisterControlChangeNotify(
- /* [in] */
- __in IAudioEndpointVolumeCallback *pNotify) = 0;
-
- virtual /* [helpstring] */ HRESULT STDMETHODCALLTYPE UnregisterControlChangeNotify(
- /* [in] */
- __in IAudioEndpointVolumeCallback *pNotify) = 0;
-
- virtual /* [helpstring] */ HRESULT STDMETHODCALLTYPE GetChannelCount(
- /* [out] */
- __out UINT *pnChannelCount) = 0;
-
- virtual /* [helpstring] */ HRESULT STDMETHODCALLTYPE SetMasterVolumeLevel(
- /* [in] */
- __in float fLevelDB,
- /* [unique][in] */ LPCGUID pguidEventContext) = 0;
-
- virtual /* [helpstring] */ HRESULT STDMETHODCALLTYPE SetMasterVolumeLevelScalar(
- /* [in] */
- __in float fLevel,
- /* [unique][in] */ LPCGUID pguidEventContext) = 0;
-
- virtual /* [helpstring] */ HRESULT STDMETHODCALLTYPE GetMasterVolumeLevel(
- /* [out] */
- __out float *pfLevelDB) = 0;
-
- virtual /* [helpstring] */ HRESULT STDMETHODCALLTYPE GetMasterVolumeLevelScalar(
- /* [out] */
- __out float *pfLevel) = 0;
-
- virtual /* [helpstring] */ HRESULT STDMETHODCALLTYPE SetChannelVolumeLevel(
- /* [in] */
- __in UINT nChannel,
- float fLevelDB,
- /* [unique][in] */ LPCGUID pguidEventContext) = 0;
-
- virtual /* [helpstring] */ HRESULT STDMETHODCALLTYPE SetChannelVolumeLevelScalar(
- /* [in] */
- __in UINT nChannel,
- float fLevel,
- /* [unique][in] */ LPCGUID pguidEventContext) = 0;
-
- virtual /* [helpstring] */ HRESULT STDMETHODCALLTYPE GetChannelVolumeLevel(
- /* [in] */
- __in UINT nChannel,
- /* [out] */
- __out float *pfLevelDB) = 0;
-
- virtual /* [helpstring] */ HRESULT STDMETHODCALLTYPE GetChannelVolumeLevelScalar(
- /* [in] */
- __in UINT nChannel,
- /* [out] */
- __out float *pfLevel) = 0;
-
- virtual /* [helpstring] */ HRESULT STDMETHODCALLTYPE SetMute(
- /* [in] */
- __in BOOL bMute,
- /* [unique][in] */ LPCGUID pguidEventContext) = 0;
-
- virtual /* [helpstring] */ HRESULT STDMETHODCALLTYPE GetMute(
- /* [out] */
- __out BOOL *pbMute) = 0;
-
- virtual /* [helpstring] */ HRESULT STDMETHODCALLTYPE GetVolumeStepInfo(
- /* [out] */
- __out UINT *pnStep,
- /* [out] */
- __out UINT *pnStepCount) = 0;
-
- virtual /* [helpstring] */ HRESULT STDMETHODCALLTYPE VolumeStepUp(
- /* [unique][in] */ LPCGUID pguidEventContext) = 0;
-
- virtual /* [helpstring] */ HRESULT STDMETHODCALLTYPE VolumeStepDown(
- /* [unique][in] */ LPCGUID pguidEventContext) = 0;
-
- virtual /* [helpstring] */ HRESULT STDMETHODCALLTYPE QueryHardwareSupport(
- /* [out] */
- __out DWORD *pdwHardwareSupportMask) = 0;
-
- virtual /* [helpstring] */ HRESULT STDMETHODCALLTYPE GetVolumeRange(
- /* [out] */
- __out float *pflVolumeMindB,
- /* [out] */
- __out float *pflVolumeMaxdB,
- /* [out] */
- __out float *pflVolumeIncrementdB) = 0;
-
- };
-
-#else /* C style interface */
-
- typedef struct IAudioEndpointVolumeVtbl
- {
- BEGIN_INTERFACE
-
- HRESULT ( STDMETHODCALLTYPE *QueryInterface )(
- IAudioEndpointVolume * This,
- /* [in] */ REFIID riid,
- /* [iid_is][out] */
- __RPC__deref_out void **ppvObject);
-
- ULONG ( STDMETHODCALLTYPE *AddRef )(
- IAudioEndpointVolume * This);
-
- ULONG ( STDMETHODCALLTYPE *Release )(
- IAudioEndpointVolume * This);
-
- /* [helpstring] */ HRESULT ( STDMETHODCALLTYPE *RegisterControlChangeNotify )(
- IAudioEndpointVolume * This,
- /* [in] */
- __in IAudioEndpointVolumeCallback *pNotify);
-
- /* [helpstring] */ HRESULT ( STDMETHODCALLTYPE *UnregisterControlChangeNotify )(
- IAudioEndpointVolume * This,
- /* [in] */
- __in IAudioEndpointVolumeCallback *pNotify);
-
- /* [helpstring] */ HRESULT ( STDMETHODCALLTYPE *GetChannelCount )(
- IAudioEndpointVolume * This,
- /* [out] */
- __out UINT *pnChannelCount);
-
- /* [helpstring] */ HRESULT ( STDMETHODCALLTYPE *SetMasterVolumeLevel )(
- IAudioEndpointVolume * This,
- /* [in] */
- __in float fLevelDB,
- /* [unique][in] */ LPCGUID pguidEventContext);
-
- /* [helpstring] */ HRESULT ( STDMETHODCALLTYPE *SetMasterVolumeLevelScalar )(
- IAudioEndpointVolume * This,
- /* [in] */
- __in float fLevel,
- /* [unique][in] */ LPCGUID pguidEventContext);
-
- /* [helpstring] */ HRESULT ( STDMETHODCALLTYPE *GetMasterVolumeLevel )(
- IAudioEndpointVolume * This,
- /* [out] */
- __out float *pfLevelDB);
-
- /* [helpstring] */ HRESULT ( STDMETHODCALLTYPE *GetMasterVolumeLevelScalar )(
- IAudioEndpointVolume * This,
- /* [out] */
- __out float *pfLevel);
-
- /* [helpstring] */ HRESULT ( STDMETHODCALLTYPE *SetChannelVolumeLevel )(
- IAudioEndpointVolume * This,
- /* [in] */
- __in UINT nChannel,
- float fLevelDB,
- /* [unique][in] */ LPCGUID pguidEventContext);
-
- /* [helpstring] */ HRESULT ( STDMETHODCALLTYPE *SetChannelVolumeLevelScalar )(
- IAudioEndpointVolume * This,
- /* [in] */
- __in UINT nChannel,
- float fLevel,
- /* [unique][in] */ LPCGUID pguidEventContext);
-
- /* [helpstring] */ HRESULT ( STDMETHODCALLTYPE *GetChannelVolumeLevel )(
- IAudioEndpointVolume * This,
- /* [in] */
- __in UINT nChannel,
- /* [out] */
- __out float *pfLevelDB);
-
- /* [helpstring] */ HRESULT ( STDMETHODCALLTYPE *GetChannelVolumeLevelScalar )(
- IAudioEndpointVolume * This,
- /* [in] */
- __in UINT nChannel,
- /* [out] */
- __out float *pfLevel);
-
- /* [helpstring] */ HRESULT ( STDMETHODCALLTYPE *SetMute )(
- IAudioEndpointVolume * This,
- /* [in] */
- __in BOOL bMute,
- /* [unique][in] */ LPCGUID pguidEventContext);
-
- /* [helpstring] */ HRESULT ( STDMETHODCALLTYPE *GetMute )(
- IAudioEndpointVolume * This,
- /* [out] */
- __out BOOL *pbMute);
-
- /* [helpstring] */ HRESULT ( STDMETHODCALLTYPE *GetVolumeStepInfo )(
- IAudioEndpointVolume * This,
- /* [out] */
- __out UINT *pnStep,
- /* [out] */
- __out UINT *pnStepCount);
-
- /* [helpstring] */ HRESULT ( STDMETHODCALLTYPE *VolumeStepUp )(
- IAudioEndpointVolume * This,
- /* [unique][in] */ LPCGUID pguidEventContext);
-
- /* [helpstring] */ HRESULT ( STDMETHODCALLTYPE *VolumeStepDown )(
- IAudioEndpointVolume * This,
- /* [unique][in] */ LPCGUID pguidEventContext);
-
- /* [helpstring] */ HRESULT ( STDMETHODCALLTYPE *QueryHardwareSupport )(
- IAudioEndpointVolume * This,
- /* [out] */
- __out DWORD *pdwHardwareSupportMask);
-
- /* [helpstring] */ HRESULT ( STDMETHODCALLTYPE *GetVolumeRange )(
- IAudioEndpointVolume * This,
- /* [out] */
- __out float *pflVolumeMindB,
- /* [out] */
- __out float *pflVolumeMaxdB,
- /* [out] */
- __out float *pflVolumeIncrementdB);
-
- END_INTERFACE
- } IAudioEndpointVolumeVtbl;
-
- interface IAudioEndpointVolume
- {
- CONST_VTBL struct IAudioEndpointVolumeVtbl *lpVtbl;
- };
-
-
-
-#ifdef COBJMACROS
-
-
-#define IAudioEndpointVolume_QueryInterface(This,riid,ppvObject) \
- ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) )
-
-#define IAudioEndpointVolume_AddRef(This) \
- ( (This)->lpVtbl -> AddRef(This) )
-
-#define IAudioEndpointVolume_Release(This) \
- ( (This)->lpVtbl -> Release(This) )
-
-
-#define IAudioEndpointVolume_RegisterControlChangeNotify(This,pNotify) \
- ( (This)->lpVtbl -> RegisterControlChangeNotify(This,pNotify) )
-
-#define IAudioEndpointVolume_UnregisterControlChangeNotify(This,pNotify) \
- ( (This)->lpVtbl -> UnregisterControlChangeNotify(This,pNotify) )
-
-#define IAudioEndpointVolume_GetChannelCount(This,pnChannelCount) \
- ( (This)->lpVtbl -> GetChannelCount(This,pnChannelCount) )
-
-#define IAudioEndpointVolume_SetMasterVolumeLevel(This,fLevelDB,pguidEventContext) \
- ( (This)->lpVtbl -> SetMasterVolumeLevel(This,fLevelDB,pguidEventContext) )
-
-#define IAudioEndpointVolume_SetMasterVolumeLevelScalar(This,fLevel,pguidEventContext) \
- ( (This)->lpVtbl -> SetMasterVolumeLevelScalar(This,fLevel,pguidEventContext) )
-
-#define IAudioEndpointVolume_GetMasterVolumeLevel(This,pfLevelDB) \
- ( (This)->lpVtbl -> GetMasterVolumeLevel(This,pfLevelDB) )
-
-#define IAudioEndpointVolume_GetMasterVolumeLevelScalar(This,pfLevel) \
- ( (This)->lpVtbl -> GetMasterVolumeLevelScalar(This,pfLevel) )
-
-#define IAudioEndpointVolume_SetChannelVolumeLevel(This,nChannel,fLevelDB,pguidEventContext) \
- ( (This)->lpVtbl -> SetChannelVolumeLevel(This,nChannel,fLevelDB,pguidEventContext) )
-
-#define IAudioEndpointVolume_SetChannelVolumeLevelScalar(This,nChannel,fLevel,pguidEventContext) \
- ( (This)->lpVtbl -> SetChannelVolumeLevelScalar(This,nChannel,fLevel,pguidEventContext) )
-
-#define IAudioEndpointVolume_GetChannelVolumeLevel(This,nChannel,pfLevelDB) \
- ( (This)->lpVtbl -> GetChannelVolumeLevel(This,nChannel,pfLevelDB) )
-
-#define IAudioEndpointVolume_GetChannelVolumeLevelScalar(This,nChannel,pfLevel) \
- ( (This)->lpVtbl -> GetChannelVolumeLevelScalar(This,nChannel,pfLevel) )
-
-#define IAudioEndpointVolume_SetMute(This,bMute,pguidEventContext) \
- ( (This)->lpVtbl -> SetMute(This,bMute,pguidEventContext) )
-
-#define IAudioEndpointVolume_GetMute(This,pbMute) \
- ( (This)->lpVtbl -> GetMute(This,pbMute) )
-
-#define IAudioEndpointVolume_GetVolumeStepInfo(This,pnStep,pnStepCount) \
- ( (This)->lpVtbl -> GetVolumeStepInfo(This,pnStep,pnStepCount) )
-
-#define IAudioEndpointVolume_VolumeStepUp(This,pguidEventContext) \
- ( (This)->lpVtbl -> VolumeStepUp(This,pguidEventContext) )
-
-#define IAudioEndpointVolume_VolumeStepDown(This,pguidEventContext) \
- ( (This)->lpVtbl -> VolumeStepDown(This,pguidEventContext) )
-
-#define IAudioEndpointVolume_QueryHardwareSupport(This,pdwHardwareSupportMask) \
- ( (This)->lpVtbl -> QueryHardwareSupport(This,pdwHardwareSupportMask) )
-
-#define IAudioEndpointVolume_GetVolumeRange(This,pflVolumeMindB,pflVolumeMaxdB,pflVolumeIncrementdB) \
- ( (This)->lpVtbl -> GetVolumeRange(This,pflVolumeMindB,pflVolumeMaxdB,pflVolumeIncrementdB) )
-
-#endif /* COBJMACROS */
-
-
-#endif /* C style interface */
-
-
-
-
-#endif /* __IAudioEndpointVolume_INTERFACE_DEFINED__ */
-
-
-#ifndef __IAudioMeterInformation_INTERFACE_DEFINED__
-#define __IAudioMeterInformation_INTERFACE_DEFINED__
-
-/* interface IAudioMeterInformation */
-/* [unique][helpstring][nonextensible][uuid][local][object] */
-
-
-EXTERN_C const IID IID_IAudioMeterInformation;
-
-#if defined(__cplusplus) && !defined(CINTERFACE)
-
- MIDL_INTERFACE("C02216F6-8C67-4B5B-9D00-D008E73E0064")
- IAudioMeterInformation : public IUnknown
- {
- public:
- virtual /* [helpstring] */ HRESULT STDMETHODCALLTYPE GetPeakValue(
- /* [out] */ float *pfPeak) = 0;
-
- virtual /* [helpstring] */ HRESULT STDMETHODCALLTYPE GetMeteringChannelCount(
- /* [out] */
- __out UINT *pnChannelCount) = 0;
-
- virtual /* [helpstring] */ HRESULT STDMETHODCALLTYPE GetChannelsPeakValues(
- /* [in] */ UINT32 u32ChannelCount,
- /* [size_is][out] */ float *afPeakValues) = 0;
-
- virtual /* [helpstring] */ HRESULT STDMETHODCALLTYPE QueryHardwareSupport(
- /* [out] */
- __out DWORD *pdwHardwareSupportMask) = 0;
-
- };
-
-#else /* C style interface */
-
- typedef struct IAudioMeterInformationVtbl
- {
- BEGIN_INTERFACE
-
- HRESULT ( STDMETHODCALLTYPE *QueryInterface )(
- IAudioMeterInformation * This,
- /* [in] */ REFIID riid,
- /* [iid_is][out] */
- __RPC__deref_out void **ppvObject);
-
- ULONG ( STDMETHODCALLTYPE *AddRef )(
- IAudioMeterInformation * This);
-
- ULONG ( STDMETHODCALLTYPE *Release )(
- IAudioMeterInformation * This);
-
- /* [helpstring] */ HRESULT ( STDMETHODCALLTYPE *GetPeakValue )(
- IAudioMeterInformation * This,
- /* [out] */ float *pfPeak);
-
- /* [helpstring] */ HRESULT ( STDMETHODCALLTYPE *GetMeteringChannelCount )(
- IAudioMeterInformation * This,
- /* [out] */
- __out UINT *pnChannelCount);
-
- /* [helpstring] */ HRESULT ( STDMETHODCALLTYPE *GetChannelsPeakValues )(
- IAudioMeterInformation * This,
- /* [in] */ UINT32 u32ChannelCount,
- /* [size_is][out] */ float *afPeakValues);
-
- /* [helpstring] */ HRESULT ( STDMETHODCALLTYPE *QueryHardwareSupport )(
- IAudioMeterInformation * This,
- /* [out] */
- __out DWORD *pdwHardwareSupportMask);
-
- END_INTERFACE
- } IAudioMeterInformationVtbl;
-
- interface IAudioMeterInformation
- {
- CONST_VTBL struct IAudioMeterInformationVtbl *lpVtbl;
- };
-
-
-
-#ifdef COBJMACROS
-
-
-#define IAudioMeterInformation_QueryInterface(This,riid,ppvObject) \
- ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) )
-
-#define IAudioMeterInformation_AddRef(This) \
- ( (This)->lpVtbl -> AddRef(This) )
-
-#define IAudioMeterInformation_Release(This) \
- ( (This)->lpVtbl -> Release(This) )
-
-
-#define IAudioMeterInformation_GetPeakValue(This,pfPeak) \
- ( (This)->lpVtbl -> GetPeakValue(This,pfPeak) )
-
-#define IAudioMeterInformation_GetMeteringChannelCount(This,pnChannelCount) \
- ( (This)->lpVtbl -> GetMeteringChannelCount(This,pnChannelCount) )
-
-#define IAudioMeterInformation_GetChannelsPeakValues(This,u32ChannelCount,afPeakValues) \
- ( (This)->lpVtbl -> GetChannelsPeakValues(This,u32ChannelCount,afPeakValues) )
-
-#define IAudioMeterInformation_QueryHardwareSupport(This,pdwHardwareSupportMask) \
- ( (This)->lpVtbl -> QueryHardwareSupport(This,pdwHardwareSupportMask) )
-
-#endif /* COBJMACROS */
-
-
-#endif /* C style interface */
-
-
-
-
-#endif /* __IAudioMeterInformation_INTERFACE_DEFINED__ */
-
-
-/* Additional Prototypes for ALL interfaces */
-
-/* end of Additional Prototypes */
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
-
-
-
diff --git a/spaces/andy7475/english_place_name_generator/README.md b/spaces/andy7475/english_place_name_generator/README.md
deleted file mode 100644
index e1ce2e81657d64e6a0bc2194c60d49b61cf257d3..0000000000000000000000000000000000000000
--- a/spaces/andy7475/english_place_name_generator/README.md
+++ /dev/null
@@ -1,18 +0,0 @@
----
-title: English Place Name Generator
-emoji: 💻
-colorFrom: yellow
-colorTo: red
-sdk: streamlit
-sdk_version: 1.27.2
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
-
-# Acknowledgements
-Based on an assignment from the DeepLearning.ai course that uses an RNN to generate dinosaur names:
-https://www.coursera.org/learn/nlp-sequence-models
-
diff --git a/spaces/anhnv125/FRN/models/__init__.py b/spaces/anhnv125/FRN/models/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/aphenx/bingo/src/app/layout.tsx b/spaces/aphenx/bingo/src/app/layout.tsx
deleted file mode 100644
index 8b5122759987177b8dc4e4356d1d06cea25c15ea..0000000000000000000000000000000000000000
--- a/spaces/aphenx/bingo/src/app/layout.tsx
+++ /dev/null
@@ -1,47 +0,0 @@
-import { Metadata } from 'next'
-import { Toaster } from 'react-hot-toast'
-import { TailwindIndicator } from '@/components/tailwind-indicator'
-import { Providers } from '@/components/providers'
-import { Header } from '@/components/header'
-
-import '@/app/globals.scss'
-
-
-export const metadata: Metadata = {
- title: {
- default: 'Bing AI Chatbot',
- template: `%s - Bing AI Chatbot`
- },
- description: 'Bing AI Chatbot Web App.',
- themeColor: [
- { media: '(prefers-color-scheme: light)', color: 'white' },
- { media: '(prefers-color-scheme: dark)', color: 'dark' }
- ],
- icons: {
- icon: '/favicon.ico',
- shortcut: '../assets/images/logo.svg',
- apple: '../assets/images/logo.svg'
- }
-}
-
-interface RootLayoutProps {
- children: React.ReactNode
-}
-
-export default function RootLayout({ children }: RootLayoutProps) {
- return (
- <html>
- <body>
- <Toaster />
- <Providers>
- {/* @ts-ignore */}
- <Header />
- {children}
- <TailwindIndicator />
- </Providers>
- </body>
- </html>
- )
-}
diff --git a/spaces/artificialguybr/video-dubbing/TTS/TTS/encoder/utils/visual.py b/spaces/artificialguybr/video-dubbing/TTS/TTS/encoder/utils/visual.py
deleted file mode 100644
index 6575b86ec22818fe1dc0c1e6336a7fd255855330..0000000000000000000000000000000000000000
--- a/spaces/artificialguybr/video-dubbing/TTS/TTS/encoder/utils/visual.py
+++ /dev/null
@@ -1,50 +0,0 @@
-import matplotlib
-import matplotlib.pyplot as plt
-import numpy as np
-import umap
-
-matplotlib.use("Agg")
-
-
-colormap = (
- np.array(
- [
- [76, 255, 0],
- [0, 127, 70],
- [255, 0, 0],
- [255, 217, 38],
- [0, 135, 255],
- [165, 0, 165],
- [255, 167, 255],
- [0, 255, 255],
- [255, 96, 38],
- [142, 76, 0],
- [33, 0, 127],
- [0, 0, 0],
- [183, 183, 183],
- ],
- dtype=float,
- )
- / 255
-)
-
-
-def plot_embeddings(embeddings, num_classes_in_batch):
- num_utter_per_class = embeddings.shape[0] // num_classes_in_batch
-
- # if necessary get just the first 10 classes
- if num_classes_in_batch > 10:
- num_classes_in_batch = 10
- embeddings = embeddings[: num_classes_in_batch * num_utter_per_class]
-
- model = umap.UMAP()
- projection = model.fit_transform(embeddings)
- ground_truth = np.repeat(np.arange(num_classes_in_batch), num_utter_per_class)
- colors = [colormap[i] for i in ground_truth]
- fig, ax = plt.subplots(figsize=(16, 10))
- _ = ax.scatter(projection[:, 0], projection[:, 1], c=colors)
- plt.gca().set_aspect("equal", "datalim")
- plt.title("UMAP projection")
- plt.tight_layout()
- plt.savefig("umap")
- return fig
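A minimal sketch of calling plot_embeddings above; the shapes are illustrative and the import path mirrors the deleted module (umap-learn and matplotlib are required):

    import numpy as np
    from TTS.encoder.utils.visual import plot_embeddings  # path mirrors the deleted module

    # 4 classes x 8 utterances each, 256-dim embeddings; rows must be grouped by
    # class, since utterances-per-class is inferred from the batch layout.
    embeddings = np.random.rand(4 * 8, 256)
    fig = plot_embeddings(embeddings, num_classes_in_batch=4)  # also writes "umap.png" to the CWD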
diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/vegalite/v4/api.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/vegalite/v4/api.py
deleted file mode 100644
index 84162e11410c85e03ac18223efa2c6db392b75e9..0000000000000000000000000000000000000000
--- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/vegalite/v4/api.py
+++ /dev/null
@@ -1,2519 +0,0 @@
-import warnings
-
-import hashlib
-import io
-import json
-import jsonschema
-import pandas as pd
-from toolz.curried import pipe as _pipe
-
-from .schema import core, channels, mixins, Undefined, SCHEMA_URL
-
-from .data import data_transformers
-from ... import utils, expr
-from .display import renderers, VEGALITE_VERSION, VEGAEMBED_VERSION, VEGA_VERSION
-from .theme import themes
-
-
-# ------------------------------------------------------------------------
-# Data Utilities
-def _dataset_name(values):
- """Generate a unique hash of the data
-
- Parameters
- ----------
- values : list or dict
- A list/dict representation of data values.
-
- Returns
- -------
- name : string
- A unique name generated from the hash of the values.
- """
- if isinstance(values, core.InlineDataset):
- values = values.to_dict()
- if values == [{}]:
- return "empty"
- values_json = json.dumps(values, sort_keys=True)
- hsh = hashlib.md5(values_json.encode()).hexdigest()
- return "data-" + hsh
-
-
-def _consolidate_data(data, context):
- """If data is specified inline, then move it to context['datasets']
-
- This function will modify context in-place, and return a new version of data
- """
- values = Undefined
- kwds = {}
-
- if isinstance(data, core.InlineData):
- if data.name is Undefined and data.values is not Undefined:
- if isinstance(data.values, core.InlineDataset):
- values = data.to_dict()["values"]
- else:
- values = data.values
- kwds = {"format": data.format}
-
- elif isinstance(data, dict):
- if "name" not in data and "values" in data:
- values = data["values"]
- kwds = {k: v for k, v in data.items() if k != "values"}
-
- if values is not Undefined:
- name = _dataset_name(values)
- data = core.NamedData(name=name, **kwds)
- context.setdefault("datasets", {})[name] = values
-
- return data
-
-
-def _prepare_data(data, context=None):
- """Convert input data to data for use within schema
-
- Parameters
- ----------
- data :
- The input dataset in the form of a DataFrame, dictionary, altair data
- object, or other type that is recognized by the data transformers.
- context : dict (optional)
- The to_dict context in which the data is being prepared. This is used
- to keep track of information that needs to be passed up and down the
- recursive serialization routine, such as global named datasets.
- """
- if data is Undefined:
- return data
-
- # convert dataframes or objects with __geo_interface__ to dict
- if isinstance(data, pd.DataFrame) or hasattr(data, "__geo_interface__"):
- data = _pipe(data, data_transformers.get())
-
- # convert string input to a URLData
- if isinstance(data, str):
- data = core.UrlData(data)
-
- # consolidate inline data to top-level datasets
- if context is not None and data_transformers.consolidate_datasets:
- data = _consolidate_data(data, context)
-
- # if data is still not a recognized type, then return
- if not isinstance(data, (dict, core.Data)):
- warnings.warn("data of type {} not recognized".format(type(data)))
-
- return data
-
-
-# ------------------------------------------------------------------------
-# Aliases & specializations
-Bin = core.BinParams
-
-
-@utils.use_signature(core.LookupData)
-class LookupData(core.LookupData):
- def to_dict(self, *args, **kwargs):
- """Convert the chart to a dictionary suitable for JSON export."""
- copy = self.copy(deep=False)
- copy.data = _prepare_data(copy.data, kwargs.get("context"))
- return super(LookupData, copy).to_dict(*args, **kwargs)
-
-
-@utils.use_signature(core.FacetMapping)
-class FacetMapping(core.FacetMapping):
- _class_is_valid_at_instantiation = False
-
- def to_dict(self, *args, **kwargs):
- copy = self.copy(deep=False)
- context = kwargs.get("context", {})
- data = context.get("data", None)
- if isinstance(self.row, str):
- copy.row = core.FacetFieldDef(**utils.parse_shorthand(self.row, data))
- if isinstance(self.column, str):
- copy.column = core.FacetFieldDef(**utils.parse_shorthand(self.column, data))
- return super(FacetMapping, copy).to_dict(*args, **kwargs)
-
-
-# ------------------------------------------------------------------------
-# Encoding will contain channel objects that aren't valid at instantiation
-core.FacetedEncoding._class_is_valid_at_instantiation = False
-
-# ------------------------------------------------------------------------
-# These are parameters that are valid at the top level, but are not valid
-# for specs that are within a composite chart
-# (layer, hconcat, vconcat, facet, repeat)
-TOPLEVEL_ONLY_KEYS = {"background", "config", "autosize", "padding", "$schema"}
-
-
-def _get_channels_mapping():
- mapping = {}
- for attr in dir(channels):
- cls = getattr(channels, attr)
- if isinstance(cls, type) and issubclass(cls, core.SchemaBase):
- mapping[cls] = attr.replace("Value", "").lower()
- return mapping
-
-
-# -------------------------------------------------------------------------
-# Tools for working with selections
-class Selection(object):
- """A Selection object"""
-
- _counter = 0
-
- @classmethod
- def _get_name(cls):
- cls._counter += 1
- return "selector{:03d}".format(cls._counter)
-
- def __init__(self, name, selection):
- if name is None:
- name = self._get_name()
- self.name = name
- self.selection = selection
-
- def __repr__(self):
- return "Selection({0!r}, {1})".format(self.name, self.selection)
-
- def ref(self):
- return self.to_dict()
-
- def to_dict(self):
- return {
- "selection": self.name.to_dict()
- if hasattr(self.name, "to_dict")
- else self.name
- }
-
- def __invert__(self):
- return Selection(core.SelectionNot(**{"not": self.name}), self.selection)
-
- def __and__(self, other):
- if isinstance(other, Selection):
- other = other.name
- return Selection(
- core.SelectionAnd(**{"and": [self.name, other]}), self.selection
- )
-
- def __or__(self, other):
- if isinstance(other, Selection):
- other = other.name
- return Selection(core.SelectionOr(**{"or": [self.name, other]}), self.selection)
-
- def __getattr__(self, field_name):
- if field_name.startswith("__") and field_name.endswith("__"):
- raise AttributeError(field_name)
- return expr.core.GetAttrExpression(self.name, field_name)
-
- def __getitem__(self, field_name):
- return expr.core.GetItemExpression(self.name, field_name)
-
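# Illustrative sketch (not part of the original module): Selection objects
# compose with ~, & and | into SelectionNot / SelectionAnd / SelectionOr
# predicates, usable anywhere a selection predicate is accepted. For example,
# using the selection() helper defined below:
#
#   brush = selection(name="brush", type="interval")
#   click = selection(name="click", type="multi")
#   combined = brush & ~click
#   combined.to_dict()   # -> {"selection": {"and": ["brush", {"not": "click"}]}}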
-
-# ------------------------------------------------------------------------
-# Top-Level Functions
-
-
-def value(value, **kwargs):
- """Specify a value for use in an encoding"""
- return dict(value=value, **kwargs)
-
-
-def selection(name=None, type=Undefined, **kwds):
- """Create a named selection.
-
- Parameters
- ----------
- name : string (optional)
- The name of the selection. If not specified, a unique name will be
- created.
- type : string
- The type of the selection: one of ["interval", "single", or "multi"]
- **kwds :
- additional keywords will be used to construct a SelectionDef instance
- that controls the selection.
-
- Returns
- -------
- selection: Selection
- The selection object that can be used in chart creation.
- """
- return Selection(name, core.SelectionDef(type=type, **kwds))
-
-
-@utils.use_signature(core.IntervalSelection)
-def selection_interval(**kwargs):
- """Create a selection with type='interval'"""
- return selection(type="interval", **kwargs)
-
-
-@utils.use_signature(core.MultiSelection)
-def selection_multi(**kwargs):
- """Create a selection with type='multi'"""
- return selection(type="multi", **kwargs)
-
-
-@utils.use_signature(core.SingleSelection)
-def selection_single(**kwargs):
- """Create a selection with type='single'"""
- return selection(type="single", **kwargs)
-
-
-@utils.use_signature(core.Binding)
-def binding(input, **kwargs):
- """A generic binding"""
- return core.Binding(input=input, **kwargs)
-
-
-@utils.use_signature(core.BindCheckbox)
-def binding_checkbox(**kwargs):
- """A checkbox binding"""
- return core.BindCheckbox(input="checkbox", **kwargs)
-
-
-@utils.use_signature(core.BindRadioSelect)
-def binding_radio(**kwargs):
- """A radio button binding"""
- return core.BindRadioSelect(input="radio", **kwargs)
-
-
-@utils.use_signature(core.BindRadioSelect)
-def binding_select(**kwargs):
- """A select binding"""
- return core.BindRadioSelect(input="select", **kwargs)
-
-
-@utils.use_signature(core.BindRange)
-def binding_range(**kwargs):
- """A range binding"""
- return core.BindRange(input="range", **kwargs)
-
-
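# Illustrative sketch (not part of the original module): bindings attach a
# selection to an HTML input widget, e.g. a range slider driving a
# single-value selection:
#
#   slider = binding_range(min=0, max=100, step=1, name="cutoff: ")
#   cutoff = selection_single(name="cutoff", fields=["cutoff"],
#                             bind=slider, init={"cutoff": 50})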
-def condition(predicate, if_true, if_false, **kwargs):
- """A conditional attribute or encoding
-
- Parameters
- ----------
- predicate: Selection, PredicateComposition, expr.Expression, dict, or string
- the selection predicate or test predicate for the condition.
- if a string is passed, it will be treated as a test operand.
- if_true:
- the spec or object to use if the selection predicate is true
- if_false:
- the spec or object to use if the selection predicate is false
- **kwargs:
- additional keyword args are added to the resulting dict
-
- Returns
- -------
- spec: dict or VegaLiteSchema
- the spec that describes the condition
- """
- test_predicates = (str, expr.Expression, core.PredicateComposition)
-
- if isinstance(predicate, Selection):
- condition = {"selection": predicate.name}
- elif isinstance(predicate, core.SelectionComposition):
- condition = {"selection": predicate}
- elif isinstance(predicate, test_predicates):
- condition = {"test": predicate}
- elif isinstance(predicate, dict):
- condition = predicate
- else:
- raise NotImplementedError(
- "condition predicate of type {}" "".format(type(predicate))
- )
-
- if isinstance(if_true, core.SchemaBase):
- # convert to dict for now; the from_dict call below will wrap this
- # dict in the appropriate schema
- if_true = if_true.to_dict()
- elif isinstance(if_true, str):
- if_true = {"shorthand": if_true}
- if_true.update(kwargs)
- condition.update(if_true)
-
- if isinstance(if_false, core.SchemaBase):
- # For the selection, the channel definitions all allow selections
- # already. So use this SchemaBase wrapper if possible.
- selection = if_false.copy()
- selection.condition = condition
- elif isinstance(if_false, str):
- selection = {"condition": condition, "shorthand": if_false}
- selection.update(kwargs)
- else:
- selection = dict(condition=condition, **if_false)
-
- return selection
-
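# Illustrative sketch (not part of the original module): condition() is most
# often used to switch an encoding channel on a selection. vega_datasets is a
# separate optional package, used here purely for illustration:
#
#   import altair as alt
#   from vega_datasets import data
#
#   brush = alt.selection_interval()
#   chart = (
#       alt.Chart(data.cars.url)
#       .mark_point()
#       .encode(
#           x="Horsepower:Q",
#           y="Miles_per_Gallon:Q",
#           color=alt.condition(brush, "Origin:N", alt.value("lightgray")),
#       )
#       .add_selection(brush)
#   )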
-
-# --------------------------------------------------------------------
-# Top-level objects
-
-
-class TopLevelMixin(mixins.ConfigMethodMixin):
- """Mixin for top-level chart objects such as Chart, LayeredChart, etc."""
-
- _class_is_valid_at_instantiation = False
-
- def to_dict(self, *args, **kwargs):
- """Convert the chart to a dictionary suitable for JSON export"""
- # We make use of three context markers:
- # - 'data' points to the data that should be referenced for column type
- # inference.
- # - 'top_level' is a boolean flag that is assumed to be true; if it's
- # true then a "$schema" arg is added to the dict.
- # - 'datasets' is a dict of named datasets that should be inserted
- # in the top-level object
-
- # note: not a deep copy because we want datasets and data arguments to
- # be passed by reference
- context = kwargs.get("context", {}).copy()
- context.setdefault("datasets", {})
- is_top_level = context.get("top_level", True)
-
- copy = self.copy(deep=False)
- original_data = getattr(copy, "data", Undefined)
- copy.data = _prepare_data(original_data, context)
-
- if original_data is not Undefined:
- context["data"] = original_data
-
- # remaining to_dict calls are not at top level
- context["top_level"] = False
- kwargs["context"] = context
-
- try:
- dct = super(TopLevelMixin, copy).to_dict(*args, **kwargs)
- except jsonschema.ValidationError:
- dct = None
-
- # If we hit an error, then re-convert with validate='deep' to get
- # a more useful traceback. We don't do this by default because it's
- # much slower in the case that there are no errors.
- if dct is None:
- kwargs["validate"] = "deep"
- dct = super(TopLevelMixin, copy).to_dict(*args, **kwargs)
-
- # TODO: following entries are added after validation. Should they be validated?
- if is_top_level:
- # since this is top-level we add $schema if it's missing
- if "$schema" not in dct:
- dct["$schema"] = SCHEMA_URL
-
- # apply theme from theme registry
- the_theme = themes.get()
- dct = utils.update_nested(the_theme(), dct, copy=True)
-
- # update datasets
- if context["datasets"]:
- dct.setdefault("datasets", {}).update(context["datasets"])
-
- return dct
-
- def to_html(
- self,
- base_url="https://cdn.jsdelivr.net/npm/",
- output_div="vis",
- embed_options=None,
- json_kwds=None,
- fullhtml=True,
- requirejs=False,
- ):
- return utils.spec_to_html(
- self.to_dict(),
- mode="vega-lite",
- vegalite_version=VEGALITE_VERSION,
- vegaembed_version=VEGAEMBED_VERSION,
- vega_version=VEGA_VERSION,
- base_url=base_url,
- output_div=output_div,
- embed_options=embed_options,
- json_kwds=json_kwds,
- fullhtml=fullhtml,
- requirejs=requirejs,
- )
-
- def save(
- self,
- fp,
- format=None,
- override_data_transformer=True,
- scale_factor=1.0,
- vegalite_version=VEGALITE_VERSION,
- vega_version=VEGA_VERSION,
- vegaembed_version=VEGAEMBED_VERSION,
- **kwargs,
- ):
- """Save a chart to file in a variety of formats
-
- Supported formats are json, html, png, svg, pdf; the last three require
- the altair_saver package to be installed.
-
- Parameters
- ----------
- fp : string filename or file-like object
- file in which to write the chart.
- format : string (optional)
- the format to write: one of ['json', 'html', 'png', 'svg', 'pdf'].
- If not specified, the format will be determined from the filename.
- override_data_transformer : boolean (optional)
- If True (default), then the save action will be done with
- the MaxRowsError disabled. If False, then do not change the data
- transformer.
- scale_factor : float
- For svg or png formats, scale the image by this factor when saving.
- This can be used to control the size or resolution of the output.
- Default is 1.0
- **kwargs :
- Additional keyword arguments are passed to the output method
- associated with the specified format.
-
- """
- from ...utils.save import save
-
- kwds = dict(
- chart=self,
- fp=fp,
- format=format,
- scale_factor=scale_factor,
- vegalite_version=vegalite_version,
- vega_version=vega_version,
- vegaembed_version=vegaembed_version,
- **kwargs,
- )
-
- # By default we override the data transformer. This makes it so
- # that save() will succeed even for large datasets that would
- # normally trigger a MaxRowsError
- if override_data_transformer:
- with data_transformers.disable_max_rows():
- result = save(**kwds)
- else:
- result = save(**kwds)
- return result
-
- # Fallback for when rendering fails; the full repr is too long to be
- # useful in nearly all cases.
- def __repr__(self):
- return "alt.{}(...)".format(self.__class__.__name__)
-
- # Layering and stacking
- def __add__(self, other):
- if not isinstance(other, TopLevelMixin):
- raise ValueError("Only Chart objects can be layered.")
- return layer(self, other)
-
- def __and__(self, other):
- if not isinstance(other, TopLevelMixin):
- raise ValueError("Only Chart objects can be concatenated.")
- return vconcat(self, other)
-
- def __or__(self, other):
- if not isinstance(other, TopLevelMixin):
- raise ValueError("Only Chart objects can be concatenated.")
- return hconcat(self, other)
-
- def repeat(
- self,
- repeat=Undefined,
- row=Undefined,
- column=Undefined,
- layer=Undefined,
- columns=Undefined,
- **kwargs,
- ):
- """Return a RepeatChart built from the chart
-
- Fields within the chart can be set to correspond to the row or
- column using `alt.repeat('row')` and `alt.repeat('column')`.
-
- Parameters
- ----------
- repeat : list
- a list of data column names to be repeated. This cannot be
- used along with the ``row``, ``column`` or ``layer`` argument.
- row : list
- a list of data column names to be mapped to the row facet
- column : list
- a list of data column names to be mapped to the column facet
- layer : list
- a list of data column names to be layered. This cannot be
- used along with the ``row``, ``column`` or ``repeat`` argument.
- columns : int
- the maximum number of columns before wrapping. Only referenced
- if ``repeat`` is specified.
- **kwargs :
- additional keywords passed to RepeatChart.
-
- Returns
- -------
- chart : RepeatChart
- a repeated chart.
- """
- repeat_specified = repeat is not Undefined
- rowcol_specified = row is not Undefined or column is not Undefined
- layer_specified = layer is not Undefined
-
- if repeat_specified and rowcol_specified:
- raise ValueError(
- "repeat argument cannot be combined with row/column argument."
- )
- elif repeat_specified and layer_specified:
- raise ValueError("repeat argument cannot be combined with layer argument.")
- elif layer_specified and rowcol_specified:
- raise ValueError(
- "layer argument cannot be combined with row/column argument."
- )
-
- if repeat_specified:
- repeat = repeat
- elif layer_specified:
- repeat = core.LayerRepeatMapping(layer=layer)
- else:
- repeat = core.RepeatMapping(row=row, column=column)
-
- return RepeatChart(spec=self, repeat=repeat, columns=columns, **kwargs)
-
- def properties(self, **kwargs):
- """Set top-level properties of the Chart.
-
- Argument names and types are the same as class initialization.
- """
- copy = self.copy(deep=False)
- for key, val in kwargs.items():
- if key == "selection" and isinstance(val, Selection):
- # For backward compatibility with old selection interface.
- setattr(copy, key, {val.name: val.selection})
- else:
- # Don't validate data, because it hasn't been processed.
- if key != "data":
- self.validate_property(key, val)
- setattr(copy, key, val)
- return copy
-
- def project(
- self,
- type="mercator",
- center=Undefined,
- clipAngle=Undefined,
- clipExtent=Undefined,
- coefficient=Undefined,
- distance=Undefined,
- fraction=Undefined,
- lobes=Undefined,
- parallel=Undefined,
- precision=Undefined,
- radius=Undefined,
- ratio=Undefined,
- reflectX=Undefined,
- reflectY=Undefined,
- rotate=Undefined,
- scale=Undefined,
- spacing=Undefined,
- tilt=Undefined,
- translate=Undefined,
- **kwds,
- ):
- """Add a geographic projection to the chart.
-
- This is generally used either with ``mark_geoshape`` or with the
- ``latitude``/``longitude`` encodings.
-
- Available projection types are
- ['albers', 'albersUsa', 'azimuthalEqualArea', 'azimuthalEquidistant',
- 'conicConformal', 'conicEqualArea', 'conicEquidistant', 'equalEarth', 'equirectangular',
- 'gnomonic', 'identity', 'mercator', 'orthographic', 'stereographic', 'transverseMercator']
-
- Attributes
- ----------
- type : ProjectionType
- The cartographic projection to use. This value is case-insensitive, for example
- `"albers"` and `"Albers"` indicate the same projection type. You can find all valid
- projection types [in the
- documentation](https://vega.github.io/vega-lite/docs/projection.html#projection-types).
-
- **Default value:** `mercator`
- center : List(float)
- Sets the projection’s center to the specified center, a two-element array of
- longitude and latitude in degrees.
-
- **Default value:** `[0, 0]`
- clipAngle : float
- Sets the projection’s clipping circle radius to the specified angle in degrees. If
- `null`, switches to [antimeridian](http://bl.ocks.org/mbostock/3788999) cutting
- rather than small-circle clipping.
- clipExtent : List(List(float))
- Sets the projection’s viewport clip extent to the specified bounds in pixels. The
- extent bounds are specified as an array `[[x0, y0], [x1, y1]]`, where `x0` is the
- left-side of the viewport, `y0` is the top, `x1` is the right and `y1` is the
- bottom. If `null`, no viewport clipping is performed.
- coefficient : float
-
- distance : float
-
- fraction : float
-
- lobes : float
-
- parallel : float
-
- precision : Mapping(required=[length])
- Sets the threshold for the projection’s [adaptive
- resampling](http://bl.ocks.org/mbostock/3795544) to the specified value in pixels.
- This value corresponds to the [Douglas–Peucker
- distance](http://en.wikipedia.org/wiki/Ramer%E2%80%93Douglas%E2%80%93Peucker_algorithm).
- If precision is not specified, returns the projection’s current resampling
- precision which defaults to `√0.5 ≅ 0.70710…`.
- radius : float
-
- ratio : float
-
- reflectX : boolean
-
- reflectY : boolean
-
- rotate : List(float)
- Sets the projection’s three-axis rotation to the specified angles, which must be a
- two- or three-element array of numbers [`lambda`, `phi`, `gamma`] specifying the
- rotation angles in degrees about each spherical axis. (These correspond to yaw,
- pitch and roll.)
-
- **Default value:** `[0, 0, 0]`
- scale : float
- Sets the projection's scale (zoom) value, overriding automatic fitting.
-
- spacing : float
-
- tilt : float
-
- translate : List(float)
- Sets the projection's translation (pan) value, overriding automatic fitting.
-
- """
- projection = core.Projection(
- center=center,
- clipAngle=clipAngle,
- clipExtent=clipExtent,
- coefficient=coefficient,
- distance=distance,
- fraction=fraction,
- lobes=lobes,
- parallel=parallel,
- precision=precision,
- radius=radius,
- ratio=ratio,
- reflectX=reflectX,
- reflectY=reflectY,
- rotate=rotate,
- scale=scale,
- spacing=spacing,
- tilt=tilt,
- translate=translate,
- type=type,
- **kwds,
- )
- return self.properties(projection=projection)
-
- def _add_transform(self, *transforms):
- """Copy the chart and add specified transforms to chart.transform"""
- copy = self.copy(deep=["transform"])
- if copy.transform is Undefined:
- copy.transform = []
- copy.transform.extend(transforms)
- return copy
-
- def transform_aggregate(self, aggregate=Undefined, groupby=Undefined, **kwds):
- """
- Add an AggregateTransform to the schema.
-
- Parameters
- ----------
- aggregate : List(:class:`AggregatedFieldDef`)
- Array of objects that define fields to aggregate.
- groupby : List(string)
- The data fields to group by. If not specified, a single group containing all data
- objects will be used.
- **kwds :
- additional keywords are converted to aggregates using standard
- shorthand parsing.
-
- Returns
- -------
- self : Chart object
- returns chart to allow for chaining
-
- Examples
- --------
- The aggregate transform allows you to specify transforms directly using
- the same shorthand syntax as used in encodings:
-
- >>> import altair as alt
- >>> chart1 = alt.Chart().transform_aggregate(
- ... mean_acc='mean(Acceleration)',
- ... groupby=['Origin']
- ... )
- >>> print(chart1.transform[0].to_json()) # doctest: +NORMALIZE_WHITESPACE
- {
- "aggregate": [
- {
- "as": "mean_acc",
- "field": "Acceleration",
- "op": "mean"
- }
- ],
- "groupby": [
- "Origin"
- ]
- }
-
- It also supports including AggregatedFieldDef instances or dicts directly,
- so you can create the above transform like this:
-
- >>> chart2 = alt.Chart().transform_aggregate(
- ... [alt.AggregatedFieldDef(field='Acceleration', op='mean',
- ... **{'as': 'mean_acc'})],
- ... groupby=['Origin']
- ... )
- >>> chart2.transform == chart1.transform
- True
-
- See Also
- --------
- alt.AggregateTransform : underlying transform object
-
- """
- if aggregate is Undefined:
- aggregate = []
- for key, val in kwds.items():
- parsed = utils.parse_shorthand(val)
- dct = {
- "as": key,
- "field": parsed.get("field", Undefined),
- "op": parsed.get("aggregate", Undefined),
- }
- aggregate.append(core.AggregatedFieldDef(**dct))
- return self._add_transform(
- core.AggregateTransform(aggregate=aggregate, groupby=groupby)
- )
-
- def transform_bin(self, as_=Undefined, field=Undefined, bin=True, **kwargs):
- """
- Add a BinTransform to the schema.
-
- Parameters
- ----------
- as_ : anyOf(string, List(string))
- The output fields at which to write the start and end bin values.
- bin : anyOf(boolean, :class:`BinParams`)
- An object indicating bin properties, or simply ``true`` for using default bin
- parameters.
- field : string
- The data field to bin.
-
- Returns
- -------
- self : Chart object
- returns chart to allow for chaining
-
- Examples
- --------
- >>> import altair as alt
- >>> chart = alt.Chart().transform_bin("x_binned", "x")
- >>> chart.transform[0]
- BinTransform({
- as: 'x_binned',
- bin: True,
- field: 'x'
- })
-
- >>> chart = alt.Chart().transform_bin("x_binned", "x",
- ... bin=alt.Bin(maxbins=10))
- >>> chart.transform[0]
- BinTransform({
- as: 'x_binned',
- bin: BinParams({
- maxbins: 10
- }),
- field: 'x'
- })
-
- See Also
- --------
- alt.BinTransform : underlying transform object
-
- """
- if as_ is not Undefined:
- if "as" in kwargs:
- raise ValueError(
- "transform_bin: both 'as_' and 'as' passed as arguments."
- )
- kwargs["as"] = as_
- kwargs["bin"] = bin
- kwargs["field"] = field
- return self._add_transform(core.BinTransform(**kwargs))
-
- def transform_calculate(self, as_=Undefined, calculate=Undefined, **kwargs):
- """
- Add a CalculateTransform to the schema.
-
- Parameters
- ----------
- as_ : string
- The field for storing the computed formula value.
- calculate : string or alt.expr expression
- A Vega expression string. Use the variable ``datum`` to refer to the
- current data object.
- **kwargs
- transforms can also be passed by keyword argument; see Examples
-
- Returns
- -------
- self : Chart object
- returns chart to allow for chaining
-
- Examples
- --------
- >>> import altair as alt
- >>> from altair import datum, expr
-
- >>> chart = alt.Chart().transform_calculate(y = 2 * expr.sin(datum.x))
- >>> chart.transform[0]
- CalculateTransform({
- as: 'y',
- calculate: (2 * sin(datum.x))
- })
-
- It's also possible to pass the ``CalculateTransform`` arguments directly:
-
- >>> kwds = {'as': 'y', 'calculate': '2 * sin(datum.x)'}
- >>> chart = alt.Chart().transform_calculate(**kwds)
- >>> chart.transform[0]
- CalculateTransform({
- as: 'y',
- calculate: '2 * sin(datum.x)'
- })
-
- As the first form is easier to write and understand, that is the
- recommended method.
-
- See Also
- --------
- alt.CalculateTransform : underlying transform object
- """
- if as_ is Undefined:
- as_ = kwargs.pop("as", Undefined)
- elif "as" in kwargs:
- raise ValueError(
- "transform_calculate: both 'as_' and 'as' passed as arguments."
- )
- if as_ is not Undefined or calculate is not Undefined:
- dct = {"as": as_, "calculate": calculate}
- self = self._add_transform(core.CalculateTransform(**dct))
- for as_, calculate in kwargs.items():
- dct = {"as": as_, "calculate": calculate}
- self = self._add_transform(core.CalculateTransform(**dct))
- return self
-
- def transform_density(
- self,
- density,
- as_=Undefined,
- bandwidth=Undefined,
- counts=Undefined,
- cumulative=Undefined,
- extent=Undefined,
- groupby=Undefined,
- maxsteps=Undefined,
- minsteps=Undefined,
- steps=Undefined,
- ):
- """Add a DensityTransform to the spec.
-
- Parameters
- ----------
- density : str
- The data field for which to perform density estimation.
- as_ : [str, str]
- The output fields for the sample value and corresponding density estimate.
- **Default value:** ``["value", "density"]``
- bandwidth : float
- The bandwidth (standard deviation) of the Gaussian kernel. If unspecified or set to
- zero, the bandwidth value is automatically estimated from the input data using
- Scott’s rule.
- counts : boolean
- A boolean flag indicating if the output values should be probability estimates
- (false) or smoothed counts (true).
- **Default value:** ``false``
- cumulative : boolean
- A boolean flag indicating whether to produce density estimates (false) or cumulative
- density estimates (true).
- **Default value:** ``false``
- extent : List([float, float])
- A [min, max] domain from which to sample the distribution. If unspecified, the
- extent will be determined by the observed minimum and maximum values of the density
- value field.
- groupby : List(str)
- The data fields to group by. If not specified, a single group containing all data
- objects will be used.
- maxsteps : float
- The maximum number of samples to take along the extent domain for plotting the
- density. **Default value:** ``200``
- minsteps : float
- The minimum number of samples to take along the extent domain for plotting the
- density. **Default value:** ``25``
- steps : float
- The exact number of samples to take along the extent domain for plotting the
- density. If specified, overrides both minsteps and maxsteps to set an exact number
- of uniform samples. Potentially useful in conjunction with a fixed extent to ensure
- consistent sample points for stacked densities.
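-
- Examples
- --------
- A minimal sketch, assuming an illustrative numeric field ``"Measurement"``
- and grouping field ``"Group"``:
-
- >>> import altair as alt
- >>> chart = alt.Chart().transform_density(
- ... 'Measurement',
- ... as_=['value', 'density'],
- ... groupby=['Group'],
- ... extent=[0, 10],
- ... )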
- """
- return self._add_transform(
- core.DensityTransform(
- density=density,
- bandwidth=bandwidth,
- counts=counts,
- cumulative=cumulative,
- extent=extent,
- groupby=groupby,
- maxsteps=maxsteps,
- minsteps=minsteps,
- steps=steps,
- **{"as": as_},
- )
- )
-
- def transform_impute(
- self,
- impute,
- key,
- frame=Undefined,
- groupby=Undefined,
- keyvals=Undefined,
- method=Undefined,
- value=Undefined,
- ):
- """
- Add an ImputeTransform to the schema.
-
- Parameters
- ----------
- impute : string
- The data field for which the missing values should be imputed.
- key : string
- A key field that uniquely identifies data objects within a group.
- Missing key values (those occurring in the data but not in the current group) will
- be imputed.
- frame : List(anyOf(None, float))
- A frame specification as a two-element array used to control the window over which
- the specified method is applied. The array entries should either be a number
- indicating the offset from the current data object, or null to indicate unbounded
- rows preceding or following the current data object. For example, the value ``[-5,
- 5]`` indicates that the window should include five objects preceding and five
- objects following the current object.
- **Default value:** : ``[null, null]`` indicating that the window includes all
- objects.
- groupby : List(string)
- An optional array of fields by which to group the values.
- Imputation will then be performed on a per-group basis.
- keyvals : anyOf(List(Mapping(required=[])), :class:`ImputeSequence`)
- Defines the key values that should be considered for imputation.
- An array of key values or an object defining a `number sequence
- `__.
- If provided, this will be used in addition to the key values observed within the
- input data. If not provided, the values will be derived from all unique values of
- the ``key`` field. For ``impute`` in ``encoding``, the key field is the x-field if
- the y-field is imputed, or vice versa.
- If there is no impute grouping, this property *must* be specified.
- method : :class:`ImputeMethod`
- The imputation method to use for the field value of imputed data objects.
- One of ``value``, ``mean``, ``median``, ``max`` or ``min``.
- **Default value:** ``"value"``
- value : Mapping(required=[])
- The field value to use when the imputation ``method`` is ``"value"``.
-
- Returns
- -------
- self : Chart object
- returns chart to allow for chaining
-
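- Examples
- --------
- A minimal sketch, assuming illustrative fields ``"y"`` (to impute),
- ``"x"`` (key), and ``"Category"``:
-
- >>> import altair as alt
- >>> chart = alt.Chart().transform_impute(
- ... impute='y',
- ... key='x',
- ... method='mean',
- ... groupby=['Category'],
- ... )
-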
- See Also
- --------
- alt.ImputeTransform : underlying transform object
- """
- return self._add_transform(
- core.ImputeTransform(
- impute=impute,
- key=key,
- frame=frame,
- groupby=groupby,
- keyvals=keyvals,
- method=method,
- value=value,
- )
- )
-
- def transform_joinaggregate(
- self, joinaggregate=Undefined, groupby=Undefined, **kwargs
- ):
- """
- Add a JoinAggregateTransform to the schema.
-
- Parameters
- ----------
- joinaggregate : List(:class:`JoinAggregateFieldDef`)
- The definition of the fields in the join aggregate, and what calculations to use.
- groupby : List(string)
- The data fields for partitioning the data objects into separate groups. If
- unspecified, all data points will be in a single group.
- **kwargs
- joinaggregates can also be passed by keyword argument; see Examples.
-
- Returns
- -------
- self : Chart object
- returns chart to allow for chaining
-
- Examples
- --------
- >>> import altair as alt
- >>> chart = alt.Chart().transform_joinaggregate(x='sum(y)')
- >>> chart.transform[0]
- JoinAggregateTransform({
- joinaggregate: [JoinAggregateFieldDef({
- as: 'x',
- field: 'y',
- op: 'sum'
- })]
- })
-
- See Also
- --------
- alt.JoinAggregateTransform : underlying transform object
- """
- if joinaggregate is Undefined:
- joinaggregate = []
- for key, val in kwargs.items():
- parsed = utils.parse_shorthand(val)
- dct = {
- "as": key,
- "field": parsed.get("field", Undefined),
- "op": parsed.get("aggregate", Undefined),
- }
- joinaggregate.append(core.JoinAggregateFieldDef(**dct))
- return self._add_transform(
- core.JoinAggregateTransform(joinaggregate=joinaggregate, groupby=groupby)
- )
-
- def transform_filter(self, filter, **kwargs):
- """
- Add a FilterTransform to the schema.
-
- Parameters
- ----------
- filter : a filter expression or :class:`PredicateComposition`
- The `filter` property must be one of the predicate definitions:
- (1) a string or alt.expr expression
- (2) a range predicate
- (3) a selection predicate
- (4) a logical operand combining (1)-(3)
- (5) a Selection object
-
- Returns
- -------
- self : Chart object
- returns chart to allow for chaining
-
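- Examples
- --------
- A minimal sketch using an expression predicate on an illustrative field:
-
- >>> import altair as alt
- >>> from altair import datum
- >>> chart = alt.Chart().transform_filter(datum.Year > 2000)
-
- A selection can also be passed directly:
-
- >>> brush = alt.selection_interval(name='brush')
- >>> chart = alt.Chart().transform_filter(brush)
-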
- See Also
- --------
- alt.FilterTransform : underlying transform object
-
- """
- if isinstance(filter, Selection):
- filter = {"selection": filter.name}
- elif isinstance(filter, core.SelectionComposition):
- filter = {"selection": filter}
- return self._add_transform(core.FilterTransform(filter=filter, **kwargs))
-
- def transform_flatten(self, flatten, as_=Undefined):
- """Add a FlattenTransform to the schema.
-
- Parameters
- ----------
- flatten : List(string)
- An array of one or more data fields containing arrays to flatten.
- If multiple fields are specified, their array values should have a parallel
- structure, ideally with the same length.
- If the lengths of parallel arrays do not match,
- the longest array will be used with ``null`` values added for missing entries.
- as : List(string)
- The output field names for extracted array values.
- **Default value:** The field name of the corresponding array field
-
- Returns
- -------
- self : Chart object
- returns chart to allow for chaining
-
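- Examples
- --------
- A minimal sketch, assuming an illustrative field ``"values"`` whose entries
- are arrays:
-
- >>> import altair as alt
- >>> chart = alt.Chart().transform_flatten(['values'], as_=['value'])
-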
- See Also
- --------
- alt.FlattenTransform : underlying transform object
- """
- return self._add_transform(
- core.FlattenTransform(flatten=flatten, **{"as": as_})
- )
-
- def transform_fold(self, fold, as_=Undefined):
- """Add a FoldTransform to the spec.
-
- Parameters
- ----------
- fold : List(string)
- An array of data fields indicating the properties to fold.
- as : [string, string]
- The output field names for the key and value properties produced by the fold
- transform. Default: ``["key", "value"]``
-
- Returns
- -------
- self : Chart object
- returns chart to allow for chaining
-
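- Examples
- --------
- A minimal sketch, folding two illustrative value columns into
- key/value pairs:
-
- >>> import altair as alt
- >>> chart = alt.Chart().transform_fold(
- ... ['gold', 'silver'],
- ... as_=['medal', 'count'],
- ... )
-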
- See Also
- --------
- Chart.transform_pivot : pivot transform - opposite of fold.
- alt.FoldTransform : underlying transform object
- """
- return self._add_transform(core.FoldTransform(fold=fold, **{"as": as_}))
-
- def transform_loess(
- self, on, loess, as_=Undefined, bandwidth=Undefined, groupby=Undefined
- ):
- """Add a LoessTransform to the spec.
-
- Parameters
- ----------
- on : str
- The data field of the independent variable to use as a predictor.
- loess : str
- The data field of the dependent variable to smooth.
- as_ : [str, str]
- The output field names for the smoothed points generated by the loess transform.
- **Default value:** The field names of the input x and y values.
- bandwidth : float
- A bandwidth parameter in the range ``[0, 1]`` that determines the amount of
- smoothing. **Default value:** ``0.3``
- groupby : List(str)
- The data fields to group by. If not specified, a single group containing all data
- objects will be used.
-
- Returns
- -------
- self : Chart object
- returns chart to allow for chaining
-
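- Examples
- --------
- A minimal sketch, assuming ``"x"`` as the illustrative independent field
- and ``"y"`` as the field to smooth:
-
- >>> import altair as alt
- >>> chart = alt.Chart().transform_loess('x', 'y', bandwidth=0.5)
-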
- See Also
- --------
- Chart.transform_regression: regression transform
- alt.LoessTransform : underlying transform object
- """
- return self._add_transform(
- core.LoessTransform(
- loess=loess, on=on, bandwidth=bandwidth, groupby=groupby, **{"as": as_}
- )
- )
-
- def transform_lookup(
- self,
- lookup=Undefined,
- from_=Undefined,
- as_=Undefined,
- default=Undefined,
- **kwargs,
- ):
- """Add a DataLookupTransform or SelectionLookupTransform to the chart
-
- Parameters
- ----------
- lookup : string
- Key in primary data source.
- from_ : anyOf(:class:`LookupData`, :class:`LookupSelection`)
- Secondary data reference.
- as_ : anyOf(string, List(string))
- The output fields on which to store the looked up data values.
-
- For data lookups, this property may be left blank if ``from_.fields``
- has been specified (those field names will be used); if ``from_.fields``
- has not been specified, ``as_`` must be a string.
-
- For selection lookups, this property is optional: if unspecified,
- looked up values will be stored under a property named for the selection;
- and if specified, it must correspond to ``from_.fields``.
- default : string
- The default value to use if lookup fails. **Default value:** ``null``
-
- Returns
- -------
- self : Chart object
- returns chart to allow for chaining
-
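- Examples
- --------
- A minimal sketch of a data lookup, assuming an illustrative secondary
- dataset ``lookup_df`` with columns ``"id"`` and ``"name"``:
-
- >>> import altair as alt
- >>> import pandas as pd
- >>> lookup_df = pd.DataFrame({'id': [1, 2], 'name': ['a', 'b']})  # illustrative data
- >>> chart = alt.Chart().transform_lookup(
- ... lookup='id',
- ... from_=alt.LookupData(data=lookup_df, key='id', fields=['name']),
- ... )
-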
- See Also
- --------
- alt.DataLookupTransform : underlying transform object
- alt.SelectionLookupTransform : underlying transform object
- """
- if as_ is not Undefined:
- if "as" in kwargs:
- raise ValueError(
- "transform_lookup: both 'as_' and 'as' passed as arguments."
- )
- kwargs["as"] = as_
- if from_ is not Undefined:
- if "from" in kwargs:
- raise ValueError(
- "transform_lookup: both 'from_' and 'from' passed as arguments."
- )
- kwargs["from"] = from_
- kwargs["lookup"] = lookup
- kwargs["default"] = default
- return self._add_transform(core.LookupTransform(**kwargs))
-
- def transform_pivot(
- self, pivot, value, groupby=Undefined, limit=Undefined, op=Undefined
- ):
- """Add a pivot transform to the chart.
-
- Parameters
- ----------
- pivot : str
- The data field to pivot on. The unique values of this field become new field names
- in the output stream.
- value : str
- The data field to populate pivoted fields. The aggregate values of this field become
- the values of the new pivoted fields.
- groupby : List(str)
- The optional data fields to group by. If not specified, a single group containing
- all data objects will be used.
- limit : float
- An optional parameter indicating the maximum number of pivoted fields to generate.
- The default ( ``0`` ) applies no limit. The pivoted ``pivot`` names are sorted in
- ascending order prior to enforcing the limit.
- **Default value:** ``0``
- op : string
- The aggregation operation to apply to grouped ``value`` field values.
- **Default value:** ``sum``
-
- Returns
- -------
- self : Chart object
- returns chart to allow for chaining
-
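- Examples
- --------
- A minimal sketch, assuming illustrative fields ``"symbol"``, ``"price"``,
- and ``"date"``:
-
- >>> import altair as alt
- >>> chart = alt.Chart().transform_pivot(
- ... 'symbol', value='price', groupby=['date'], op='mean'
- ... )
-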
- See Also
- --------
- Chart.transform_fold : fold transform - opposite of pivot.
- alt.PivotTransform : underlying transform object
- """
- return self._add_transform(
- core.PivotTransform(
- pivot=pivot, value=value, groupby=groupby, limit=limit, op=op
- )
- )
-
- def transform_quantile(
- self,
- quantile,
- as_=Undefined,
- groupby=Undefined,
- probs=Undefined,
- step=Undefined,
- ):
- """Add a quantile transform to the chart
-
- Parameters
- ----------
- quantile : str
- The data field for which to perform quantile estimation.
- as : [str, str]
- The output field names for the probability and quantile values.
- **Default value:** ``["prob", "value"]``
- groupby : List(str)
- The data fields to group by. If not specified, a single group containing all data
- objects will be used.
- probs : List(float)
- An array of probabilities in the range (0, 1) for which to compute quantile values.
- If not specified, the *step* parameter will be used.
- step : float
- A probability step size (default 0.01) for sampling quantile values. All values from
- one-half the step size up to 1 (exclusive) will be sampled. This parameter is only
- used if the *probs* parameter is not provided. **Default value:** ``0.01``
-
- Returns
- -------
- self : Chart object
- returns chart to allow for chaining
-
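- Examples
- --------
- A minimal sketch, assuming an illustrative numeric field ``"measure"``:
-
- >>> import altair as alt
- >>> chart = alt.Chart().transform_quantile(
- ... 'measure',
- ... probs=[0.25, 0.5, 0.75],
- ... as_=['prob', 'value'],
- ... )
-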
- See Also
- --------
- alt.QuantileTransform : underlying transform object
- """
- return self._add_transform(
- core.QuantileTransform(
- quantile=quantile,
- groupby=groupby,
- probs=probs,
- step=step,
- **{"as": as_},
- )
- )
-
- def transform_regression(
- self,
- on,
- regression,
- as_=Undefined,
- extent=Undefined,
- groupby=Undefined,
- method=Undefined,
- order=Undefined,
- params=Undefined,
- ):
- """Add a RegressionTransform to the chart.
-
- Parameters
- ----------
- on : str
- The data field of the independent variable to use as a predictor.
- regression : str
- The data field of the dependent variable to predict.
- as_ : [str, str]
- The output field names for the smoothed points generated by the regression
- transform. **Default value:** The field names of the input x and y values.
- extent : [float, float]
- A [min, max] domain over the independent (x) field for the starting and ending
- points of the generated trend line.
- groupby : List(str)
- The data fields to group by. If not specified, a single group containing all data
- objects will be used.
- method : enum('linear', 'log', 'exp', 'pow', 'quad', 'poly')
- The functional form of the regression model. One of ``"linear"``, ``"log"``,
- ``"exp"``, ``"pow"``, ``"quad"``, or ``"poly"``. **Default value:** ``"linear"``
- order : float
- The polynomial order (number of coefficients) for the 'poly' method.
- **Default value:** ``3``
- params : boolean
- A boolean flag indicating if the transform should return the regression model
- parameters (one object per group), rather than trend line points.
- The resulting objects include a ``coef`` array of fitted coefficient values
- (starting with the intercept term and then including terms of increasing order)
- and an ``rSquared`` value (indicating the total variance explained by the model).
- **Default value:** ``false``
-
- Returns
- -------
- self : Chart object
- returns chart to allow for chaining
-
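- Examples
- --------
- A minimal sketch, assuming illustrative fields ``"x"`` (independent) and
- ``"y"`` (dependent):
-
- >>> import altair as alt
- >>> chart = alt.Chart().transform_regression('x', 'y', method='poly', order=2)
-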
- See Also
- --------
- Chart.transform_loess : LOESS transform
- alt.RegressionTransform : underlying transform object
- """
- return self._add_transform(
- core.RegressionTransform(
- regression=regression,
- on=on,
- extent=extent,
- groupby=groupby,
- method=method,
- order=order,
- params=params,
- **{"as": as_},
- )
- )
-
- def transform_sample(self, sample=1000):
- """
- Add a SampleTransform to the schema.
-
- Parameters
- ----------
- sample : float
- The maximum number of data objects to include in the sample. Default: 1000.
-
- Returns
- -------
- self : Chart object
- returns chart to allow for chaining
-
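- Examples
- --------
- A minimal sketch, sampling at most 500 rows:
-
- >>> import altair as alt
- >>> chart = alt.Chart().transform_sample(500)
-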
- See Also
- --------
- alt.SampleTransform : underlying transform object
- """
- return self._add_transform(core.SampleTransform(sample))
-
- def transform_stack(self, as_, stack, groupby, offset=Undefined, sort=Undefined):
- """
- Add a StackTransform to the schema.
-
- Parameters
- ----------
- as_ : anyOf(string, List(string))
- Output field names. This can be either a string or an array of strings with
- two elements denoting the name for the fields for stack start and stack end
- respectively.
- If a single string (e.g. "val") is provided, the end field will be "val_end".
- stack : string
- The field which is stacked.
- groupby : List(string)
- The data fields to group by.
- offset : enum('zero', 'center', 'normalize')
- Mode for stacking marks. Default: 'zero'.
- sort : List(:class:`SortField`)
- Field that determines the order of leaves in the stacked charts.
-
- Returns
- -------
- self : Chart object
- returns chart to allow for chaining
-
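- Examples
- --------
- A minimal sketch, assuming illustrative fields ``"value"`` and ``"category"``:
-
- >>> import altair as alt
- >>> chart = alt.Chart().transform_stack(
- ... as_=['value_start', 'value_end'],
- ... stack='value',
- ... groupby=['category'],
- ... offset='normalize',
- ... )
-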
- See Also
- --------
- alt.StackTransform : underlying transform object
- """
- return self._add_transform(
- core.StackTransform(
- stack=stack, groupby=groupby, offset=offset, sort=sort, **{"as": as_}
- )
- )
-
- def transform_timeunit(
- self, as_=Undefined, field=Undefined, timeUnit=Undefined, **kwargs
- ):
- """
- Add a TimeUnitTransform to the schema.
-
- Parameters
- ----------
- as_ : string
- The output field to write the timeUnit value.
- field : string
- The data field to which the time unit is applied.
- timeUnit : :class:`TimeUnit`
- The timeUnit.
- **kwargs
- transforms can also be passed by keyword argument; see Examples
-
- Returns
- -------
- self : Chart object
- returns chart to allow for chaining
-
- Examples
- --------
- >>> import altair as alt
- >>> from altair import datum, expr
-
- >>> chart = alt.Chart().transform_timeunit(month='month(date)')
- >>> chart.transform[0]
- TimeUnitTransform({
- as: 'month',
- field: 'date',
- timeUnit: 'month'
- })
-
- It's also possible to pass the ``TimeUnitTransform`` arguments directly;
- this is most useful in cases where the desired field name is not a
- valid python identifier:
-
- >>> kwds = {'as': 'month', 'timeUnit': 'month', 'field': 'The Month'}
- >>> chart = alt.Chart().transform_timeunit(**kwds)
- >>> chart.transform[0]
- TimeUnitTransform({
- as: 'month',
- field: 'The Month',
- timeUnit: 'month'
- })
-
- As the first form is easier to write and understand, that is the
- recommended method.
-
- See Also
- --------
- alt.TimeUnitTransform : underlying transform object
-
- """
- if as_ is Undefined:
- as_ = kwargs.pop("as", Undefined)
- else:
- if "as" in kwargs:
- raise ValueError(
- "transform_timeunit: both 'as_' and 'as' passed as arguments."
- )
- if as_ is not Undefined:
- dct = {"as": as_, "timeUnit": timeUnit, "field": field}
- self = self._add_transform(core.TimeUnitTransform(**dct))
- for as_, shorthand in kwargs.items():
- dct = utils.parse_shorthand(
- shorthand,
- parse_timeunits=True,
- parse_aggregates=False,
- parse_types=False,
- )
- dct.pop("type", None)
- dct["as"] = as_
- if "timeUnit" not in dct:
- raise ValueError("'{}' must include a valid timeUnit".format(shorthand))
- self = self._add_transform(core.TimeUnitTransform(**dct))
- return self
-
- def transform_window(
- self,
- window=Undefined,
- frame=Undefined,
- groupby=Undefined,
- ignorePeers=Undefined,
- sort=Undefined,
- **kwargs,
- ):
- """Add a WindowTransform to the schema
-
- Parameters
- ----------
- window : List(:class:`WindowFieldDef`)
- The definition of the fields in the window, and what calculations to use.
- frame : List(anyOf(None, float))
- A frame specification as a two-element array indicating how the sliding window
- should proceed. The array entries should either be a number indicating the offset
- from the current data object, or null to indicate unbounded rows preceding or
- following the current data object. The default value is ``[null, 0]``, indicating
- that the sliding window includes the current object and all preceding objects. The
- value ``[-5, 5]`` indicates that the window should include five objects preceding
- and five objects following the current object. Finally, ``[null, null]`` indicates
- that the window frame should always include all data objects. The only operators
- affected are the aggregation operations and the ``first_value``, ``last_value``, and
- ``nth_value`` window operations. The other window operations are not affected by
- this.
-
- **Default value:** : ``[null, 0]`` (includes the current object and all preceding
- objects)
- groupby : List(string)
- The data fields for partitioning the data objects into separate windows. If
- unspecified, all data points will be in a single group.
- ignorePeers : boolean
- Indicates if the sliding window frame should ignore peer values. (Peer values are
- those considered identical by the sort criteria). The default is false, causing the
- window frame to expand to include all peer values. If set to true, the window frame
- will be defined by offset values only. This setting only affects those operations
- that depend on the window frame, namely aggregation operations and the first_value,
- last_value, and nth_value window operations.
-
- **Default value:** ``false``
- sort : List(:class:`SortField`)
- A sort field definition for sorting data objects within a window. If two data
- objects are considered equal by the comparator, they are considered “peer” values of
- equal rank. If sort is not specified, the order is undefined: data objects are
- processed in the order they are observed and none are considered peers (the
- ignorePeers parameter is ignored and treated as if set to ``true`` ).
- **kwargs
- transforms can also be passed by keyword argument; see Examples
-
- Examples
- --------
- A cumulative line chart
-
- >>> import altair as alt
- >>> import numpy as np
- >>> import pandas as pd
- >>> data = pd.DataFrame({'x': np.arange(100),
- ... 'y': np.random.randn(100)})
- >>> chart = alt.Chart(data).mark_line().encode(
- ... x='x:Q',
- ... y='ycuml:Q'
- ... ).transform_window(
- ... ycuml='sum(y)'
- ... )
- >>> chart.transform[0]
- WindowTransform({
- window: [WindowFieldDef({
- as: 'ycuml',
- field: 'y',
- op: 'sum'
- })]
- })
-
- """
- if kwargs:
- if window is Undefined:
- window = []
- for as_, shorthand in kwargs.items():
- kwds = {"as": as_}
- kwds.update(
- utils.parse_shorthand(
- shorthand,
- parse_aggregates=False,
- parse_window_ops=True,
- parse_timeunits=False,
- parse_types=False,
- )
- )
- window.append(core.WindowFieldDef(**kwds))
-
- return self._add_transform(
- core.WindowTransform(
- window=window,
- frame=frame,
- groupby=groupby,
- ignorePeers=ignorePeers,
- sort=sort,
- )
- )
-
- # Display-related methods
-
- def _repr_mimebundle_(self, include=None, exclude=None):
- """Return a MIME bundle for display in Jupyter frontends."""
- # Catch errors explicitly to get around issues in Jupyter frontend
- # see https://github.com/ipython/ipython/issues/11038
- try:
- dct = self.to_dict()
- except Exception:
- utils.display_traceback(in_ipython=True)
- return {}
- else:
- return renderers.get()(dct)
-
- def display(self, renderer=Undefined, theme=Undefined, actions=Undefined, **kwargs):
- """Display chart in Jupyter notebook or JupyterLab
-
- Parameters are passed as options to vega-embed within supported frontends.
- See https://github.com/vega/vega-embed#options for details.
-
- Parameters
- ----------
- renderer : string ('canvas' or 'svg')
- The renderer to use
- theme : string
- The Vega theme name to use; see https://github.com/vega/vega-themes
- actions : bool or dict
- Specify whether action links ("Open In Vega Editor", etc.) are
- included in the view.
- **kwargs :
- Additional parameters are also passed to vega-embed as options.
-
- """
- from IPython.display import display
-
- if renderer is not Undefined:
- kwargs["renderer"] = renderer
- if theme is not Undefined:
- kwargs["theme"] = theme
- if actions is not Undefined:
- kwargs["actions"] = actions
-
- if kwargs:
- options = renderers.options.copy()
- options["embed_options"] = options.get("embed_options", {}).copy()
- options["embed_options"].update(kwargs)
- with renderers.enable(**options):
- display(self)
- else:
- display(self)
-
- @utils.deprecation.deprecated(message="serve() is deprecated. Use show() instead.")
- def serve(
- self,
- ip="127.0.0.1",
- port=8888,
- n_retries=50,
- files=None,
- jupyter_warning=True,
- open_browser=True,
- http_server=None,
- **kwargs,
- ):
- """Open a browser window and display a rendering of the chart
-
- Parameters
- ----------
- html : string
- HTML to serve
- ip : string (default = '127.0.0.1')
- ip address at which the HTML will be served.
- port : int (default = 8888)
- the port at which to serve the HTML
- n_retries : int (default = 50)
- the number of nearby ports to search if the specified port
- is already in use.
- files : dictionary (optional)
- dictionary of extra content to serve
- jupyter_warning : bool (optional)
- if True (default), then print a warning if this is used
- within the Jupyter notebook
- open_browser : bool (optional)
- if True (default), then open a web browser to the given HTML
- http_server : class (optional)
- optionally specify an HTTPServer class to use for showing the
- figure. The default is Python's basic HTTPServer.
- **kwargs :
- additional keyword arguments passed to the save() method
-
- """
- from ...utils.server import serve
-
- html = io.StringIO()
- self.save(html, format="html", **kwargs)
- html.seek(0)
-
- serve(
- html.read(),
- ip=ip,
- port=port,
- n_retries=n_retries,
- files=files,
- jupyter_warning=jupyter_warning,
- open_browser=open_browser,
- http_server=http_server,
- )
-
- def show(self, embed_opt=None, open_browser=None):
- """Show the chart in an external browser window.
-
- This requires a recent version of the altair_viewer package.
-
- Parameters
- ----------
- embed_opt : dict (optional)
- The Vega embed options that control the display of the chart.
- open_browser : bool (optional)
- Specify whether a browser window should be opened. If not specified,
- a browser window will be opened only if the server is not already
- connected to a browser.
- """
- try:
- import altair_viewer # type: ignore
- except ImportError:
- raise ValueError(
- "show() method requires the altair_viewer package. "
- "See http://github.com/altair-viz/altair_viewer"
- )
- altair_viewer.show(self, embed_opt=embed_opt, open_browser=open_browser)
-
- @utils.use_signature(core.Resolve)
- def _set_resolve(self, **kwargs):
- """Copy the chart and update the resolve property with kwargs"""
- if not hasattr(self, "resolve"):
- raise ValueError(
- "{} object has no attribute " "'resolve'".format(self.__class__)
- )
- copy = self.copy(deep=["resolve"])
- if copy.resolve is Undefined:
- copy.resolve = core.Resolve()
- for key, val in kwargs.items():
- copy.resolve[key] = val
- return copy
-
- @utils.use_signature(core.AxisResolveMap)
- def resolve_axis(self, *args, **kwargs):
- return self._set_resolve(axis=core.AxisResolveMap(*args, **kwargs))
-
- @utils.use_signature(core.LegendResolveMap)
- def resolve_legend(self, *args, **kwargs):
- return self._set_resolve(legend=core.LegendResolveMap(*args, **kwargs))
-
- @utils.use_signature(core.ScaleResolveMap)
- def resolve_scale(self, *args, **kwargs):
- return self._set_resolve(scale=core.ScaleResolveMap(*args, **kwargs))
-
-
-class _EncodingMixin(object):
- @utils.use_signature(core.FacetedEncoding)
- def encode(self, *args, **kwargs):
- # Convert args to kwargs based on their types.
- kwargs = utils.infer_encoding_types(args, kwargs, channels)
-
- # get a copy of the dict representation of the previous encoding
- copy = self.copy(deep=["encoding"])
- encoding = copy._get("encoding", {})
- if isinstance(encoding, core.VegaLiteSchema):
- encoding = {k: v for k, v in encoding._kwds.items() if v is not Undefined}
-
- # update with the new encodings, and apply them to the copy
- encoding.update(kwargs)
- copy.encoding = core.FacetedEncoding(**encoding)
- return copy
-
- def facet(
- self,
- facet=Undefined,
- row=Undefined,
- column=Undefined,
- data=Undefined,
- columns=Undefined,
- **kwargs,
- ):
- """Create a facet chart from the current chart.
-
- Faceted charts require data to be specified at the top level; if data
- is not specified, the data from the current chart will be used at the
- top level.
-
- Parameters
- ----------
- facet : string or alt.Facet (optional)
- The data column to use as an encoding for a wrapped facet.
- If specified, then neither row nor column may be specified.
- column : string or alt.Column (optional)
- The data column to use as an encoding for a column facet.
- May be combined with row argument, but not with facet argument.
- row : string or alt.Row (optional)
- The data column to use as an encoding for a row facet.
- May be combined with column argument, but not with facet argument.
- data : string or dataframe (optional)
- The dataset to use for faceting. If not supplied, then data must
- be specified in the top-level chart that calls this method.
- columns : integer
- the maximum number of columns for a wrapped facet.
-
- Returns
- -------
- self :
- for chaining
- """
- facet_specified = facet is not Undefined
- rowcol_specified = row is not Undefined or column is not Undefined
-
- if facet_specified and rowcol_specified:
- raise ValueError(
- "facet argument cannot be combined with row/column argument."
- )
-
- if data is Undefined:
- if self.data is Undefined:
- raise ValueError(
- "Facet charts require data to be specified at the top level."
- )
- self = self.copy(deep=False)
- data, self.data = self.data, Undefined
-
- if facet_specified:
- if isinstance(facet, str):
- facet = channels.Facet(facet)
- else:
- facet = FacetMapping(row=row, column=column)
-
- return FacetChart(spec=self, facet=facet, data=data, columns=columns, **kwargs)
-
-
-class Chart(
- TopLevelMixin, _EncodingMixin, mixins.MarkMethodMixin, core.TopLevelUnitSpec
-):
- """Create a basic Altair/Vega-Lite chart.
-
- Although it is possible to set all Chart properties as constructor attributes,
- it is more idiomatic to use methods such as ``mark_point()``, ``encode()``,
- ``transform_filter()``, ``properties()``, etc. See Altair's documentation
- for details and examples: http://altair-viz.github.io/.
-
- Attributes
- ----------
- data : Data
- An object describing the data source
- mark : AnyMark
- A string describing the mark type (one of `"bar"`, `"circle"`, `"square"`, `"tick"`,
- `"line"`, * `"area"`, `"point"`, `"rule"`, `"geoshape"`, and `"text"`) or a
- MarkDef object.
- encoding : FacetedEncoding
- A key-value mapping between encoding channels and definition of fields.
- autosize : anyOf(AutosizeType, AutoSizeParams)
- Sets how the visualization size should be determined. If a string, should be one of
- `"pad"`, `"fit"` or `"none"`. Object values can additionally specify parameters for
- content sizing and automatic resizing. `"fit"` is only supported for single and
- layered views that don't use `rangeStep`. __Default value__: `pad`
- background : string
- CSS color property to use as the background of visualization.
-
- **Default value:** none (transparent)
- config : Config
- Vega-Lite configuration object. This property can only be defined at the top-level
- of a specification.
- description : string
- Description of this mark for commenting purpose.
- height : float
- The height of a visualization.
- name : string
- Name of the visualization for later reference.
- padding : Padding
- The default visualization padding, in pixels, from the edge of the visualization
- canvas to the data rectangle. If a number, specifies padding for all sides. If an
- object, the value should have the format `{"left": 5, "top": 5, "right": 5,
- "bottom": 5}` to specify padding for each side of the visualization. __Default
- value__: `5`
- projection : Projection
- An object defining properties of geographic projection. Works with `"geoshape"`
- marks and `"point"` or `"line"` marks that have a channel (one or more of `"X"`,
- `"X2"`, `"Y"`, `"Y2"`) with type `"latitude"`, or `"longitude"`.
- selection : Mapping(required=[])
- A key-value mapping between selection names and definitions.
- title : anyOf(string, TitleParams)
- Title for the plot.
- transform : List(Transform)
- An array of data transformations such as filter and new field calculation.
- width : float
- The width of a visualization.
- """
-
- def __init__(
- self,
- data=Undefined,
- encoding=Undefined,
- mark=Undefined,
- width=Undefined,
- height=Undefined,
- **kwargs,
- ):
- super(Chart, self).__init__(
- data=data,
- encoding=encoding,
- mark=mark,
- width=width,
- height=height,
- **kwargs,
- )
-
- @classmethod
- def from_dict(cls, dct, validate=True):
- """Construct class from a dictionary representation
-
- Parameters
- ----------
- dct : dictionary
- The dict from which to construct the class
- validate : boolean
- If True (default), then validate the input against the schema.
-
- Returns
- -------
- obj : Chart object
- The wrapped schema
-
- Raises
- ------
- jsonschema.ValidationError :
- if validate=True and dct does not conform to the schema
- """
- for class_ in TopLevelMixin.__subclasses__():
- if class_ is Chart:
- class_ = super(Chart, cls)
- try:
- return class_.from_dict(dct, validate=validate)
- except jsonschema.ValidationError:
- pass
-
- # As a last resort, try using the Root vegalite object
- return core.Root.from_dict(dct, validate)
-
- def to_dict(self, *args, **kwargs):
- """Convert the chart to a dictionary suitable for JSON export."""
- context = kwargs.get("context", {})
- if self.data is Undefined and "data" not in context:
- # No data specified here or in parent: inject empty data
- # for easier specification of datum encodings.
- copy = self.copy(deep=False)
- copy.data = core.InlineData(values=[{}])
- return super(Chart, copy).to_dict(*args, **kwargs)
- return super().to_dict(*args, **kwargs)
-
- def add_selection(self, *selections):
- """Add one or more selections to the chart."""
- if not selections:
- return self
- copy = self.copy(deep=["selection"])
- if copy.selection is Undefined:
- copy.selection = {}
-
- for s in selections:
- copy.selection[s.name] = s.selection
- return copy
-
- def interactive(self, name=None, bind_x=True, bind_y=True):
- """Make chart axes scales interactive
-
- Parameters
- ----------
- name : string
- The selection name to use for the axes scales. This name should be
- unique among all selections within the chart.
- bind_x : boolean, default True
- If true, then bind the interactive scales to the x-axis
- bind_y : boolean, default True
- If true, then bind the interactive scales to the y-axis
-
- Returns
- -------
- chart :
- copy of self, with interactive axes added
-
- """
- encodings = []
- if bind_x:
- encodings.append("x")
- if bind_y:
- encodings.append("y")
- return self.add_selection(
- selection_interval(bind="scales", encodings=encodings)
- )
-
-
-def _check_if_valid_subspec(spec, classname):
- """Check if the spec is a valid sub-spec.
-
- If it is not, then raise a ValueError
- """
- err = (
- 'Objects with "{0}" attribute cannot be used within {1}. '
- "Consider defining the {0} attribute in the {1} object instead."
- )
-
- if not isinstance(spec, (core.SchemaBase, dict)):
- raise ValueError("Only chart objects can be used in {0}.".format(classname))
- for attr in TOPLEVEL_ONLY_KEYS:
- if isinstance(spec, core.SchemaBase):
- val = getattr(spec, attr, Undefined)
- else:
- val = spec.get(attr, Undefined)
- if val is not Undefined:
- raise ValueError(err.format(attr, classname))
-
-
-def _check_if_can_be_layered(spec):
- """Check if the spec can be layered."""
-
- def _get(spec, attr):
- if isinstance(spec, core.SchemaBase):
- return spec._get(attr)
- else:
- return spec.get(attr, Undefined)
-
- encoding = _get(spec, "encoding")
- if encoding is not Undefined:
- for channel in ["row", "column", "facet"]:
- if _get(encoding, channel) is not Undefined:
- raise ValueError("Faceted charts cannot be layered.")
- if isinstance(spec, (Chart, LayerChart)):
- return
-
- if not isinstance(spec, (core.SchemaBase, dict)):
- raise ValueError("Only chart objects can be layered.")
- if _get(spec, "facet") is not Undefined:
- raise ValueError("Faceted charts cannot be layered.")
- if isinstance(spec, FacetChart) or _get(spec, "facet") is not Undefined:
- raise ValueError("Faceted charts cannot be layered.")
- if isinstance(spec, RepeatChart) or _get(spec, "repeat") is not Undefined:
- raise ValueError("Repeat charts cannot be layered.")
- if isinstance(spec, ConcatChart) or _get(spec, "concat") is not Undefined:
- raise ValueError("Concatenated charts cannot be layered.")
- if isinstance(spec, HConcatChart) or _get(spec, "hconcat") is not Undefined:
- raise ValueError("Concatenated charts cannot be layered.")
- if isinstance(spec, VConcatChart) or _get(spec, "vconcat") is not Undefined:
- raise ValueError("Concatenated charts cannot be layered.")
-
-
-@utils.use_signature(core.TopLevelRepeatSpec)
-class RepeatChart(TopLevelMixin, core.TopLevelRepeatSpec):
- """A chart repeated across rows and columns with small changes"""
-
- # Because TopLevelRepeatSpec is defined as a union as of Vega-Lite schema 4.9,
- # we set the arguments explicitly here.
- # TODO: Should we instead use tools/schemapi/codegen._get_args?
- def __init__(
- self,
- repeat=Undefined,
- spec=Undefined,
- align=Undefined,
- autosize=Undefined,
- background=Undefined,
- bounds=Undefined,
- center=Undefined,
- columns=Undefined,
- config=Undefined,
- data=Undefined,
- datasets=Undefined,
- description=Undefined,
- name=Undefined,
- padding=Undefined,
- params=Undefined,
- resolve=Undefined,
- spacing=Undefined,
- title=Undefined,
- transform=Undefined,
- usermeta=Undefined,
- **kwds,
- ):
- _check_if_valid_subspec(spec, "RepeatChart")
- super(RepeatChart, self).__init__(
- repeat=repeat,
- spec=spec,
- align=align,
- autosize=autosize,
- background=background,
- bounds=bounds,
- center=center,
- columns=columns,
- config=config,
- data=data,
- datasets=datasets,
- description=description,
- name=name,
- padding=padding,
- params=params,
- resolve=resolve,
- spacing=spacing,
- title=title,
- transform=transform,
- usermeta=usermeta,
- **kwds,
- )
-
- def interactive(self, name=None, bind_x=True, bind_y=True):
- """Make chart axes scales interactive
-
- Parameters
- ----------
- name : string
- The selection name to use for the axes scales. This name should be
- unique among all selections within the chart.
- bind_x : boolean, default True
- If true, then bind the interactive scales to the x-axis
- bind_y : boolean, default True
- If true, then bind the interactive scales to the y-axis
-
- Returns
- -------
- chart :
- copy of self, with interactive axes added
-
- """
- copy = self.copy(deep=False)
- copy.spec = copy.spec.interactive(name=name, bind_x=bind_x, bind_y=bind_y)
- return copy
-
- def add_selection(self, *selections):
- """Add one or more selections to the chart."""
- if not selections or self.spec is Undefined:
- return self
- copy = self.copy()
- copy.spec = copy.spec.add_selection(*selections)
- return copy
-
-
-def repeat(repeater="repeat"):
- """Tie a channel to the row or column within a repeated chart
-
- The output of this should be passed to the ``field`` attribute of
- a channel.
-
- Parameters
- ----------
- repeater : {'row'|'column'|'repeat'|'layer'}
- The repeater to tie the field to. Default is 'repeat'.
-
- Returns
- -------
- repeat : RepeatRef object
- """
- if repeater not in ["row", "column", "repeat", "layer"]:
- raise ValueError("repeater must be one of ['row', 'column', 'repeat', 'layer']")
- return core.RepeatRef(repeat=repeater)
-
-
-@utils.use_signature(core.TopLevelNormalizedConcatSpecGenericSpec)
-class ConcatChart(TopLevelMixin, core.TopLevelNormalizedConcatSpecGenericSpec):
- """A chart with horizontally-concatenated facets"""
-
- def __init__(self, data=Undefined, concat=(), columns=Undefined, **kwargs):
- # TODO: move common data to top level?
- for spec in concat:
- _check_if_valid_subspec(spec, "ConcatChart")
- super(ConcatChart, self).__init__(
- data=data, concat=list(concat), columns=columns, **kwargs
- )
- self.data, self.concat = _combine_subchart_data(self.data, self.concat)
-
- def __ior__(self, other):
- _check_if_valid_subspec(other, "ConcatChart")
- self.concat.append(other)
- self.data, self.concat = _combine_subchart_data(self.data, self.concat)
- return self
-
- def __or__(self, other):
- copy = self.copy(deep=["concat"])
- copy |= other
- return copy
-
- def add_selection(self, *selections):
- """Add one or more selections to all subcharts."""
- if not selections or not self.concat:
- return self
- copy = self.copy()
- copy.concat = [chart.add_selection(*selections) for chart in copy.concat]
- return copy
-
-
-def concat(*charts, **kwargs):
- """Concatenate charts horizontally"""
- return ConcatChart(concat=charts, **kwargs)
-
-
-@utils.use_signature(core.TopLevelNormalizedHConcatSpecGenericSpec)
-class HConcatChart(TopLevelMixin, core.TopLevelNormalizedHConcatSpecGenericSpec):
- """A chart with horizontally-concatenated facets"""
-
- def __init__(self, data=Undefined, hconcat=(), **kwargs):
- # TODO: move common data to top level?
- for spec in hconcat:
- _check_if_valid_subspec(spec, "HConcatChart")
- super(HConcatChart, self).__init__(data=data, hconcat=list(hconcat), **kwargs)
- self.data, self.hconcat = _combine_subchart_data(self.data, self.hconcat)
-
- def __ior__(self, other):
- _check_if_valid_subspec(other, "HConcatChart")
- self.hconcat.append(other)
- self.data, self.hconcat = _combine_subchart_data(self.data, self.hconcat)
- return self
-
- def __or__(self, other):
- copy = self.copy(deep=["hconcat"])
- copy |= other
- return copy
-
- def add_selection(self, *selections):
- """Add one or more selections to all subcharts."""
- if not selections or not self.hconcat:
- return self
- copy = self.copy()
- copy.hconcat = [chart.add_selection(*selections) for chart in copy.hconcat]
- return copy
-
-
-def hconcat(*charts, **kwargs):
- """Concatenate charts horizontally"""
- return HConcatChart(hconcat=charts, **kwargs)
-
-
-@utils.use_signature(core.TopLevelNormalizedVConcatSpecGenericSpec)
-class VConcatChart(TopLevelMixin, core.TopLevelNormalizedVConcatSpecGenericSpec):
- """A chart with vertically-concatenated facets"""
-
- def __init__(self, data=Undefined, vconcat=(), **kwargs):
- # TODO: move common data to top level?
- for spec in vconcat:
- _check_if_valid_subspec(spec, "VConcatChart")
- super(VConcatChart, self).__init__(data=data, vconcat=list(vconcat), **kwargs)
- self.data, self.vconcat = _combine_subchart_data(self.data, self.vconcat)
-
- def __iand__(self, other):
- _check_if_valid_subspec(other, "VConcatChart")
- self.vconcat.append(other)
- self.data, self.vconcat = _combine_subchart_data(self.data, self.vconcat)
- return self
-
- def __and__(self, other):
- copy = self.copy(deep=["vconcat"])
- copy &= other
- return copy
-
- def add_selection(self, *selections):
- """Add one or more selections to all subcharts."""
- if not selections or not self.vconcat:
- return self
- copy = self.copy()
- copy.vconcat = [chart.add_selection(*selections) for chart in copy.vconcat]
- return copy
-
-
-def vconcat(*charts, **kwargs):
- """Concatenate charts vertically"""
- return VConcatChart(vconcat=charts, **kwargs)
-
-
-@utils.use_signature(core.TopLevelLayerSpec)
-class LayerChart(TopLevelMixin, _EncodingMixin, core.TopLevelLayerSpec):
- """A Chart with layers within a single panel"""
-
- def __init__(self, data=Undefined, layer=(), **kwargs):
- # TODO: move common data to top level?
- # TODO: check for conflicting interaction
- for spec in layer:
- _check_if_valid_subspec(spec, "LayerChart")
- _check_if_can_be_layered(spec)
- super(LayerChart, self).__init__(data=data, layer=list(layer), **kwargs)
- self.data, self.layer = _combine_subchart_data(self.data, self.layer)
-
- def __iadd__(self, other):
- _check_if_valid_subspec(other, "LayerChart")
- _check_if_can_be_layered(other)
- self.layer.append(other)
- self.data, self.layer = _combine_subchart_data(self.data, self.layer)
- return self
-
- def __add__(self, other):
- copy = self.copy(deep=["layer"])
- copy += other
- return copy
-
- def add_layers(self, *layers):
- copy = self.copy(deep=["layer"])
- for layer in layers:
- copy += layer
- return copy
-
- def interactive(self, name=None, bind_x=True, bind_y=True):
- """Make chart axes scales interactive
-
- Parameters
- ----------
- name : string
- The selection name to use for the axes scales. This name should be
- unique among all selections within the chart.
- bind_x : boolean, default True
- If true, then bind the interactive scales to the x-axis
- bind_y : boolean, default True
- If true, then bind the interactive scales to the y-axis
-
- Returns
- -------
- chart :
- copy of self, with interactive axes added
-
- """
- if not self.layer:
- raise ValueError(
- "LayerChart: cannot call interactive() until a " "layer is defined"
- )
- copy = self.copy(deep=["layer"])
- copy.layer[0] = copy.layer[0].interactive(
- name=name, bind_x=bind_x, bind_y=bind_y
- )
- return copy
-
- def add_selection(self, *selections):
- """Add one or more selections to all subcharts."""
- if not selections or not self.layer:
- return self
- copy = self.copy()
- copy.layer[0] = copy.layer[0].add_selection(*selections)
- return copy
-
-
-def layer(*charts, **kwargs):
- """layer multiple charts"""
- return LayerChart(layer=charts, **kwargs)
-
-
-@utils.use_signature(core.TopLevelFacetSpec)
-class FacetChart(TopLevelMixin, core.TopLevelFacetSpec):
- """A Chart with layers within a single panel"""
-
- def __init__(self, data=Undefined, spec=Undefined, facet=Undefined, **kwargs):
- _check_if_valid_subspec(spec, "FacetChart")
- super(FacetChart, self).__init__(data=data, spec=spec, facet=facet, **kwargs)
-
- def interactive(self, name=None, bind_x=True, bind_y=True):
- """Make chart axes scales interactive
-
- Parameters
- ----------
- name : string
- The selection name to use for the axes scales. This name should be
- unique among all selections within the chart.
- bind_x : boolean, default True
- If true, then bind the interactive scales to the x-axis
- bind_y : boolean, default True
- If true, then bind the interactive scales to the y-axis
-
- Returns
- -------
- chart :
- copy of self, with interactive axes added
-
- """
- copy = self.copy(deep=False)
- copy.spec = copy.spec.interactive(name=name, bind_x=bind_x, bind_y=bind_y)
- return copy
-
- def add_selection(self, *selections):
- """Add one or more selections to the chart."""
- if not selections or self.spec is Undefined:
- return self
- copy = self.copy()
- copy.spec = copy.spec.add_selection(*selections)
- return copy
-
-
-def topo_feature(url, feature, **kwargs):
- """A convenience function for extracting features from a topojson url
-
- Parameters
- ----------
- url : string
- A URL from which to load the data set.
-
- feature : string
- The name of the TopoJSON object set to convert to a GeoJSON feature collection. For
- example, in a map of the world, there may be an object set named `"countries"`.
- Using the feature property, we can extract this set and generate a GeoJSON feature
- object for each country.
-
- **kwargs :
- additional keywords passed to TopoDataFormat
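-
- Examples
- --------
- A minimal sketch, assuming a hypothetical TopoJSON URL and feature name:
-
- >>> import altair as alt
- >>> counties = alt.topo_feature('https://example.com/us-10m.json', feature='counties')  # hypothetical URL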
- """
- return core.UrlData(
- url=url, format=core.TopoDataFormat(type="topojson", feature=feature, **kwargs)
- )
-
-
-def _combine_subchart_data(data, subcharts):
- def remove_data(subchart):
- if subchart.data is not Undefined:
- subchart = subchart.copy()
- subchart.data = Undefined
- return subchart
-
- if not subcharts:
- # No subcharts = nothing to do.
- pass
- elif data is Undefined:
- # Top level has no data; all subchart data must
- # be identical to proceed.
- subdata = subcharts[0].data
- if subdata is not Undefined and all(c.data is subdata for c in subcharts):
- data = subdata
- subcharts = [remove_data(c) for c in subcharts]
- else:
- # Top level has data; subchart data must be either
- # undefined or identical to proceed.
- if all(c.data is Undefined or c.data is data for c in subcharts):
- subcharts = [remove_data(c) for c in subcharts]
-
- return data, subcharts
-
-
-@utils.use_signature(core.SequenceParams)
-def sequence(start, stop=None, step=Undefined, as_=Undefined, **kwds):
- """Sequence generator."""
- if stop is None:
- start, stop = 0, start
- params = core.SequenceParams(start=start, stop=stop, step=step, **{"as": as_})
- return core.SequenceGenerator(sequence=params, **kwds)
-
-
-@utils.use_signature(core.GraticuleParams)
-def graticule(**kwds):
- """Graticule generator."""
- if not kwds:
- # graticule: True indicates default parameters
- graticule = True
- else:
- graticule = core.GraticuleParams(**kwds)
- return core.GraticuleGenerator(graticule=graticule)
-
-
-def sphere():
- """Sphere generator."""
- return core.SphereGenerator(sphere=True)
diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/attr/__init__.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/attr/__init__.py
deleted file mode 100644
index f95c96dd5795b9c958adfebbd36ffad99cf23cc9..0000000000000000000000000000000000000000
--- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/attr/__init__.py
+++ /dev/null
@@ -1,80 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-from __future__ import absolute_import, division, print_function
-
-import sys
-
-from functools import partial
-
-from . import converters, exceptions, filters, setters, validators
-from ._cmp import cmp_using
-from ._config import get_run_validators, set_run_validators
-from ._funcs import asdict, assoc, astuple, evolve, has, resolve_types
-from ._make import (
- NOTHING,
- Attribute,
- Factory,
- attrib,
- attrs,
- fields,
- fields_dict,
- make_class,
- validate,
-)
-from ._version_info import VersionInfo
-
-
-__version__ = "21.4.0"
-__version_info__ = VersionInfo._from_version_string(__version__)
-
-__title__ = "attrs"
-__description__ = "Classes Without Boilerplate"
-__url__ = "https://www.attrs.org/"
-__uri__ = __url__
-__doc__ = __description__ + " <" + __uri__ + ">"
-
-__author__ = "Hynek Schlawack"
-__email__ = "hs@ox.cx"
-
-__license__ = "MIT"
-__copyright__ = "Copyright (c) 2015 Hynek Schlawack"
-
-
-s = attributes = attrs
-ib = attr = attrib
-dataclass = partial(attrs, auto_attribs=True) # happy Easter ;)
-
-__all__ = [
- "Attribute",
- "Factory",
- "NOTHING",
- "asdict",
- "assoc",
- "astuple",
- "attr",
- "attrib",
- "attributes",
- "attrs",
- "cmp_using",
- "converters",
- "evolve",
- "exceptions",
- "fields",
- "fields_dict",
- "filters",
- "get_run_validators",
- "has",
- "ib",
- "make_class",
- "resolve_types",
- "s",
- "set_run_validators",
- "setters",
- "validate",
- "validators",
-]
-
-if sys.version_info[:2] >= (3, 6):
- from ._next_gen import define, field, frozen, mutable # noqa: F401
-
- __all__.extend(("define", "field", "frozen", "mutable"))
diff --git a/spaces/asd123Xiao/kafuu_chino_sovits4.0/hubert/__init__.py b/spaces/asd123Xiao/kafuu_chino_sovits4.0/hubert/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/augmentedimaginationhackathon/paperstocode/fronty/src/app/app.component.spec.ts b/spaces/augmentedimaginationhackathon/paperstocode/fronty/src/app/app.component.spec.ts
deleted file mode 100644
index a9484763e1897e0710785fcaf78296ce99f81422..0000000000000000000000000000000000000000
--- a/spaces/augmentedimaginationhackathon/paperstocode/fronty/src/app/app.component.spec.ts
+++ /dev/null
@@ -1,35 +0,0 @@
-import { TestBed } from '@angular/core/testing';
-import { RouterTestingModule } from '@angular/router/testing';
-import { AppComponent } from './app.component';
-
-describe('AppComponent', () => {
- beforeEach(async () => {
- await TestBed.configureTestingModule({
- imports: [
- RouterTestingModule
- ],
- declarations: [
- AppComponent
- ],
- }).compileComponents();
- });
-
- it('should create the app', () => {
- const fixture = TestBed.createComponent(AppComponent);
- const app = fixture.componentInstance;
- expect(app).toBeTruthy();
- });
-
- it(`should have as title 'fronty'`, () => {
- const fixture = TestBed.createComponent(AppComponent);
- const app = fixture.componentInstance;
- expect(app.title).toEqual('fronty');
- });
-
- it('should render title', () => {
- const fixture = TestBed.createComponent(AppComponent);
- fixture.detectChanges();
- const compiled = fixture.nativeElement as HTMLElement;
- expect(compiled.querySelector('.content span')?.textContent).toContain('fronty app is running!');
- });
-});
diff --git a/spaces/avorozhko/funbot/README.md b/spaces/avorozhko/funbot/README.md
deleted file mode 100644
index 92c44afcadb83d7d8556b1599c5e8186c032e2b7..0000000000000000000000000000000000000000
--- a/spaces/avorozhko/funbot/README.md
+++ /dev/null
@@ -1,39 +0,0 @@
----
-title: Funbot
-emoji: 🦀
-colorFrom: red
-colorTo: yellow
-sdk: gradio
-app_file: app.py
-pinned: false
----
-
-# Configuration
-
-`title`: _string_
-Display title for the Space
-
-`emoji`: _string_
-Space emoji (emoji-only character allowed)
-
-`colorFrom`: _string_
-Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
-`colorTo`: _string_
-Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
-`sdk`: _string_
-Can be either `gradio`, `streamlit`, or `static`
-
-`sdk_version` : _string_
-Only applicable for `streamlit` SDK.
-See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.
-
-`app_file`: _string_
-Path to your main application file (which contains either `gradio` or `streamlit` Python code, or `static` html code).
-Path is relative to the root of the repository.
-
-`pinned`: _boolean_
-Whether the Space stays on top of your list.
-
- TODO: write new README.MD
\ No newline at end of file
diff --git a/spaces/awacke1/3DVirtualFood/app.py b/spaces/awacke1/3DVirtualFood/app.py
deleted file mode 100644
index 06fd1947c7e9be88f0e449f073d510ed754a739b..0000000000000000000000000000000000000000
--- a/spaces/awacke1/3DVirtualFood/app.py
+++ /dev/null
@@ -1,27 +0,0 @@
-import time
-import gradio as gr
-import os
-
-
-def load_mesh(mesh_file_name):
- time.sleep(2)
- return mesh_file_name
-
-description="3D Virtual Food 🥐🥑🥒🥓🥔🥕🥖🥗🥘🥙🥚🥛🥜🥝🥞🥟🥠🥡🥢🥣🥤🥥🥦🥧🥨🥩🥪🥫🥬🥭🥮🥯"
-
-inputs = gr.Model3D()
-outputs = gr.Model3D(clear_color=[0.8, 0.2, 0.2, 1.0])
-
-demo = gr.Interface(
- fn=load_mesh,
- inputs=inputs,
- outputs=outputs,
- examples=[
- [os.path.join(os.path.dirname(__file__), "FinalBaseMesh.obj")],
- [os.path.join(os.path.dirname(__file__), "BEAR_BLK.OBJ")]
- ],
- description=description,
- cache_examples=True,
-)
-
-demo.launch()
\ No newline at end of file
diff --git a/spaces/awacke1/Docker.Jupyterlab.Integration.HF/start_server.sh b/spaces/awacke1/Docker.Jupyterlab.Integration.HF/start_server.sh
deleted file mode 100644
index 505a6f60f995b547565c578802442fe1b9064110..0000000000000000000000000000000000000000
--- a/spaces/awacke1/Docker.Jupyterlab.Integration.HF/start_server.sh
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/bin/bash
-JUPYTER_TOKEN="${JUPYTER_TOKEN:=huggingface}"
-
-echo "Starting Jupyter Lab with token $JUPYTER_TOKEN"
-
-jupyter-lab \
- --ip 0.0.0.0 \
- --port 7860 \
- --no-browser \
- --allow-root \
- --ServerApp.token="$JUPYTER_TOKEN" \
- --ServerApp.tornado_settings="{'headers': {'Content-Security-Policy': 'frame-ancestors *'}}" \
- --ServerApp.cookie_options="{'SameSite': 'None', 'Secure': True}" \
- --ServerApp.disable_check_xsrf=True \
- --LabApp.news_url=None \
- --LabApp.check_for_updates_class="jupyterlab.NeverCheckForUpdate"
\ No newline at end of file
diff --git a/spaces/awacke1/LionImageSearch/app.py b/spaces/awacke1/LionImageSearch/app.py
deleted file mode 100644
index 8e9e2e6d9fea984f0930ff5c6f9c4501b82d4c5c..0000000000000000000000000000000000000000
--- a/spaces/awacke1/LionImageSearch/app.py
+++ /dev/null
@@ -1,36 +0,0 @@
-import pandas as pd
-import gradio as gr
-from datasets import load_dataset
-
-dataset = load_dataset("laion/laion-art", split="train") # 1.2G parquet of description and url to images representing art
-df = pd.DataFrame(dataset)
-df = df[["TEXT", "URL"]] # just text description and URL to image
-df = df.sample(frac=1) # randomize order
-
-def display_df():
- df_images = df.head(100)
- return df_images
-
-def display_next100(dataframe, end):
- dataframe = dataframe.sample(frac=1)
- start = (end or dataframe.index[-1]) + 1
- end = start + 99
- df_images = df.loc[start:end]
- return df_images, end
-
-with gr.Blocks() as demo:
-    gr.Markdown("🦁Lion Image Search🎨")
-    gr.Markdown("""Art Descriptions from Laion Art. Create Art Here. Papers, Code, Datasets for Image AI Datasets""")
-
- with gr.Row():
- num_end = gr.Number(visible=False)
- b1 = gr.Button("Images with Descriptions 0-100")
- b2 = gr.Button("Next 100 Images with Descriptions")
-
- with gr.Row():
- out_dataframe = gr.Dataframe(wrap=True, max_rows=100, overflow_row_behaviour= "paginate", headers=['TEXT','URL'])
-
- b1.click(fn=display_df, outputs=out_dataframe)
- b2.click(fn=display_next100, inputs= [out_dataframe, num_end ], outputs=[out_dataframe, num_end])
-
-demo.launch(debug=True, show_error=True)
\ No newline at end of file
diff --git a/spaces/awinml/api_vicuna-AlekseyKorshuk-7B-GPTQ-4bit-128g-GGML/README.md b/spaces/awinml/api_vicuna-AlekseyKorshuk-7B-GPTQ-4bit-128g-GGML/README.md
deleted file mode 100644
index ea7769c3a17dffa787cf3e65255fbb8762883689..0000000000000000000000000000000000000000
--- a/spaces/awinml/api_vicuna-AlekseyKorshuk-7B-GPTQ-4bit-128g-GGML/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Api Vicuna
-emoji: 👩💻
-colorFrom: blue
-colorTo: green
-sdk: gradio
-sdk_version: 3.12.0
-python_version: 3.9.13
-app_file: app.py
-pinned: false
-license: mit
----
\ No newline at end of file
diff --git a/spaces/ayaanzaveri/whisper-webui/app.py b/spaces/ayaanzaveri/whisper-webui/app.py
deleted file mode 100644
index d05cd38547f127590eb2baec243fc45b01098fc9..0000000000000000000000000000000000000000
--- a/spaces/ayaanzaveri/whisper-webui/app.py
+++ /dev/null
@@ -1,515 +0,0 @@
-from datetime import datetime
-import math
-from typing import Iterator, Union
-import argparse
-
-from io import StringIO
-import os
-import pathlib
-import tempfile
-import zipfile
-import numpy as np
-
-import torch
-from src.config import ApplicationConfig
-from src.hooks.whisperProgressHook import ProgressListener, SubTaskProgressListener, create_progress_listener_handle
-from src.modelCache import ModelCache
-from src.source import get_audio_source_collection
-from src.vadParallel import ParallelContext, ParallelTranscription
-
-# External programs
-import ffmpeg
-
-# UI
-import gradio as gr
-
-from src.download import ExceededMaximumDuration, download_url
-from src.utils import slugify, write_srt, write_vtt
-from src.vad import AbstractTranscription, NonSpeechStrategy, PeriodicTranscriptionConfig, TranscriptionConfig, VadPeriodicTranscription, VadSileroTranscription
-from src.whisperContainer import WhisperContainer
-
-# Configure more application defaults in config.json5
-
-# Gradio seems to truncate files without keeping the extension, so we need to truncate the file prefix ourselves
-MAX_FILE_PREFIX_LENGTH = 17
-
-# Limit auto_parallel to a certain number of CPUs (specify vad_cpu_cores to get a higher number)
-MAX_AUTO_CPU_CORES = 8
-
-LANGUAGES = [
- "English", "Chinese", "German", "Spanish", "Russian", "Korean",
- "French", "Japanese", "Portuguese", "Turkish", "Polish", "Catalan",
- "Dutch", "Arabic", "Swedish", "Italian", "Indonesian", "Hindi",
- "Finnish", "Vietnamese", "Hebrew", "Ukrainian", "Greek", "Malay",
- "Czech", "Romanian", "Danish", "Hungarian", "Tamil", "Norwegian",
- "Thai", "Urdu", "Croatian", "Bulgarian", "Lithuanian", "Latin",
- "Maori", "Malayalam", "Welsh", "Slovak", "Telugu", "Persian",
- "Latvian", "Bengali", "Serbian", "Azerbaijani", "Slovenian",
- "Kannada", "Estonian", "Macedonian", "Breton", "Basque", "Icelandic",
- "Armenian", "Nepali", "Mongolian", "Bosnian", "Kazakh", "Albanian",
- "Swahili", "Galician", "Marathi", "Punjabi", "Sinhala", "Khmer",
- "Shona", "Yoruba", "Somali", "Afrikaans", "Occitan", "Georgian",
- "Belarusian", "Tajik", "Sindhi", "Gujarati", "Amharic", "Yiddish",
- "Lao", "Uzbek", "Faroese", "Haitian Creole", "Pashto", "Turkmen",
- "Nynorsk", "Maltese", "Sanskrit", "Luxembourgish", "Myanmar", "Tibetan",
- "Tagalog", "Malagasy", "Assamese", "Tatar", "Hawaiian", "Lingala",
- "Hausa", "Bashkir", "Javanese", "Sundanese"
-]
-
-WHISPER_MODELS = ["tiny", "base", "small", "medium", "large", "large-v1", "large-v2"]
-
-class WhisperTranscriber:
- def __init__(self, input_audio_max_duration: float = None, vad_process_timeout: float = None,
- vad_cpu_cores: int = 1, delete_uploaded_files: bool = False, output_dir: str = None,
- app_config: ApplicationConfig = None):
- self.model_cache = ModelCache()
- self.parallel_device_list = None
- self.gpu_parallel_context = None
- self.cpu_parallel_context = None
- self.vad_process_timeout = vad_process_timeout
- self.vad_cpu_cores = vad_cpu_cores
-
- self.vad_model = None
- self.inputAudioMaxDuration = input_audio_max_duration
- self.deleteUploadedFiles = delete_uploaded_files
- self.output_dir = output_dir
-
- self.app_config = app_config
-
- def set_parallel_devices(self, vad_parallel_devices: str):
- self.parallel_device_list = [ device.strip() for device in vad_parallel_devices.split(",") ] if vad_parallel_devices else None
-
- def set_auto_parallel(self, auto_parallel: bool):
- if auto_parallel:
- if torch.cuda.is_available():
- self.parallel_device_list = [ str(gpu_id) for gpu_id in range(torch.cuda.device_count())]
-
- self.vad_cpu_cores = min(os.cpu_count(), MAX_AUTO_CPU_CORES)
- print("[Auto parallel] Using GPU devices " + str(self.parallel_device_list) + " and " + str(self.vad_cpu_cores) + " CPU cores for VAD/transcription.")
-
- # Entry function for the simple tab
- def transcribe_webui_simple(self, modelName, languageName, urlData, multipleFiles, microphoneData, task, vad, vadMergeWindow, vadMaxMergeSize, vadPadding, vadPromptWindow,
- progress=gr.Progress()):
- return self.transcribe_webui(modelName, languageName, urlData, multipleFiles, microphoneData, task, vad, vadMergeWindow, vadMaxMergeSize, vadPadding, vadPromptWindow,
- progress=progress)
-
- # Entry function for the full tab
- def transcribe_webui_full(self, modelName, languageName, urlData, multipleFiles, microphoneData, task, vad, vadMergeWindow, vadMaxMergeSize, vadPadding, vadPromptWindow,
- initial_prompt: str, temperature: float, best_of: int, beam_size: int, patience: float, length_penalty: float, suppress_tokens: str,
- condition_on_previous_text: bool, fp16: bool, temperature_increment_on_fallback: float,
- compression_ratio_threshold: float, logprob_threshold: float, no_speech_threshold: float,
- progress=gr.Progress()):
-
- # Handle temperature_increment_on_fallback
- if temperature_increment_on_fallback is not None:
- temperature = tuple(np.arange(temperature, 1.0 + 1e-6, temperature_increment_on_fallback))
- else:
- temperature = [temperature]
-
- return self.transcribe_webui(modelName, languageName, urlData, multipleFiles, microphoneData, task, vad, vadMergeWindow, vadMaxMergeSize, vadPadding, vadPromptWindow,
- initial_prompt=initial_prompt, temperature=temperature, best_of=best_of, beam_size=beam_size, patience=patience, length_penalty=length_penalty, suppress_tokens=suppress_tokens,
- condition_on_previous_text=condition_on_previous_text, fp16=fp16,
- compression_ratio_threshold=compression_ratio_threshold, logprob_threshold=logprob_threshold, no_speech_threshold=no_speech_threshold,
- progress=progress)
-
- def transcribe_webui(self, modelName, languageName, urlData, multipleFiles, microphoneData, task, vad, vadMergeWindow, vadMaxMergeSize, vadPadding, vadPromptWindow,
- progress: gr.Progress = None, **decodeOptions: dict):
- try:
- sources = self.__get_source(urlData, multipleFiles, microphoneData)
-
- try:
- selectedLanguage = languageName.lower() if len(languageName) > 0 else None
- selectedModel = modelName if modelName is not None else "base"
-
- model = WhisperContainer(model_name=selectedModel, cache=self.model_cache, models=self.app_config.models)
-
- # Result
- download = []
- zip_file_lookup = {}
- text = ""
- vtt = ""
-
- # Write result
- downloadDirectory = tempfile.mkdtemp()
- source_index = 0
-
- outputDirectory = self.output_dir if self.output_dir is not None else downloadDirectory
-
- # Progress
- total_duration = sum([source.get_audio_duration() for source in sources])
- current_progress = 0
-
- # A listener that will report progress to Gradio
- root_progress_listener = self._create_progress_listener(progress)
-
- # Execute whisper
- for source in sources:
- source_prefix = ""
- source_audio_duration = source.get_audio_duration()
-
- if (len(sources) > 1):
- # Prefix (minimum 2 digits)
- source_index += 1
- source_prefix = str(source_index).zfill(2) + "_"
- print("Transcribing ", source.source_path)
-
- scaled_progress_listener = SubTaskProgressListener(root_progress_listener,
- base_task_total=total_duration,
- sub_task_start=current_progress,
- sub_task_total=source_audio_duration)
-
- # Transcribe
- result = self.transcribe_file(model, source.source_path, selectedLanguage, task, vad, vadMergeWindow, vadMaxMergeSize, vadPadding, vadPromptWindow, scaled_progress_listener, **decodeOptions)
- filePrefix = slugify(source_prefix + source.get_short_name(), allow_unicode=True)
-
- # Update progress
- current_progress += source_audio_duration
-
- source_download, source_text, source_vtt = self.write_result(result, filePrefix, outputDirectory)
-
- if len(sources) > 1:
- # Add new line separators
- if (len(source_text) > 0):
- source_text += os.linesep + os.linesep
- if (len(source_vtt) > 0):
- source_vtt += os.linesep + os.linesep
-
- # Append file name to source text too
- source_text = source.get_full_name() + ":" + os.linesep + source_text
- source_vtt = source.get_full_name() + ":" + os.linesep + source_vtt
-
- # Add to result
- download.extend(source_download)
- text += source_text
- vtt += source_vtt
-
- if (len(sources) > 1):
- # Zip files support at least 260 characters, but we'll play it safe and use 200
- zipFilePrefix = slugify(source_prefix + source.get_short_name(max_length=200), allow_unicode=True)
-
- # File names in ZIP file can be longer
- for source_download_file in source_download:
- # Get file postfix (after last -)
- filePostfix = os.path.basename(source_download_file).split("-")[-1]
- zip_file_name = zipFilePrefix + "-" + filePostfix
- zip_file_lookup[source_download_file] = zip_file_name
-
- # Create zip file from all sources
- if len(sources) > 1:
- downloadAllPath = os.path.join(downloadDirectory, "All_Output-" + datetime.now().strftime("%Y%m%d-%H%M%S") + ".zip")
-
- with zipfile.ZipFile(downloadAllPath, 'w', zipfile.ZIP_DEFLATED) as zip:
- for download_file in download:
- # Get file name from lookup
- zip_file_name = zip_file_lookup.get(download_file, os.path.basename(download_file))
- zip.write(download_file, arcname=zip_file_name)
-
- download.insert(0, downloadAllPath)
-
- return download, text, vtt
-
- finally:
- # Cleanup source
- if self.deleteUploadedFiles:
- for source in sources:
- print("Deleting source file " + source.source_path)
-
- try:
- os.remove(source.source_path)
- except Exception as e:
- # Ignore error - it's just a cleanup
- print("Error deleting source file " + source.source_path + ": " + str(e))
-
- except ExceededMaximumDuration as e:
- return [], ("[ERROR]: Maximum remote video length is " + str(e.maxDuration) + "s, file was " + str(e.videoDuration) + "s"), "[ERROR]"
-
- def transcribe_file(self, model: WhisperContainer, audio_path: str, language: str, task: str = None, vad: str = None,
- vadMergeWindow: float = 5, vadMaxMergeSize: float = 150, vadPadding: float = 1, vadPromptWindow: float = 1,
- progressListener: ProgressListener = None, **decodeOptions: dict):
-
- initial_prompt = decodeOptions.pop('initial_prompt', None)
-
- if progressListener is None:
- # Default progress listener
- progressListener = ProgressListener()
-
- if ('task' in decodeOptions):
- task = decodeOptions.pop('task')
-
- # Callable for processing an audio file
- whisperCallable = model.create_callback(language, task, initial_prompt, **decodeOptions)
-
- # The results
- if (vad == 'silero-vad'):
- # Silero VAD where non-speech gaps are transcribed
- process_gaps = self._create_silero_config(NonSpeechStrategy.CREATE_SEGMENT, vadMergeWindow, vadMaxMergeSize, vadPadding, vadPromptWindow)
- result = self.process_vad(audio_path, whisperCallable, self.vad_model, process_gaps, progressListener=progressListener)
- elif (vad == 'silero-vad-skip-gaps'):
- # Silero VAD where non-speech gaps are simply ignored
- skip_gaps = self._create_silero_config(NonSpeechStrategy.SKIP, vadMergeWindow, vadMaxMergeSize, vadPadding, vadPromptWindow)
- result = self.process_vad(audio_path, whisperCallable, self.vad_model, skip_gaps, progressListener=progressListener)
- elif (vad == 'silero-vad-expand-into-gaps'):
- # Use Silero VAD where speech-segments are expanded into non-speech gaps
- expand_gaps = self._create_silero_config(NonSpeechStrategy.EXPAND_SEGMENT, vadMergeWindow, vadMaxMergeSize, vadPadding, vadPromptWindow)
- result = self.process_vad(audio_path, whisperCallable, self.vad_model, expand_gaps, progressListener=progressListener)
- elif (vad == 'periodic-vad'):
- # Very simple VAD - mark every 5 minutes as speech. This makes it less likely that Whisper enters an infinite loop, but
- # it may create a break in the middle of a sentence, causing some artifacts.
- periodic_vad = VadPeriodicTranscription()
- period_config = PeriodicTranscriptionConfig(periodic_duration=vadMaxMergeSize, max_prompt_window=vadPromptWindow)
- result = self.process_vad(audio_path, whisperCallable, periodic_vad, period_config, progressListener=progressListener)
-
- else:
- if (self._has_parallel_devices()):
- # Use a simple period transcription instead, as we need to use the parallel context
- periodic_vad = VadPeriodicTranscription()
- period_config = PeriodicTranscriptionConfig(periodic_duration=math.inf, max_prompt_window=1)
-
- result = self.process_vad(audio_path, whisperCallable, periodic_vad, period_config, progressListener=progressListener)
- else:
- # Default VAD
- result = whisperCallable.invoke(audio_path, 0, None, None, progress_listener=progressListener)
-
- return result
-
- def _create_progress_listener(self, progress: gr.Progress):
- if (progress is None):
- # Dummy progress listener
- return ProgressListener()
-
- class ForwardingProgressListener(ProgressListener):
- def __init__(self, progress: gr.Progress):
- self.progress = progress
-
- def on_progress(self, current: Union[int, float], total: Union[int, float]):
- # From 0 to 1
- self.progress(current / total)
-
- def on_finished(self):
- self.progress(1)
-
- return ForwardingProgressListener(progress)
-
- def process_vad(self, audio_path, whisperCallable, vadModel: AbstractTranscription, vadConfig: TranscriptionConfig,
- progressListener: ProgressListener = None):
- if (not self._has_parallel_devices()):
- # No parallel devices, so just run the VAD and Whisper in sequence
- return vadModel.transcribe(audio_path, whisperCallable, vadConfig, progressListener=progressListener)
-
- gpu_devices = self.parallel_device_list
-
- if (gpu_devices is None or len(gpu_devices) == 0):
- # No GPU devices specified, pass the current environment variable to the first GPU process. This may be NULL.
- gpu_devices = [os.environ.get("CUDA_VISIBLE_DEVICES", None)]
-
- # Create parallel context if needed
- if (self.gpu_parallel_context is None):
-            # Create a context with processes and automatically clear the pool after vad_process_timeout seconds of inactivity
- self.gpu_parallel_context = ParallelContext(num_processes=len(gpu_devices), auto_cleanup_timeout_seconds=self.vad_process_timeout)
- # We also need a CPU context for the VAD
- if (self.cpu_parallel_context is None):
- self.cpu_parallel_context = ParallelContext(num_processes=self.vad_cpu_cores, auto_cleanup_timeout_seconds=self.vad_process_timeout)
-
- parallel_vad = ParallelTranscription()
- return parallel_vad.transcribe_parallel(transcription=vadModel, audio=audio_path, whisperCallable=whisperCallable,
- config=vadConfig, cpu_device_count=self.vad_cpu_cores, gpu_devices=gpu_devices,
- cpu_parallel_context=self.cpu_parallel_context, gpu_parallel_context=self.gpu_parallel_context,
- progress_listener=progressListener)
-
- def _has_parallel_devices(self):
- return (self.parallel_device_list is not None and len(self.parallel_device_list) > 0) or self.vad_cpu_cores > 1
-
- def _concat_prompt(self, prompt1, prompt2):
- if (prompt1 is None):
- return prompt2
- elif (prompt2 is None):
- return prompt1
- else:
- return prompt1 + " " + prompt2
-
- def _create_silero_config(self, non_speech_strategy: NonSpeechStrategy, vadMergeWindow: float = 5, vadMaxMergeSize: float = 150, vadPadding: float = 1, vadPromptWindow: float = 1):
- # Use Silero VAD
- if (self.vad_model is None):
- self.vad_model = VadSileroTranscription()
-
- config = TranscriptionConfig(non_speech_strategy = non_speech_strategy,
- max_silent_period=vadMergeWindow, max_merge_size=vadMaxMergeSize,
- segment_padding_left=vadPadding, segment_padding_right=vadPadding,
- max_prompt_window=vadPromptWindow)
-
- return config
-
- def write_result(self, result: dict, source_name: str, output_dir: str):
- if not os.path.exists(output_dir):
- os.makedirs(output_dir)
-
- text = result["text"]
- language = result["language"]
- languageMaxLineWidth = self.__get_max_line_width(language)
-
- print("Max line width " + str(languageMaxLineWidth))
- vtt = self.__get_subs(result["segments"], "vtt", languageMaxLineWidth)
- srt = self.__get_subs(result["segments"], "srt", languageMaxLineWidth)
-
- output_files = []
-        output_files.append(self.__create_file(srt, output_dir, source_name + "-subs.srt"))
-        output_files.append(self.__create_file(vtt, output_dir, source_name + "-subs.vtt"))
-        output_files.append(self.__create_file(text, output_dir, source_name + "-transcript.txt"))
-
- return output_files, text, vtt
-
- def clear_cache(self):
- self.model_cache.clear()
- self.vad_model = None
-
- def __get_source(self, urlData, multipleFiles, microphoneData):
- return get_audio_source_collection(urlData, multipleFiles, microphoneData, self.inputAudioMaxDuration)
-
- def __get_max_line_width(self, language: str) -> int:
- if (language and language.lower() in ["japanese", "ja", "chinese", "zh"]):
- # Chinese characters and kana are wider, so limit line length to 40 characters
- return 40
- else:
- # TODO: Add more languages
- # 80 latin characters should fit on a 1080p/720p screen
- return 80
-
- def __get_subs(self, segments: Iterator[dict], format: str, maxLineWidth: int) -> str:
- segmentStream = StringIO()
-
- if format == 'vtt':
- write_vtt(segments, file=segmentStream, maxLineWidth=maxLineWidth)
- elif format == 'srt':
- write_srt(segments, file=segmentStream, maxLineWidth=maxLineWidth)
- else:
- raise Exception("Unknown format " + format)
-
- segmentStream.seek(0)
- return segmentStream.read()
-
- def __create_file(self, text: str, directory: str, fileName: str) -> str:
- # Write the text to a file
- with open(os.path.join(directory, fileName), 'w+', encoding="utf-8") as file:
- file.write(text)
-
- return file.name
-
- def close(self):
- print("Closing parallel contexts")
- self.clear_cache()
-
- if (self.gpu_parallel_context is not None):
- self.gpu_parallel_context.close()
- if (self.cpu_parallel_context is not None):
- self.cpu_parallel_context.close()
-
-
-def create_ui(app_config: ApplicationConfig):
- ui = WhisperTranscriber(app_config.input_audio_max_duration, app_config.vad_process_timeout, app_config.vad_cpu_cores,
- app_config.delete_uploaded_files, app_config.output_dir, app_config)
-
- # Specify a list of devices to use for parallel processing
- ui.set_parallel_devices(app_config.vad_parallel_devices)
- ui.set_auto_parallel(app_config.auto_parallel)
-
- ui_description = "Whisper is a general-purpose speech recognition model. It is trained on a large dataset of diverse "
- ui_description += " audio and is also a multi-task model that can perform multilingual speech recognition "
- ui_description += " as well as speech translation and language identification. "
-
- ui_description += "\n\n\n\nFor longer audio files (>10 minutes) not in English, it is recommended that you select Silero VAD (Voice Activity Detector) in the VAD option."
-
- if app_config.input_audio_max_duration > 0:
- ui_description += "\n\n" + "Max audio file length: " + str(app_config.input_audio_max_duration) + " s"
-
- ui_article = "Read the [documentation here](https://gitlab.com/aadnk/whisper-webui/-/blob/main/docs/options.md)"
-
- whisper_models = app_config.get_model_names()
-
- simple_inputs = lambda : [
- gr.Dropdown(choices=whisper_models, value=app_config.default_model_name, label="Model"),
- gr.Dropdown(choices=sorted(LANGUAGES), label="Language", value=app_config.language),
- gr.Text(label="URL (YouTube, etc.)"),
- gr.File(label="Upload Files", file_count="multiple"),
- gr.Audio(source="microphone", type="filepath", label="Microphone Input"),
- gr.Dropdown(choices=["transcribe", "translate"], label="Task", value=app_config.task),
- gr.Dropdown(choices=["none", "silero-vad", "silero-vad-skip-gaps", "silero-vad-expand-into-gaps", "periodic-vad"], value=app_config.default_vad, label="VAD"),
- gr.Number(label="VAD - Merge Window (s)", precision=0, value=app_config.vad_merge_window),
- gr.Number(label="VAD - Max Merge Size (s)", precision=0, value=app_config.vad_max_merge_size),
- gr.Number(label="VAD - Padding (s)", precision=None, value=app_config.vad_padding),
- gr.Number(label="VAD - Prompt Window (s)", precision=None, value=app_config.vad_prompt_window),
- ]
-
- simple_transcribe = gr.Interface(fn=ui.transcribe_webui_simple, description=ui_description, article=ui_article, inputs=simple_inputs(), outputs=[
- gr.File(label="Download"),
- gr.Text(label="Transcription"),
- gr.Text(label="Segments")
- ])
-
- full_description = ui_description + "\n\n\n\n" + "Be careful when changing some of the options in the full interface - this can cause the model to crash."
-
- full_transcribe = gr.Interface(fn=ui.transcribe_webui_full, description=full_description, article=ui_article, inputs=[
- *simple_inputs(),
- gr.TextArea(label="Initial Prompt"),
- gr.Number(label="Temperature", value=app_config.temperature),
- gr.Number(label="Best Of - Non-zero temperature", value=app_config.best_of, precision=0),
- gr.Number(label="Beam Size - Zero temperature", value=app_config.beam_size, precision=0),
- gr.Number(label="Patience - Zero temperature", value=app_config.patience),
- gr.Number(label="Length Penalty - Any temperature", value=app_config.length_penalty),
- gr.Text(label="Suppress Tokens - Comma-separated list of token IDs", value=app_config.suppress_tokens),
- gr.Checkbox(label="Condition on previous text", value=app_config.condition_on_previous_text),
- gr.Checkbox(label="FP16", value=app_config.fp16),
- gr.Number(label="Temperature increment on fallback", value=app_config.temperature_increment_on_fallback),
- gr.Number(label="Compression ratio threshold", value=app_config.compression_ratio_threshold),
- gr.Number(label="Logprob threshold", value=app_config.logprob_threshold),
- gr.Number(label="No speech threshold", value=app_config.no_speech_threshold)
- ], outputs=[
- gr.File(label="Download"),
- gr.Text(label="Transcription"),
- gr.Text(label="Segments")
- ])
-
- demo = gr.TabbedInterface([simple_transcribe, full_transcribe], tab_names=["Simple", "Full"])
-
- # Queue up the demo
- if app_config.queue_concurrency_count is not None and app_config.queue_concurrency_count > 0:
- demo.queue(concurrency_count=app_config.queue_concurrency_count)
-
- demo.launch(share=app_config.share, server_name=app_config.server_name, server_port=app_config.server_port)
-
- # Clean up
- ui.close()
-
-if __name__ == '__main__':
- app_config = ApplicationConfig.create_default()
- whisper_models = app_config.get_model_names()
-
- parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
- parser.add_argument("--input_audio_max_duration", type=int, default=app_config.input_audio_max_duration, \
- help="Maximum audio file length in seconds, or -1 for no limit.") # 600
- parser.add_argument("--share", type=bool, default=app_config.share, \
- help="True to share the app on HuggingFace.") # False
- parser.add_argument("--server_name", type=str, default=app_config.server_name, \
- help="The host or IP to bind to. If None, bind to localhost.") # None
- parser.add_argument("--server_port", type=int, default=app_config.server_port, \
- help="The port to bind to.") # 7860
- parser.add_argument("--queue_concurrency_count", type=int, default=app_config.queue_concurrency_count, \
- help="The number of concurrent requests to process.") # 1
- parser.add_argument("--default_model_name", type=str, choices=whisper_models, default=app_config.default_model_name, \
- help="The default model name.") # medium
- parser.add_argument("--default_vad", type=str, default=app_config.default_vad, \
- help="The default VAD.") # silero-vad
- parser.add_argument("--vad_parallel_devices", type=str, default=app_config.vad_parallel_devices, \
-                        help="A comma-delimited list of CUDA devices to use for parallel processing. If None, disable parallel processing.") # ""
- parser.add_argument("--vad_cpu_cores", type=int, default=app_config.vad_cpu_cores, \
- help="The number of CPU cores to use for VAD pre-processing.") # 1
- parser.add_argument("--vad_process_timeout", type=float, default=app_config.vad_process_timeout, \
-                        help="The number of seconds before inactive processes are terminated. Use 0 to close processes immediately, or None for no timeout.") # 1800
- parser.add_argument("--auto_parallel", type=bool, default=app_config.auto_parallel, \
- help="True to use all available GPUs and CPU cores for processing. Use vad_cpu_cores/vad_parallel_devices to specify the number of CPU cores/GPUs to use.") # False
- parser.add_argument("--output_dir", "-o", type=str, default=app_config.output_dir, \
- help="directory to save the outputs") # None
-
- args = parser.parse_args().__dict__
-
- updated_config = app_config.update(**args)
- create_ui(app_config=updated_config)
\ No newline at end of file
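
One step in transcribe_webui_full above that is easy to misread is the temperature handling: a non-None temperature_increment_on_fallback expands the single temperature into the tuple of fallback temperatures that Whisper tries in order. A tiny standalone worked example of that branch:

import numpy as np

# Mirrors the expansion in transcribe_webui_full: temperature=0.0 with a 0.2 increment
# becomes the ladder of temperatures Whisper falls back through on decoding failures.
temperature, temperature_increment_on_fallback = 0.0, 0.2
temperatures = tuple(np.arange(temperature, 1.0 + 1e-6, temperature_increment_on_fallback))
print([round(float(t), 1) for t in temperatures])  # [0.0, 0.2, 0.4, 0.6, 0.8, 1.0]
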
diff --git a/spaces/barani/ControlNet/app_mlsd.py b/spaces/barani/ControlNet/app_mlsd.py
deleted file mode 100644
index 9440f2f480b3713aa081258909221eab792157b5..0000000000000000000000000000000000000000
--- a/spaces/barani/ControlNet/app_mlsd.py
+++ /dev/null
@@ -1,113 +0,0 @@
-#!/usr/bin/env python
-
-import gradio as gr
-
-from utils import randomize_seed_fn
-
-
-def create_demo(process, max_images=12, default_num_images=3):
- with gr.Blocks() as demo:
- with gr.Row():
- with gr.Column():
- image = gr.Image()
- prompt = gr.Textbox(label='Prompt')
- run_button = gr.Button('Run')
- with gr.Accordion('Advanced options', open=False):
- num_samples = gr.Slider(label='Number of images',
- minimum=1,
- maximum=max_images,
- value=default_num_images,
- step=1)
- image_resolution = gr.Slider(label='Image resolution',
- minimum=256,
- maximum=512,
- value=512,
- step=256)
- preprocess_resolution = gr.Slider(
- label='Preprocess resolution',
- minimum=128,
- maximum=512,
- value=512,
- step=1)
- mlsd_value_threshold = gr.Slider(
- label='Hough value threshold (MLSD)',
- minimum=0.01,
- maximum=2.0,
- value=0.1,
- step=0.01)
- mlsd_distance_threshold = gr.Slider(
- label='Hough distance threshold (MLSD)',
- minimum=0.01,
- maximum=20.0,
- value=0.1,
- step=0.01)
- num_steps = gr.Slider(label='Number of steps',
- minimum=1,
- maximum=100,
- value=20,
- step=1)
- guidance_scale = gr.Slider(label='Guidance scale',
- minimum=0.1,
- maximum=30.0,
- value=9.0,
- step=0.1)
- seed = gr.Slider(label='Seed',
- minimum=0,
- maximum=1000000,
- step=1,
- value=0,
- randomize=True)
- randomize_seed = gr.Checkbox(label='Randomize seed',
- value=True)
- a_prompt = gr.Textbox(
- label='Additional prompt',
- value='best quality, extremely detailed')
- n_prompt = gr.Textbox(
- label='Negative prompt',
- value=
- 'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality'
- )
- with gr.Column():
- result = gr.Gallery(label='Output', show_label=False).style(
- columns=2, object_fit='scale-down')
- inputs = [
- image,
- prompt,
- a_prompt,
- n_prompt,
- num_samples,
- image_resolution,
- preprocess_resolution,
- num_steps,
- guidance_scale,
- seed,
- mlsd_value_threshold,
- mlsd_distance_threshold,
- ]
- prompt.submit(
- fn=randomize_seed_fn,
- inputs=[seed, randomize_seed],
- outputs=seed,
- ).then(
- fn=process,
- inputs=inputs,
- outputs=result,
- )
- run_button.click(
- fn=randomize_seed_fn,
- inputs=[seed, randomize_seed],
- outputs=seed,
- ).then(
- fn=process,
- inputs=inputs,
- outputs=result,
- api_name='mlsd',
- )
- return demo
-
-
-if __name__ == '__main__':
- from model import Model
- model = Model(task_name='MLSD')
- demo = create_demo(model.process_mlsd)
- demo.queue().launch()
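
The randomize_seed_fn imported from utils is not included in this diff. A minimal sketch of what such a helper typically looks like is below; the upper bound is an assumption chosen to match the seed slider defined above.

import random

MAX_SEED = 1_000_000  # assumed to match the seed slider's maximum above


def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
    # When the "Randomize seed" checkbox is ticked, ignore the incoming value
    # and draw a fresh seed; otherwise pass the user-chosen seed through unchanged.
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    return seed
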
diff --git a/spaces/bigjoker/stable-diffusion-webui/modules/sd_hijack_open_clip.py b/spaces/bigjoker/stable-diffusion-webui/modules/sd_hijack_open_clip.py
deleted file mode 100644
index f13feb5c0373d10248d266e02dd28dd88b02c175..0000000000000000000000000000000000000000
--- a/spaces/bigjoker/stable-diffusion-webui/modules/sd_hijack_open_clip.py
+++ /dev/null
@@ -1,37 +0,0 @@
-import open_clip.tokenizer
-import torch
-
-from modules import sd_hijack_clip, devices
-from modules.shared import opts
-
-tokenizer = open_clip.tokenizer._tokenizer
-
-
-class FrozenOpenCLIPEmbedderWithCustomWords(sd_hijack_clip.FrozenCLIPEmbedderWithCustomWordsBase):
- def __init__(self, wrapped, hijack):
- super().__init__(wrapped, hijack)
-
-        self.comma_token = [v for k, v in tokenizer.encoder.items() if k == ',</w>'][0]
-        self.id_start = tokenizer.encoder["<start_of_text>"]
-        self.id_end = tokenizer.encoder["<end_of_text>"]
- self.id_pad = 0
-
- def tokenize(self, texts):
- assert not opts.use_old_emphasis_implementation, 'Old emphasis implementation not supported for Open Clip'
-
- tokenized = [tokenizer.encode(text) for text in texts]
-
- return tokenized
-
- def encode_with_transformers(self, tokens):
- # set self.wrapped.layer_idx here according to opts.CLIP_stop_at_last_layers
- z = self.wrapped.encode_with_transformer(tokens)
-
- return z
-
- def encode_embedding_init_text(self, init_text, nvpt):
- ids = tokenizer.encode(init_text)
- ids = torch.asarray([ids], device=devices.device, dtype=torch.int)
- embedded = self.wrapped.model.token_embedding.wrapped(ids).squeeze(0)
-
- return embedded
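
The hijack above depends on looking up special entries directly in the open_clip BPE vocabulary. A short standalone check of those lookups (assuming the open_clip package is installed; the prompt string is made up for illustration):

import open_clip.tokenizer

tok = open_clip.tokenizer._tokenizer  # the module-level SimpleTokenizer instance

start_id = tok.encoder["<start_of_text>"]
end_id = tok.encoder["<end_of_text>"]
comma_id = tok.encoder[",</w>"]  # word-final comma in the BPE vocab
print(start_id, end_id, comma_id)

# tokenize() above returns plain tok.encode() output per prompt, with no
# start/end ids added at that stage; they are applied later by the base class.
print(tok.encode("a photo of a cat, high quality"))
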
diff --git a/spaces/bigjoker/stable-diffusion-webui/scripts/sd_upscale.py b/spaces/bigjoker/stable-diffusion-webui/scripts/sd_upscale.py
deleted file mode 100644
index dd64d7d385b34f960eebd7aec3233a84ea90609a..0000000000000000000000000000000000000000
--- a/spaces/bigjoker/stable-diffusion-webui/scripts/sd_upscale.py
+++ /dev/null
@@ -1,101 +0,0 @@
-import math
-
-import modules.scripts as scripts
-import gradio as gr
-from PIL import Image
-
-from modules import processing, shared, sd_samplers, images, devices
-from modules.processing import Processed
-from modules.shared import opts, cmd_opts, state
-
-
-class Script(scripts.Script):
- def title(self):
- return "SD upscale"
-
- def show(self, is_img2img):
- return is_img2img
-
- def ui(self, is_img2img):
-        info = gr.HTML("Will upscale the image by the selected scale factor; use width and height sliders to set tile size")
- overlap = gr.Slider(minimum=0, maximum=256, step=16, label='Tile overlap', value=64, elem_id=self.elem_id("overlap"))
- scale_factor = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label='Scale Factor', value=2.0, elem_id=self.elem_id("scale_factor"))
- upscaler_index = gr.Radio(label='Upscaler', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index", elem_id=self.elem_id("upscaler_index"))
-
- return [info, overlap, upscaler_index, scale_factor]
-
- def run(self, p, _, overlap, upscaler_index, scale_factor):
- if isinstance(upscaler_index, str):
- upscaler_index = [x.name.lower() for x in shared.sd_upscalers].index(upscaler_index.lower())
- processing.fix_seed(p)
- upscaler = shared.sd_upscalers[upscaler_index]
-
- p.extra_generation_params["SD upscale overlap"] = overlap
- p.extra_generation_params["SD upscale upscaler"] = upscaler.name
-
- initial_info = None
- seed = p.seed
-
- init_img = p.init_images[0]
- init_img = images.flatten(init_img, opts.img2img_background_color)
-
- if upscaler.name != "None":
- img = upscaler.scaler.upscale(init_img, scale_factor, upscaler.data_path)
- else:
- img = init_img
-
- devices.torch_gc()
-
- grid = images.split_grid(img, tile_w=p.width, tile_h=p.height, overlap=overlap)
-
- batch_size = p.batch_size
- upscale_count = p.n_iter
- p.n_iter = 1
- p.do_not_save_grid = True
- p.do_not_save_samples = True
-
- work = []
-
- for y, h, row in grid.tiles:
- for tiledata in row:
- work.append(tiledata[2])
-
- batch_count = math.ceil(len(work) / batch_size)
- state.job_count = batch_count * upscale_count
-
- print(f"SD upscaling will process a total of {len(work)} images tiled as {len(grid.tiles[0][2])}x{len(grid.tiles)} per upscale in a total of {state.job_count} batches.")
-
- result_images = []
- for n in range(upscale_count):
- start_seed = seed + n
- p.seed = start_seed
-
- work_results = []
- for i in range(batch_count):
- p.batch_size = batch_size
- p.init_images = work[i * batch_size:(i + 1) * batch_size]
-
- state.job = f"Batch {i + 1 + n * batch_count} out of {state.job_count}"
- processed = processing.process_images(p)
-
- if initial_info is None:
- initial_info = processed.info
-
- p.seed = processed.seed + 1
- work_results += processed.images
-
- image_index = 0
- for y, h, row in grid.tiles:
- for tiledata in row:
- tiledata[2] = work_results[image_index] if image_index < len(work_results) else Image.new("RGB", (p.width, p.height))
- image_index += 1
-
- combined_image = images.combine_grid(grid)
- result_images.append(combined_image)
-
- if opts.samples_save:
- images.save_image(combined_image, p.outpath_samples, "", start_seed, p.prompt, opts.samples_format, info=initial_info, p=p)
-
- processed = Processed(p, result_images, seed, initial_info)
-
- return processed
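
The job count in run() above falls out of the tile grid and batch size. A rough standalone estimate of that arithmetic (assuming tiles of the img2img width/height stepped by tile size minus overlap, which is how images.split_grid lays out the grid conceptually; the real helper may treat borders slightly differently):

import math

def estimate_jobs(upscaled_w, upscaled_h, tile_w=512, tile_h=512, overlap=64,
                  batch_size=4, upscale_count=1):
    # Tiles step by (tile size - overlap) across the upscaled image.
    cols = math.ceil((upscaled_w - overlap) / (tile_w - overlap))
    rows = math.ceil((upscaled_h - overlap) / (tile_h - overlap))
    tiles = rows * cols
    batch_count = math.ceil(tiles / batch_size)   # batches per upscale pass
    total_batches = batch_count * upscale_count   # corresponds to state.job_count
    return tiles, batch_count, total_batches

print(estimate_jobs(2048, 2048))  # (25, 7, 7) for a 2048x2048 upscale, 512px tiles, batch size 4
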
diff --git a/spaces/bigslime/stablediffusion-infinity/PyPatchMatch/travis.sh b/spaces/bigslime/stablediffusion-infinity/PyPatchMatch/travis.sh
deleted file mode 100644
index a6ea538775e25b4e9b8c855a38e400c82f9121bf..0000000000000000000000000000000000000000
--- a/spaces/bigslime/stablediffusion-infinity/PyPatchMatch/travis.sh
+++ /dev/null
@@ -1,9 +0,0 @@
-#! /bin/bash
-#
-# travis.sh
-# Copyright (C) 2020 Jiayuan Mao
-#
-# Distributed under terms of the MIT license.
-#
-
-make clean && make
diff --git a/spaces/bioriAsaeru/text-to-voice/IGO.v8.4.2.139242-320x240.apk Download and Install the Ultimate Navigation for Android.md b/spaces/bioriAsaeru/text-to-voice/IGO.v8.4.2.139242-320x240.apk Download and Install the Ultimate Navigation for Android.md
deleted file mode 100644
index 0eb6e03fd5dd3572e03a39d43458de5121447e9a..0000000000000000000000000000000000000000
--- a/spaces/bioriAsaeru/text-to-voice/IGO.v8.4.2.139242-320x240.apk Download and Install the Ultimate Navigation for Android.md
+++ /dev/null
@@ -1,6 +0,0 @@
-IGO.v8.4.2.139242-320x240.apk Download ⇔ https://urloso.com/2uyPuY
-
- aaccfb2cb3
-
-
-
diff --git a/spaces/brainblow/MusiCreator/audiocraft/utils/utils.py b/spaces/brainblow/MusiCreator/audiocraft/utils/utils.py
deleted file mode 100644
index 86e1448d065fa182ca69aae00d2f2a7eea55d8a4..0000000000000000000000000000000000000000
--- a/spaces/brainblow/MusiCreator/audiocraft/utils/utils.py
+++ /dev/null
@@ -1,234 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-from concurrent.futures import ProcessPoolExecutor
-from functools import wraps
-import hashlib
-import logging
-import typing as tp
-
-import flashy
-import flashy.distrib
-import omegaconf
-import torch
-from torch.nn.utils.rnn import pad_sequence
-
-
-logger = logging.getLogger(__name__)
-
-
-def dict_from_config(cfg: omegaconf.DictConfig) -> dict:
- """Convenience function to map an omegaconf configuration to a dictionary.
-
- Args:
- cfg (omegaconf.DictConfig): Original configuration to map to dict.
- Returns:
- dict: Config as dictionary object.
- """
- dct = omegaconf.OmegaConf.to_container(cfg, resolve=True)
- assert isinstance(dct, dict)
- return dct
-
-
-def random_subset(dataset, max_samples: int, seed: int = 42) -> torch.utils.data.Subset:
- if max_samples >= len(dataset):
- return dataset
-
- generator = torch.Generator().manual_seed(seed)
- perm = torch.randperm(len(dataset), generator=generator)
- return torch.utils.data.Subset(dataset, perm[:max_samples].tolist())
-
-
-def get_loader(dataset, num_samples: tp.Optional[int], batch_size: int,
- num_workers: int, seed: int, **kwargs) -> torch.utils.data.DataLoader:
- """Convenience function to load dataset into a dataloader with optional subset sampling.
-
- Args:
- dataset: Dataset to load.
- num_samples (Optional[int]): Number of samples to limit subset size.
- batch_size (int): Batch size.
- num_workers (int): Number of workers for data loading.
- seed (int): Random seed.
- """
- if num_samples is not None:
- dataset = random_subset(dataset, num_samples, seed)
-
- dataloader = flashy.distrib.loader(
- dataset,
- batch_size=batch_size,
- num_workers=num_workers,
- **kwargs
- )
- return dataloader
-
-
-def get_dataset_from_loader(dataloader):
- dataset = dataloader.dataset
- if isinstance(dataset, torch.utils.data.Subset):
- return dataset.dataset
- else:
- return dataset
-
-
-def multinomial(input: torch.Tensor, num_samples: int, replacement=False, *, generator=None):
- """torch.multinomial with arbitrary number of dimensions, and number of candidates on the last dimension.
-
- Args:
- input (torch.Tensor): The input tensor containing probabilities.
- num_samples (int): Number of samples to draw.
- replacement (bool): Whether to draw with replacement or not.
- Keywords args:
- generator (torch.Generator): A pseudorandom number generator for sampling.
- Returns:
- torch.Tensor: Last dimension contains num_samples indices
- sampled from the multinomial probability distribution
- located in the last dimension of tensor input.
- """
- input_ = input.reshape(-1, input.shape[-1])
- output_ = torch.multinomial(input_, num_samples=num_samples, replacement=replacement, generator=generator)
- output = output_.reshape(*list(input.shape[:-1]), -1)
- return output
-
-
-def sample_top_k(probs: torch.Tensor, k: int) -> torch.Tensor:
- """Sample next token from top K values along the last dimension of the input probs tensor.
-
- Args:
- probs (torch.Tensor): Input probabilities with token candidates on the last dimension.
- k (int): The k in “top-k”.
- Returns:
- torch.Tensor: Sampled tokens.
- """
- top_k_value, _ = torch.topk(probs, k, dim=-1)
- min_value_top_k = top_k_value[..., [-1]]
- probs *= (probs >= min_value_top_k).float()
- probs.div_(probs.sum(dim=-1, keepdim=True))
- next_token = multinomial(probs, num_samples=1)
- return next_token
-
-
-def sample_top_p(probs: torch.Tensor, p: float) -> torch.Tensor:
- """Sample next token from top P probabilities along the last dimension of the input probs tensor.
-
- Args:
- probs (torch.Tensor): Input probabilities with token candidates on the last dimension.
- p (int): The p in “top-p”.
- Returns:
- torch.Tensor: Sampled tokens.
- """
- probs_sort, probs_idx = torch.sort(probs, dim=-1, descending=True)
- probs_sum = torch.cumsum(probs_sort, dim=-1)
- mask = probs_sum - probs_sort > p
- probs_sort *= (~mask).float()
- probs_sort.div_(probs_sort.sum(dim=-1, keepdim=True))
- next_token = multinomial(probs_sort, num_samples=1)
- next_token = torch.gather(probs_idx, -1, next_token)
- return next_token
-
-
-class DummyPoolExecutor:
- """Dummy pool executor to use when we actually have only 1 worker.
- (e.g. instead of ProcessPoolExecutor).
- """
- class DummyResult:
- def __init__(self, func, *args, **kwargs):
- self.func = func
- self.args = args
- self.kwargs = kwargs
-
- def result(self):
- return self.func(*self.args, **self.kwargs)
-
- def __init__(self, workers, mp_context=None):
- pass
-
- def submit(self, func, *args, **kwargs):
- return DummyPoolExecutor.DummyResult(func, *args, **kwargs)
-
- def __enter__(self):
- return self
-
- def __exit__(self, exc_type, exc_value, exc_tb):
- return
-
-
-def get_pool_executor(num_workers: int, mp_context=None):
- return ProcessPoolExecutor(num_workers, mp_context) if num_workers > 1 else DummyPoolExecutor(1)
-
-
-def length_to_mask(lengths: torch.Tensor, max_len: tp.Optional[int] = None) -> torch.Tensor:
- """Utility function to convert a tensor of sequence lengths to a mask (useful when working on padded sequences).
- For example: [3, 5] => [[1, 1, 1, 0, 0], [1, 1, 1, 1, 1]]
-
- Args:
- lengths (torch.Tensor): tensor with lengths
- max_len (int): can set the max length manually. Defaults to None.
- Returns:
- torch.Tensor: mask with 0s where there is pad tokens else 1s
- """
- assert len(lengths.shape) == 1, "Length shape should be 1 dimensional."
- final_length = lengths.max().item() if not max_len else max_len
- final_length = max(final_length, 1) # if all seqs are of len zero we don't want a zero-size tensor
- return torch.arange(final_length)[None, :].to(lengths.device) < lengths[:, None]
-
-
-def hash_trick(word: str, vocab_size: int) -> int:
- """Hash trick to pair each word with an index
-
- Args:
- word (str): word we wish to convert to an index
- vocab_size (int): size of the vocabulary
- Returns:
- int: index of the word in the embedding LUT
- """
- hash = int(hashlib.sha256(word.encode("utf-8")).hexdigest(), 16)
- return hash % vocab_size
-
-
-def with_rank_rng(base_seed: int = 1234):
- """Decorator for a function so that the function will use a Random Number Generator
- whose state depend on the GPU rank. The original RNG state is restored upon returning.
-
- Args:
- base_seed (int): Random seed.
- """
- def _decorator(fun: tp.Callable):
- @wraps(fun)
- def _decorated(*args, **kwargs):
- state = torch.get_rng_state()
- seed = base_seed ^ flashy.distrib.rank()
- torch.manual_seed(seed)
- logger.debug('Rank dependent seed set to %d', seed)
- try:
- return fun(*args, **kwargs)
- finally:
- torch.set_rng_state(state)
- logger.debug('RNG state restored.')
- return _decorated
- return _decorator
-
-
-def collate(tensors: tp.List[torch.Tensor], dim: int = 0) -> tp.Tuple[torch.Tensor, torch.Tensor]:
-    """Get a list of tensors and collate them to a single tensor, according to the following logic:
- - `dim` specifies the time dimension which will be stacked and padded.
-    - The output will contain 1 new dimension (dimension index 0) which will be the size
-      of the original list.
-
- Args:
- tensors (tp.List[torch.Tensor]): List of tensors to collate.
- dim (int): Dimension which will be stacked and padded.
- Returns:
- tp.Tuple[torch.Tensor, torch.Tensor]:
- torch.Tensor: Stacked and padded tensor. The output will contain 1 new dimension
- (dimension index 0) which will be the size of the original list.
- torch.Tensor: Tensor containing length of original tensor sizes (without padding).
- """
- tensors = [x.transpose(0, dim) for x in tensors]
- lens = torch.LongTensor([len(x) for x in tensors])
- padded_tensors = pad_sequence(tensors)
- padded_tensors = padded_tensors.transpose(0, 1)
- padded_tensors = padded_tensors.transpose(1, dim + 1)
- return padded_tensors, lens
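
A quick usage sketch for the sampling and masking helpers above. The import path assumes the package layout of this Space (audiocraft/utils/utils.py), and the tensor shapes are made up for illustration.

import torch

from audiocraft.utils.utils import length_to_mask, sample_top_k, sample_top_p

probs = torch.softmax(torch.randn(2, 4, 1000), dim=-1)  # [batch, codebooks, vocab]

# sample_top_k renormalises its input in place, so pass a clone to keep probs intact.
next_top_k = sample_top_k(probs.clone(), k=50)   # keep only the 50 most likely tokens
next_top_p = sample_top_p(probs, p=0.9)          # keep the smallest set covering 90% of the mass
print(next_top_k.shape, next_top_p.shape)        # torch.Size([2, 4, 1]) for both

mask = length_to_mask(torch.tensor([3, 5]))
print(mask.int())  # [[1, 1, 1, 0, 0], [1, 1, 1, 1, 1]]
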
diff --git a/spaces/captainChan/CaptainChan/README.md b/spaces/captainChan/CaptainChan/README.md
deleted file mode 100644
index 4699e9be714f7d1e256a1b8b7adbda07c140f6ca..0000000000000000000000000000000000000000
--- a/spaces/captainChan/CaptainChan/README.md
+++ /dev/null
@@ -1,11 +0,0 @@
----
-title: Reader
-emoji: 🏃
-colorFrom: indigo
-colorTo: red
-sdk: gradio
-sdk_version: 2.8.12
-app_file: app.py
-pinned: false
-license: bsd
----
\ No newline at end of file
diff --git a/spaces/chansung/LLaMA-13B/llama/model.py b/spaces/chansung/LLaMA-13B/llama/model.py
deleted file mode 100644
index 871f7e2acbfbe028a87435cc4bf8ddd8f9b09c73..0000000000000000000000000000000000000000
--- a/spaces/chansung/LLaMA-13B/llama/model.py
+++ /dev/null
@@ -1,310 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# This software may be used and distributed according to the terms of the GNU General Public License version 3.
-
-from contextvars import ContextVar
-
-from typing import Optional, Tuple, Type
-from dataclasses import dataclass
-import math
-
-import torch
-from torch import nn
-import torch.nn.functional as F
-import bitsandbytes as bnb
-
-import tqdm
-
-
-@dataclass
-class ModelArgs:
- dim: int = 512
- n_layers: int = 8
- n_heads: int = 8
- vocab_size: int = -1 # defined later by tokenizer
- multiple_of: int = 256 # make SwiGLU hidden layer size multiple of large power of 2
- norm_eps: float = 1e-5
-
- max_batch_size: int = 32
- max_seq_len: int = 1024
-
-
-class RMSNorm(torch.nn.Module):
- def __init__(self, dim: int, eps: float = 1e-6):
- super().__init__()
- self.eps = eps
- self.weight = nn.Parameter(torch.ones(dim))
-
- def _norm(self, x):
- return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)
-
- def forward(self, x):
- output = self._norm(x.float()).type_as(x)
- return output * self.weight
-
-
-def precompute_freqs_cis(dim: int, end: int, theta: float = 10000.0):
- freqs = 1.0 / (theta ** (torch.arange(0, dim, 2)[: (dim // 2)].float() / dim))
- t = torch.arange(end, device=freqs.device) # type: ignore
- freqs = torch.outer(t, freqs).float() # type: ignore
- freqs_cis = torch.polar(torch.ones_like(freqs), freqs) # complex64
- return freqs_cis
-
-
-def reshape_for_broadcast(freqs_cis: torch.Tensor, x: torch.Tensor):
- ndim = x.ndim
- assert 0 <= 1 < ndim
- assert freqs_cis.shape == (x.shape[1], x.shape[-1])
- shape = [d if i == 1 or i == ndim - 1 else 1 for i, d in enumerate(x.shape)]
- return freqs_cis.view(*shape)
-
-
-def apply_rotary_emb(
- xq: torch.Tensor,
- xk: torch.Tensor,
- freqs_cis: torch.Tensor,
-) -> Tuple[torch.Tensor, torch.Tensor]:
- xq_ = torch.view_as_complex(xq.float().reshape(*xq.shape[:-1], -1, 2))
- xk_ = torch.view_as_complex(xk.float().reshape(*xk.shape[:-1], -1, 2))
- freqs_cis = reshape_for_broadcast(freqs_cis, xq_)
- xq_out = torch.view_as_real(xq_ * freqs_cis).flatten(3)
- xk_out = torch.view_as_real(xk_ * freqs_cis).flatten(3)
- return xq_out.type_as(xq), xk_out.type_as(xk)
-
-
-class UninitializedLinear(nn.Linear):
- def reset_parameters(self) -> None:
- pass
-
-
-class InferenceQuantizedLinear(bnb.nn.Linear8bitLt):
- def __init__(self, *args, **kwargs):
- super().__init__(has_fp16_weights=False, *args, **kwargs)
-
- def reset_parameters(self) -> None:
- pass
-
-
-default_quantize: ContextVar[bool] = ContextVar("default_quantize", default=False)
-
-
-def get_linear_class() -> Type[nn.Linear]:
- if default_quantize.get():
- return InferenceQuantizedLinear
- return UninitializedLinear
-
-
-class Attention(nn.Module):
- def __init__(self, args: ModelArgs):
- super().__init__()
-
- self.n_local_heads = (
- args.n_heads // 1
- ) # fs_init.get_model_parallel_world_size()
- self.head_dim = args.dim // args.n_heads
-
- Linear = get_linear_class()
- self.wq = Linear(
- args.dim,
- args.n_heads * self.head_dim,
- bias=False,
- )
- self.wk = Linear(
- args.dim,
- args.n_heads * self.head_dim,
- bias=False,
- )
- self.wv = Linear(
- args.dim,
- args.n_heads * self.head_dim,
- bias=False,
- )
- self.wo = Linear(
- args.dim,
- args.n_heads * self.head_dim,
- bias=False,
- )
-
- self.cache_k = torch.zeros(
- (args.max_batch_size, args.max_seq_len, self.n_local_heads, self.head_dim)
- ).cuda()
- self.cache_v = torch.zeros(
- (args.max_batch_size, args.max_seq_len, self.n_local_heads, self.head_dim)
- ).cuda()
-
- def forward(
- self,
- x: torch.Tensor,
- start_pos: int,
- freqs_cis: torch.Tensor,
- mask: Optional[torch.Tensor],
- ):
- bsz, seqlen, _ = x.shape
- xq, xk, xv = self.wq(x), self.wk(x), self.wv(x)
-
- xq = xq.view(bsz, seqlen, self.n_local_heads, self.head_dim)
- xk = xk.view(bsz, seqlen, self.n_local_heads, self.head_dim)
- xv = xv.view(bsz, seqlen, self.n_local_heads, self.head_dim)
-
- xq, xk = apply_rotary_emb(xq, xk, freqs_cis=freqs_cis)
-
- self.cache_k = self.cache_k.to(xq)
- self.cache_v = self.cache_v.to(xq)
-
- self.cache_k[:bsz, start_pos : start_pos + seqlen] = xk
- self.cache_v[:bsz, start_pos : start_pos + seqlen] = xv
-
- keys = self.cache_k[:bsz, : start_pos + seqlen]
- values = self.cache_v[:bsz, : start_pos + seqlen]
-
- xq = xq.transpose(1, 2)
- keys = keys.transpose(1, 2)
- values = values.transpose(1, 2)
- scores = torch.matmul(xq, keys.transpose(2, 3)) / math.sqrt(self.head_dim)
- if mask is not None:
- scores = scores + mask # (bs, n_local_heads, slen, cache_len + slen)
- scores = F.softmax(scores.float(), dim=-1).type_as(xq)
- output = torch.matmul(scores, values) # (bs, n_local_heads, slen, head_dim)
- output = output.transpose(1, 2).contiguous().view(bsz, seqlen, -1)
-
- return self.wo(output)
-
-
-class FeedForward(nn.Module):
- def __init__(
- self,
- dim: int,
- hidden_dim: int,
- multiple_of: int,
- ):
- super().__init__()
- hidden_dim = int(2 * hidden_dim / 3)
- hidden_dim = multiple_of * ((hidden_dim + multiple_of - 1) // multiple_of)
-
- Linear = get_linear_class()
- self.w1 = Linear(dim, hidden_dim, bias=False)
- self.w2 = Linear(
- hidden_dim,
- dim,
- bias=False,
- )
- self.w3 = Linear(
- dim,
- hidden_dim,
- bias=False,
- )
-
- def forward(self, x):
- return self.w2(F.silu(self.w1(x)) * self.w3(x))
-
-
-class TransformerBlock(nn.Module):
- def __init__(self, layer_id: int, args: ModelArgs):
- super().__init__()
- self.n_heads = args.n_heads
- self.dim = args.dim
- self.head_dim = args.dim // args.n_heads
- self.attention = Attention(args)
- self.feed_forward = FeedForward(
- dim=args.dim, hidden_dim=4 * args.dim, multiple_of=args.multiple_of
- )
- self.layer_id = layer_id
- self.attention_norm = RMSNorm(args.dim, eps=args.norm_eps)
- self.ffn_norm = RMSNorm(args.dim, eps=args.norm_eps)
-
- def forward(
- self,
- x: torch.Tensor,
- start_pos: int,
- freqs_cis: torch.Tensor,
- mask: Optional[torch.Tensor],
- ):
- h = x + self.attention.forward(
- self.attention_norm(x), start_pos, freqs_cis, mask
- )
- out = h + self.feed_forward.forward(self.ffn_norm(h))
- return out
-
-
-def convert_linear_to_bnb(float_linear):
- new_layer = InferenceQuantizedLinear(
- float_linear.in_features,
- float_linear.out_features,
- bias=float_linear.bias is not None,
- )
- new_layer._parameters["weight"] = bnb.nn.Int8Params(
- float_linear.weight.data.cpu(),
- requires_grad=False,
- has_fp16_weights=False,
- )
- if float_linear.bias is not None:
- new_layer._parameters["bias"] = float_linear.bias
- return new_layer
-
-
-class Transformer(nn.Module):
- def __init__(self, params: ModelArgs):
- super().__init__()
- self.params = params
- self.vocab_size = params.vocab_size
- self.n_layers = params.n_layers
-
- self.tok_embeddings = torch.nn.Embedding(params.vocab_size, params.dim)
-
- self.layers = torch.nn.ModuleList()
- for layer_id in range(params.n_layers):
- self.layers.append(TransformerBlock(layer_id, params))
-
- self.norm = RMSNorm(params.dim, eps=params.norm_eps)
-
- Linear = get_linear_class()
- self.output = Linear(params.dim, params.vocab_size, bias=False)
-
- self.freqs_cis = precompute_freqs_cis(
- self.params.dim // self.params.n_heads, self.params.max_seq_len * 2
- )
-
- @torch.inference_mode()
- def forward(self, tokens: torch.Tensor, start_pos: int):
- _bsz, seqlen = tokens.shape
- h = self.tok_embeddings(tokens)
- self.freqs_cis = self.freqs_cis.to(h.device)
- freqs_cis = self.freqs_cis[start_pos : start_pos + seqlen]
-
- mask = None
- if seqlen > 1:
- mask = torch.full(
- (1, 1, seqlen, seqlen), float("-inf"), device=tokens.device
- )
- mask = torch.triu(mask, diagonal=start_pos + 1).type_as(h)
-
- for layer in self.layers:
- h = layer(h, start_pos, freqs_cis, mask)
- h = self.norm(h)
- output = self.output(h[:, -1, :]) # only compute last logits
- return output.float()
-
- def quantize(self):
- # https://github.com/pytorch/vision/issues/2391#issuecomment-653900218
- def get_layer(model, name):
- layer = model
- for attr in name.split("."):
- layer = getattr(layer, attr)
- return layer
-
- def set_layer(model, name, layer):
- try:
- attrs, name = name.rsplit(".", 1)
- model = get_layer(model, attrs)
- except ValueError:
- pass
- setattr(model, name, layer)
-
- linear_layers = {
- k: v for k, v in self.named_modules() if isinstance(v, nn.Linear)
- }
-
- print("Quantizing", len(linear_layers), "layers")
- for name, layer in tqdm.tqdm(linear_layers.items()):
- new_layer = convert_linear_to_bnb(layer)
- set_layer(self, name, new_layer)
- self.cuda()
\ No newline at end of file
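
A small shape and invariance check for the rotary-embedding helpers above, on toy sizes. It assumes the file is importable as llama.model and that its imports (notably bitsandbytes) are available in the environment.

import torch

from llama.model import apply_rotary_emb, precompute_freqs_cis

bsz, seqlen, n_heads, head_dim = 1, 5, 2, 8
freqs_cis = precompute_freqs_cis(head_dim, seqlen)   # complex64, shape [seqlen, head_dim // 2]

xq = torch.randn(bsz, seqlen, n_heads, head_dim)
xk = torch.randn(bsz, seqlen, n_heads, head_dim)
xq_rot, xk_rot = apply_rotary_emb(xq, xk, freqs_cis=freqs_cis)

print(freqs_cis.shape, xq_rot.shape)  # torch.Size([5, 4]) torch.Size([1, 5, 2, 8])

# The rotation multiplies (even, odd) channel pairs by unit-modulus complex numbers,
# so per-head vector norms are preserved.
assert torch.allclose(xq.norm(dim=-1), xq_rot.norm(dim=-1), atol=1e-5)
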
diff --git a/spaces/chauvet/stabilityai-stable-diffusion-2-1/app.py b/spaces/chauvet/stabilityai-stable-diffusion-2-1/app.py
deleted file mode 100644
index 0160420876923d89f2ab5fccb9f4d13725e29972..0000000000000000000000000000000000000000
--- a/spaces/chauvet/stabilityai-stable-diffusion-2-1/app.py
+++ /dev/null
@@ -1,3 +0,0 @@
-import gradio as gr
-
-gr.Interface.load("models/stabilityai/stable-diffusion-2-1").launch()
\ No newline at end of file
diff --git a/spaces/chendl/compositional_test/transformers/examples/legacy/seq2seq/finetune.sh b/spaces/chendl/compositional_test/transformers/examples/legacy/seq2seq/finetune.sh
deleted file mode 100644
index 1f518835d638594c21b03713dbd88d783567ec0d..0000000000000000000000000000000000000000
--- a/spaces/chendl/compositional_test/transformers/examples/legacy/seq2seq/finetune.sh
+++ /dev/null
@@ -1,24 +0,0 @@
-# Copyright 2020 The HuggingFace Team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# The proper usage is documented in the README; you need to specify data_dir, output_dir and model_name_or_path
-# run ./finetune.sh --help to see all the possible options
-python finetune_trainer.py \
- --learning_rate=3e-5 \
- --fp16 \
- --do_train --do_eval --do_predict \
- --evaluation_strategy steps \
- --predict_with_generate \
- --n_val 1000 \
- "$@"
diff --git a/spaces/chendl/compositional_test/transformers/examples/pytorch/text-classification/run_xnli.py b/spaces/chendl/compositional_test/transformers/examples/pytorch/text-classification/run_xnli.py
deleted file mode 100644
index cef43056410c666fab33a421b94edc88856f7fb5..0000000000000000000000000000000000000000
--- a/spaces/chendl/compositional_test/transformers/examples/pytorch/text-classification/run_xnli.py
+++ /dev/null
@@ -1,442 +0,0 @@
-#!/usr/bin/env python
-# coding=utf-8
-# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
-# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-""" Finetuning multi-lingual models on XNLI (e.g. Bert, DistilBERT, XLM).
- Adapted from `examples/text-classification/run_glue.py`"""
-
-import logging
-import os
-import random
-import sys
-from dataclasses import dataclass, field
-from typing import Optional
-
-import datasets
-import evaluate
-import numpy as np
-from datasets import load_dataset
-
-import transformers
-from transformers import (
- AutoConfig,
- AutoModelForSequenceClassification,
- AutoTokenizer,
- DataCollatorWithPadding,
- EvalPrediction,
- HfArgumentParser,
- Trainer,
- TrainingArguments,
- default_data_collator,
- set_seed,
-)
-from transformers.trainer_utils import get_last_checkpoint
-from transformers.utils import check_min_version, send_example_telemetry
-from transformers.utils.versions import require_version
-
-
-# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
-check_min_version("4.28.0")
-
-require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
-
-logger = logging.getLogger(__name__)
-
-
-@dataclass
-class DataTrainingArguments:
- """
- Arguments pertaining to what data we are going to input our model for training and eval.
-
- Using `HfArgumentParser` we can turn this class
- into argparse arguments to be able to specify them on
- the command line.
- """
-
- max_seq_length: Optional[int] = field(
- default=128,
- metadata={
- "help": (
- "The maximum total input sequence length after tokenization. Sequences longer "
- "than this will be truncated, sequences shorter will be padded."
- )
- },
- )
- overwrite_cache: bool = field(
- default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
- )
- pad_to_max_length: bool = field(
- default=True,
- metadata={
- "help": (
- "Whether to pad all samples to `max_seq_length`. "
- "If False, will pad the samples dynamically when batching to the maximum length in the batch."
- )
- },
- )
- max_train_samples: Optional[int] = field(
- default=None,
- metadata={
- "help": (
- "For debugging purposes or quicker training, truncate the number of training examples to this "
- "value if set."
- )
- },
- )
- max_eval_samples: Optional[int] = field(
- default=None,
- metadata={
- "help": (
- "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
- "value if set."
- )
- },
- )
- max_predict_samples: Optional[int] = field(
- default=None,
- metadata={
- "help": (
- "For debugging purposes or quicker training, truncate the number of prediction examples to this "
- "value if set."
- )
- },
- )
-
-
-@dataclass
-class ModelArguments:
- """
- Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
- """
-
- model_name_or_path: str = field(
- default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
- )
- language: str = field(
- default=None, metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."}
- )
- train_language: Optional[str] = field(
- default=None, metadata={"help": "Train language if it is different from the evaluation language."}
- )
- config_name: Optional[str] = field(
- default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
- )
- tokenizer_name: Optional[str] = field(
- default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
- )
- cache_dir: Optional[str] = field(
- default=None,
- metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
- )
- do_lower_case: Optional[bool] = field(
- default=False,
- metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"},
- )
- use_fast_tokenizer: bool = field(
- default=True,
- metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
- )
- model_revision: str = field(
- default="main",
- metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
- )
- use_auth_token: bool = field(
- default=False,
- metadata={
- "help": (
- "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
- "with private models)."
- )
- },
- )
- ignore_mismatched_sizes: bool = field(
- default=False,
- metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
- )
-
-
-def main():
- # See all possible arguments in src/transformers/training_args.py
- # or by passing the --help flag to this script.
- # We now keep distinct sets of args, for a cleaner separation of concerns.
-
- parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
- model_args, data_args, training_args = parser.parse_args_into_dataclasses()
-
- # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
- # information sent is the one passed as arguments along with your Python/PyTorch versions.
- send_example_telemetry("run_xnli", model_args)
-
- # Setup logging
- logging.basicConfig(
- format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
- datefmt="%m/%d/%Y %H:%M:%S",
- handlers=[logging.StreamHandler(sys.stdout)],
- )
-
- if training_args.should_log:
- # The default of training_args.log_level is passive, so we set log level at info here to have that default.
- transformers.utils.logging.set_verbosity_info()
-
- log_level = training_args.get_process_log_level()
- logger.setLevel(log_level)
- datasets.utils.logging.set_verbosity(log_level)
- transformers.utils.logging.set_verbosity(log_level)
- transformers.utils.logging.enable_default_handler()
- transformers.utils.logging.enable_explicit_format()
-
- # Log on each process the small summary:
- logger.warning(
- f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
- + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
- )
- logger.info(f"Training/evaluation parameters {training_args}")
-
- # Detecting last checkpoint.
- last_checkpoint = None
- if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
- last_checkpoint = get_last_checkpoint(training_args.output_dir)
- if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
- raise ValueError(
- f"Output directory ({training_args.output_dir}) already exists and is not empty. "
- "Use --overwrite_output_dir to overcome."
- )
- elif last_checkpoint is not None:
- logger.info(
- f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
- "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
- )
-
- # Set seed before initializing model.
- set_seed(training_args.seed)
-
- # In distributed training, the load_dataset function guarantees that only one local process can concurrently
- # download the dataset.
- # Downloading and loading xnli dataset from the hub.
- if training_args.do_train:
- if model_args.train_language is None:
- train_dataset = load_dataset(
- "xnli",
- model_args.language,
- split="train",
- cache_dir=model_args.cache_dir,
- use_auth_token=True if model_args.use_auth_token else None,
- )
- else:
- train_dataset = load_dataset(
- "xnli",
- model_args.train_language,
- split="train",
- cache_dir=model_args.cache_dir,
- use_auth_token=True if model_args.use_auth_token else None,
- )
- label_list = train_dataset.features["label"].names
-
- if training_args.do_eval:
- eval_dataset = load_dataset(
- "xnli",
- model_args.language,
- split="validation",
- cache_dir=model_args.cache_dir,
- use_auth_token=True if model_args.use_auth_token else None,
- )
- label_list = eval_dataset.features["label"].names
-
- if training_args.do_predict:
- predict_dataset = load_dataset(
- "xnli",
- model_args.language,
- split="test",
- cache_dir=model_args.cache_dir,
- use_auth_token=True if model_args.use_auth_token else None,
- )
- label_list = predict_dataset.features["label"].names
-
- # Labels
- num_labels = len(label_list)
-
- # Load pretrained model and tokenizer
- # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
- # download model & vocab.
- config = AutoConfig.from_pretrained(
- model_args.config_name if model_args.config_name else model_args.model_name_or_path,
- num_labels=num_labels,
- id2label={str(i): label for i, label in enumerate(label_list)},
- label2id={label: i for i, label in enumerate(label_list)},
- finetuning_task="xnli",
- cache_dir=model_args.cache_dir,
- revision=model_args.model_revision,
- use_auth_token=True if model_args.use_auth_token else None,
- )
- tokenizer = AutoTokenizer.from_pretrained(
- model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
- do_lower_case=model_args.do_lower_case,
- cache_dir=model_args.cache_dir,
- use_fast=model_args.use_fast_tokenizer,
- revision=model_args.model_revision,
- use_auth_token=True if model_args.use_auth_token else None,
- )
- model = AutoModelForSequenceClassification.from_pretrained(
- model_args.model_name_or_path,
- from_tf=bool(".ckpt" in model_args.model_name_or_path),
- config=config,
- cache_dir=model_args.cache_dir,
- revision=model_args.model_revision,
- use_auth_token=True if model_args.use_auth_token else None,
- ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
- )
-
- # Preprocessing the datasets
- # Padding strategy
- if data_args.pad_to_max_length:
- padding = "max_length"
- else:
- # We will pad later, dynamically at batch creation, to the max sequence length in each batch
- padding = False
-
- def preprocess_function(examples):
- # Tokenize the texts
- return tokenizer(
- examples["premise"],
- examples["hypothesis"],
- padding=padding,
- max_length=data_args.max_seq_length,
- truncation=True,
- )
-
- if training_args.do_train:
- if data_args.max_train_samples is not None:
- max_train_samples = min(len(train_dataset), data_args.max_train_samples)
- train_dataset = train_dataset.select(range(max_train_samples))
- with training_args.main_process_first(desc="train dataset map pre-processing"):
- train_dataset = train_dataset.map(
- preprocess_function,
- batched=True,
- load_from_cache_file=not data_args.overwrite_cache,
- desc="Running tokenizer on train dataset",
- )
- # Log a few random samples from the training set:
- for index in random.sample(range(len(train_dataset)), 3):
- logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
-
- if training_args.do_eval:
- if data_args.max_eval_samples is not None:
- max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
- eval_dataset = eval_dataset.select(range(max_eval_samples))
- with training_args.main_process_first(desc="validation dataset map pre-processing"):
- eval_dataset = eval_dataset.map(
- preprocess_function,
- batched=True,
- load_from_cache_file=not data_args.overwrite_cache,
- desc="Running tokenizer on validation dataset",
- )
-
- if training_args.do_predict:
- if data_args.max_predict_samples is not None:
- max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
- predict_dataset = predict_dataset.select(range(max_predict_samples))
- with training_args.main_process_first(desc="prediction dataset map pre-processing"):
- predict_dataset = predict_dataset.map(
- preprocess_function,
- batched=True,
- load_from_cache_file=not data_args.overwrite_cache,
- desc="Running tokenizer on prediction dataset",
- )
-
- # Get the metric function
- metric = evaluate.load("xnli")
-
- # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
- # predictions and label_ids field) and has to return a dictionary mapping metric names (strings) to floats.
- def compute_metrics(p: EvalPrediction):
- preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
- preds = np.argmax(preds, axis=1)
- return metric.compute(predictions=preds, references=p.label_ids)
-
- # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
- if data_args.pad_to_max_length:
- data_collator = default_data_collator
- elif training_args.fp16:
- data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
- else:
- data_collator = None
-
- # Initialize our Trainer
- trainer = Trainer(
- model=model,
- args=training_args,
- train_dataset=train_dataset if training_args.do_train else None,
- eval_dataset=eval_dataset if training_args.do_eval else None,
- compute_metrics=compute_metrics,
- tokenizer=tokenizer,
- data_collator=data_collator,
- )
-
- # Training
- if training_args.do_train:
- checkpoint = None
- if training_args.resume_from_checkpoint is not None:
- checkpoint = training_args.resume_from_checkpoint
- elif last_checkpoint is not None:
- checkpoint = last_checkpoint
- train_result = trainer.train(resume_from_checkpoint=checkpoint)
- metrics = train_result.metrics
- max_train_samples = (
- data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
- )
- metrics["train_samples"] = min(max_train_samples, len(train_dataset))
-
- trainer.save_model() # Saves the tokenizer too for easy upload
-
- trainer.log_metrics("train", metrics)
- trainer.save_metrics("train", metrics)
- trainer.save_state()
-
- # Evaluation
- if training_args.do_eval:
- logger.info("*** Evaluate ***")
- metrics = trainer.evaluate(eval_dataset=eval_dataset)
-
- max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
- metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
-
- trainer.log_metrics("eval", metrics)
- trainer.save_metrics("eval", metrics)
-
- # Prediction
- if training_args.do_predict:
- logger.info("*** Predict ***")
- predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix="predict")
-
- max_predict_samples = (
- data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
- )
- metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset))
-
- trainer.log_metrics("predict", metrics)
- trainer.save_metrics("predict", metrics)
-
- predictions = np.argmax(predictions, axis=1)
- output_predict_file = os.path.join(training_args.output_dir, "predictions.txt")
- if trainer.is_world_process_zero():
- with open(output_predict_file, "w") as writer:
- writer.write("index\tprediction\n")
- for index, item in enumerate(predictions):
- item = label_list[item]
- writer.write(f"{index}\t{item}\n")
-
-
-if __name__ == "__main__":
- main()
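
The comment before `compute_metrics` in the file above describes the contract the `Trainer` expects: the function receives an `EvalPrediction` (predictions plus label_ids) and returns a dict mapping metric names to floats. A small self-contained sketch of that contract, using plain accuracy instead of `evaluate.load("xnli")` so it runs without downloading anything:

```python
import numpy as np
from transformers import EvalPrediction


def compute_metrics(p: EvalPrediction) -> dict:
    # Trainer may hand over a tuple of outputs; the logits come first.
    preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
    preds = np.argmax(preds, axis=1)                 # logits -> predicted class ids
    accuracy = float((preds == p.label_ids).mean())  # label_ids has the same length as preds
    return {"accuracy": accuracy}


# Toy check: three examples, three classes (entailment / neutral / contradiction in XNLI).
logits = np.array([[2.0, 0.1, 0.3], [0.2, 1.5, 0.1], [0.1, 0.2, 0.9]])
labels = np.array([0, 1, 2])
print(compute_metrics(EvalPrediction(predictions=logits, label_ids=labels)))  # {'accuracy': 1.0}
```
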
diff --git a/spaces/chendl/compositional_test/transformers/examples/research_projects/bertology/run_prune_gpt.py b/spaces/chendl/compositional_test/transformers/examples/research_projects/bertology/run_prune_gpt.py
deleted file mode 100644
index fa7484a787b6c2f0d47f05ac8d100c8cfdcf2525..0000000000000000000000000000000000000000
--- a/spaces/chendl/compositional_test/transformers/examples/research_projects/bertology/run_prune_gpt.py
+++ /dev/null
@@ -1,391 +0,0 @@
-#!/usr/bin/env python3
-""" This script is adapted from the Bertology pruning code (https://github.com/huggingface/transformers/blob/783d7d2629e97c5f0c5f9ef01b8c66410275c204/examples/research_projects/bertology/run_bertology.py)
-to prune GPT-like models. The author is @altsoph.
-"""
-
-import argparse
-import logging
-import os
-from datetime import datetime
-
-import numpy as np
-import torch
-from torch import nn
-from torch.utils.data import DataLoader, RandomSampler, TensorDataset
-from tqdm import tqdm
-
-from transformers import GPT2LMHeadModel
-
-
-logger = logging.getLogger(__name__)
-
-
-def save_model(model, dirpath):
- # save results
- if os.path.exists(dirpath):
- if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
- os.path.join(dirpath, "config.json")
- ):
- os.remove(os.path.join(dirpath, "config.json"))
- if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
- os.path.join(dirpath, "pytorch_model.bin")
- ):
- os.remove(os.path.join(dirpath, "pytorch_model.bin"))
- else:
- os.makedirs(dirpath)
- model.save_pretrained(dirpath)
-
-
-def entropy(p, unlogit=False):
- """Compute the entropy of a probability distribution"""
- exponent = 2
- if unlogit:
- p = torch.pow(p, exponent)
- plogp = p * torch.log(p)
- plogp[p == 0] = 0
- return -plogp.sum(dim=-1)
-
-
-def print_2d_tensor(tensor):
- """Print a 2D tensor"""
- logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
- for row in range(len(tensor)):
- if tensor.dtype != torch.long:
- logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
- else:
- logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
-
-
-def compute_heads_importance(
- args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
-):
- """This method shows how to compute:
- - head attention entropy
- - head importance scores according to http://arxiv.org/abs/1905.10650
- """
- # Prepare our tensors
- n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
- head_importance = torch.zeros(n_layers, n_heads).to(args.device)
- attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)
-
- if head_mask is None:
- head_mask = torch.ones(n_layers, n_heads).to(args.device)
-
- head_mask.requires_grad_(requires_grad=True)
- # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
- if actually_pruned:
- head_mask = None
-
- tot_tokens = 0.0
- total_loss = 0.0
- for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
- inputs = tuple(t.to(args.device) for t in inputs)
- (input_ids,) = inputs
-
- # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
- outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
- # (loss), lm_logits, presents, (all hidden_states), (attentions)
- loss, _, all_attentions = (
- outputs[0],
- outputs[1],
- outputs[-1],
- ) # Loss and logits are the first, attention the last
- loss.backward() # Backpropagate to populate the gradients in the head mask
- total_loss += loss.detach().cpu().numpy()
- if compute_entropy:
- for layer, attn in enumerate(all_attentions):
- masked_entropy = entropy(attn.detach(), True)
- attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()
-
- if compute_importance:
- head_importance += head_mask.grad.abs().detach()
- tot_tokens += torch.ones_like(input_ids).float().detach().sum().data
-
- # Normalize
- attn_entropy /= tot_tokens
- head_importance /= tot_tokens
- # Layerwise importance normalization
- if not args.dont_normalize_importance_by_layer:
- exponent = 2
- norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
- head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20
-
- if not args.dont_normalize_global_importance:
- head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
-
- # Print matrices
- if compute_entropy:
- logger.info("Attention entropies")
- print_2d_tensor(attn_entropy)
- if compute_importance:
- logger.info("Head importance scores")
- print_2d_tensor(head_importance)
- logger.info("Head ranked by importance scores")
- head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
- head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
- head_importance.numel(), device=args.device
- )
- head_ranks = head_ranks.view_as(head_importance)
- print_2d_tensor(head_ranks)
- return attn_entropy, head_importance, total_loss
-
-
-def mask_heads(args, model, eval_dataloader):
- """This method shows how to mask head (set some heads to zero), to test the effect on the network,
- based on the head importance scores, as described in Michel et al. (http://arxiv.org/abs/1905.10650)
- """
- _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
- original_score = 1 / loss # instead of a downstream score, use the inverse LM loss
- logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)
-
- new_head_mask = torch.ones_like(head_importance)
- num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))
-
- current_score = original_score
- while current_score >= original_score * args.masking_threshold:
- head_mask = new_head_mask.clone().detach() # save current head mask
- # heads from least important to most - keep only not-masked heads
- head_importance[head_mask == 0.0] = float("Inf")
- current_heads_to_mask = head_importance.view(-1).sort()[1]
-
- if len(current_heads_to_mask) <= num_to_mask:
- print("BREAK BY num_to_mask")
- break
-
- # mask heads
- current_heads_to_mask = current_heads_to_mask[:num_to_mask]
- logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
- new_head_mask = new_head_mask.view(-1)
- new_head_mask[current_heads_to_mask] = 0.0
- new_head_mask = new_head_mask.view_as(head_mask)
- new_head_mask = new_head_mask.clone().detach()
- print_2d_tensor(new_head_mask)
-
- # Compute metric and head importance again
- _, head_importance, loss = compute_heads_importance(
- args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
- )
- current_score = 1 / loss
- logger.info(
- "Masking: current score: %f, remaining heads %d (%.1f percents)",
- current_score,
- new_head_mask.sum(),
- new_head_mask.sum() / new_head_mask.numel() * 100,
- )
-
- logger.info("Final head mask")
- print_2d_tensor(head_mask)
- np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())
-
- return head_mask
-
-
-def prune_heads(args, model, eval_dataloader, head_mask):
- """This method shows how to prune head (remove heads weights) based on
- the head importance scores as described in Michel et al. (http://arxiv.org/abs/1905.10650)
- """
- # Try pruning and test time speedup
- # Pruning is like masking but we actually remove the masked weights
- before_time = datetime.now()
- _, _, loss = compute_heads_importance(
- args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
- )
- score_masking = 1 / loss
- original_time = datetime.now() - before_time
-
- original_num_params = sum(p.numel() for p in model.parameters())
- heads_to_prune = {
- layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
- }
-
- for k, v in heads_to_prune.items():
- if isinstance(v, int):
- heads_to_prune[k] = [
- v,
- ]
-
- assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
- model.prune_heads(heads_to_prune)
- pruned_num_params = sum(p.numel() for p in model.parameters())
-
- before_time = datetime.now()
- _, _, loss = compute_heads_importance(
- args,
- model,
- eval_dataloader,
- compute_entropy=False,
- compute_importance=False,
- head_mask=None,
- actually_pruned=True,
- )
-
- score_pruning = 1 / loss
- new_time = datetime.now() - before_time
-
- logger.info(
- "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
- original_num_params,
- pruned_num_params,
- pruned_num_params / original_num_params * 100,
- )
- logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
- logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
- save_model(model, args.output_dir)
-
-
-def main():
- parser = argparse.ArgumentParser()
- # Required parameters
- parser.add_argument(
- "--data_dir",
- default=None,
- type=str,
- required=True,
- help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
- )
- parser.add_argument(
- "--model_name_or_path",
- default=None,
- type=str,
- required=True,
- help="Path to pretrained model or model identifier from huggingface.co/models",
- )
- parser.add_argument(
- "--output_dir",
- default=None,
- type=str,
- required=True,
- help="The output directory where the model predictions and checkpoints will be written.",
- )
-
- # Other parameters
- parser.add_argument(
- "--config_name",
- default="",
- type=str,
- help="Pretrained config name or path if not the same as model_name_or_path",
- )
- parser.add_argument(
- "--tokenizer_name",
- default="",
- type=str,
- help="Pretrained tokenizer name or path if not the same as model_name_or_path",
- )
- parser.add_argument(
- "--cache_dir",
- default=None,
- type=str,
- help="Where do you want to store the pre-trained models downloaded from s3",
- )
- parser.add_argument(
- "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
- )
- parser.add_argument(
- "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
- )
- parser.add_argument(
- "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
- )
-
- parser.add_argument(
- "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
- )
- parser.add_argument(
- "--dont_normalize_global_importance",
- action="store_true",
- help="Don't normalize all importance scores between 0 and 1",
- )
-
- parser.add_argument(
- "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
- )
- parser.add_argument(
- "--masking_threshold",
- default=0.9,
- type=float,
- help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
- )
- parser.add_argument(
- "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step."
- )
- parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
-
- parser.add_argument(
- "--max_seq_length",
- default=128,
- type=int,
- help=(
- "The maximum total input sequence length after WordPiece tokenization. \n"
- "Sequences longer than this will be truncated, sequences shorter padded."
- ),
- )
- parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
-
- parser.add_argument("--seed", type=int, default=42)
- parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
- parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
- parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
- parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
- args = parser.parse_args()
-
- if args.server_ip and args.server_port:
- # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
- import ptvsd
-
- print("Waiting for debugger attach")
- ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
- ptvsd.wait_for_attach()
-
- # Setup devices and distributed training
- if args.local_rank == -1 or args.no_cuda:
- args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
- args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
- else:
- torch.cuda.set_device(args.local_rank)
- args.device = torch.device("cuda", args.local_rank)
- args.n_gpu = 1
- torch.distributed.init_process_group(backend="nccl") # Initializes the distributed backend
-
- # Setup logging
- logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
- logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))
-
- model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)
-
- # Distributed and parallel training
- model.to(args.device)
- if args.local_rank != -1:
- model = nn.parallel.DistributedDataParallel(
- model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
- )
- elif args.n_gpu > 1:
- model = nn.DataParallel(model)
-
- # Print/save training arguments
- os.makedirs(args.output_dir, exist_ok=True)
- torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
- logger.info("Training/evaluation parameters %s", args)
-
- # Prepare dataset
- numpy_data = np.concatenate(
- [
- np.loadtxt(args.data_dir, dtype=np.int64),
- ]
- )
- train_tensor_dataset = (torch.from_numpy(numpy_data),)
- train_data = TensorDataset(*train_tensor_dataset)
- train_sampler = RandomSampler(train_data)
- eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)
-
- # Compute head entropy and importance score
- compute_heads_importance(args, model, eval_dataloader)
-
- # Try head masking (set heads to zero until the score goes under a threshold)
- # and head pruning (remove masked heads and see the effect on the network)
- if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
- head_mask = mask_heads(args, model, eval_dataloader)
- prune_heads(args, model, eval_dataloader, head_mask)
-
-
-if __name__ == "__main__":
- main()
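
The docstring of `compute_heads_importance` above summarizes the technique: head importance is read off the gradient of the LM loss with respect to a differentiable head mask, following Michel et al. (arXiv:1905.10650). A stripped-down sketch of that core step, assuming a tiny randomly initialised GPT-2 so it runs offline; the full script additionally accumulates over a dataloader, normalises per layer, and computes attention entropies:

```python
import torch
from transformers import GPT2Config, GPT2LMHeadModel

# Tiny random model: 2 layers, 4 heads, 64-dim embeddings, 100-token vocabulary.
config = GPT2Config(n_layer=2, n_head=4, n_embd=64, vocab_size=100)
model = GPT2LMHeadModel(config)

input_ids = torch.randint(0, config.vocab_size, (1, 16))
head_mask = torch.ones(config.n_layer, config.n_head, requires_grad=True)

# Forward with the mask applied to the attention weights, then backprop the LM loss.
loss = model(input_ids, labels=input_ids, head_mask=head_mask).loss
loss.backward()

# Absolute gradient per head; larger values indicate heads the loss is more sensitive to.
head_importance = head_mask.grad.abs()  # shape [n_layer, n_head]
print(head_importance)
```
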
diff --git a/spaces/chendl/compositional_test/transformers/examples/research_projects/distillation/train.py b/spaces/chendl/compositional_test/transformers/examples/research_projects/distillation/train.py
deleted file mode 100644
index bb35a1df853943b414827bc4ab67cb2521f0af91..0000000000000000000000000000000000000000
--- a/spaces/chendl/compositional_test/transformers/examples/research_projects/distillation/train.py
+++ /dev/null
@@ -1,324 +0,0 @@
-# coding=utf-8
-# Copyright 2019-present, the HuggingFace Inc. team.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""
-Training the distilled model.
-Supported architectures include: BERT -> DistilBERT, RoBERTa -> DistilRoBERTa, GPT2 -> DistilGPT2.
-"""
-import argparse
-import json
-import os
-import pickle
-import shutil
-
-import numpy as np
-import torch
-from distiller import Distiller
-from lm_seqs_dataset import LmSeqsDataset
-
-from transformers import (
- BertConfig,
- BertForMaskedLM,
- BertTokenizer,
- DistilBertConfig,
- DistilBertForMaskedLM,
- DistilBertTokenizer,
- GPT2Config,
- GPT2LMHeadModel,
- GPT2Tokenizer,
- RobertaConfig,
- RobertaForMaskedLM,
- RobertaTokenizer,
-)
-from utils import git_log, init_gpu_params, logger, set_seed
-
-
-MODEL_CLASSES = {
- "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
- "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
- "bert": (BertConfig, BertForMaskedLM, BertTokenizer),
- "gpt2": (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
-}
-
-
-def sanity_checks(args):
- """
- A bunch of sanity checks on the args to perform before even starting...
- """
- assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
- assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
- if args.mlm:
- assert os.path.isfile(args.token_counts)
- assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
- else:
- assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
-
- assert args.teacher_type == args.student_type or (
- args.student_type == "distilbert" and args.teacher_type == "bert"
- )
- assert os.path.isfile(args.student_config)
- if args.student_pretrained_weights is not None:
- assert os.path.isfile(args.student_pretrained_weights)
-
- if args.freeze_token_type_embds:
- assert args.student_type in ["roberta"]
-
- assert args.alpha_ce >= 0.0
- assert args.alpha_mlm >= 0.0
- assert args.alpha_clm >= 0.0
- assert args.alpha_mse >= 0.0
- assert args.alpha_cos >= 0.0
- assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
-
-
-def freeze_pos_embeddings(student, args):
- if args.student_type == "roberta":
- student.roberta.embeddings.position_embeddings.weight.requires_grad = False
- elif args.student_type == "gpt2":
- student.transformer.wpe.weight.requires_grad = False
-
-
-def freeze_token_type_embeddings(student, args):
- if args.student_type == "roberta":
- student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
-
-
-def main():
- parser = argparse.ArgumentParser(description="Training")
- parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.")
-
- parser.add_argument(
- "--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)"
- )
- parser.add_argument(
- "--data_file",
- type=str,
- required=True,
- help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.",
- )
-
- parser.add_argument(
- "--student_type",
- type=str,
- choices=["distilbert", "roberta", "gpt2"],
- required=True,
- help="The student type (DistilBERT, RoBERTa).",
- )
- parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.")
- parser.add_argument(
- "--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint."
- )
-
- parser.add_argument(
- "--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa)."
- )
- parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.")
-
- parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax temperature.")
- parser.add_argument(
- "--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0."
- )
- parser.add_argument(
- "--alpha_mlm",
- default=0.0,
- type=float,
- help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.",
- )
- parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.")
- parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.")
- parser.add_argument(
- "--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0."
- )
-
- parser.add_argument(
- "--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM."
- )
- parser.add_argument(
- "--mlm_mask_prop",
- default=0.15,
- type=float,
- help="Proportion of tokens for which we need to make a prediction.",
- )
- parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.")
- parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.")
- parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.")
- parser.add_argument(
- "--mlm_smoothing",
- default=0.7,
- type=float,
- help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).",
- )
- parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.")
-
- parser.add_argument(
- "--restrict_ce_to_mask",
- action="store_true",
- help="If true, compute the distillation loss only the [MLM] prediction distribution.",
- )
- parser.add_argument(
- "--freeze_pos_embs",
- action="store_true",
- help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.",
- )
- parser.add_argument(
- "--freeze_token_type_embds",
- action="store_true",
- help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.",
- )
-
- parser.add_argument("--n_epoch", type=int, default=3, help="Number of pass on the whole dataset.")
- parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).")
- parser.add_argument(
- "--group_by_size",
- action="store_false",
- help="If true, group sequences that have similar length into the same batch. Default is true.",
- )
-
- parser.add_argument(
- "--gradient_accumulation_steps",
- type=int,
- default=50,
- help="Gradient accumulation for larger training batches.",
- )
- parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.")
- parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
- parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.")
- parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.")
- parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.")
- parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.")
-
- parser.add_argument(
- "--fp16",
- action="store_true",
- help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
- )
- parser.add_argument(
- "--fp16_opt_level",
- type=str,
- default="O1",
- help=(
- "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
- "See details at https://nvidia.github.io/apex/amp.html"
- ),
- )
- parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.")
- parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank")
- parser.add_argument("--seed", type=int, default=56, help="Random seed")
-
- parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.")
- parser.add_argument("--checkpoint_interval", type=int, default=4000, help="Checkpoint interval.")
- args = parser.parse_args()
- sanity_checks(args)
-
- # ARGS #
- init_gpu_params(args)
- set_seed(args)
- if args.is_master:
- if os.path.exists(args.dump_path):
- if not args.force:
- raise ValueError(
- f"Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite"
- " itUse `--force` if you want to overwrite it"
- )
- else:
- shutil.rmtree(args.dump_path)
-
- if not os.path.exists(args.dump_path):
- os.makedirs(args.dump_path)
- logger.info(f"Experiment will be dumped and logged in {args.dump_path}")
-
- # SAVE PARAMS #
- logger.info(f"Param: {args}")
- with open(os.path.join(args.dump_path, "parameters.json"), "w") as f:
- json.dump(vars(args), f, indent=4)
- git_log(args.dump_path)
-
- student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
- teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]
-
- # TOKENIZER #
- tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
- special_tok_ids = {}
- for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
- idx = tokenizer.all_special_tokens.index(tok_symbol)
- special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
- logger.info(f"Special tokens {special_tok_ids}")
- args.special_tok_ids = special_tok_ids
- args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]
-
- # DATA LOADER #
- logger.info(f"Loading data from {args.data_file}")
- with open(args.data_file, "rb") as fp:
- data = pickle.load(fp)
-
- if args.mlm:
- logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)")
- with open(args.token_counts, "rb") as fp:
- counts = pickle.load(fp)
-
- token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
- for idx in special_tok_ids.values():
- token_probs[idx] = 0.0 # do not predict special tokens
- token_probs = torch.from_numpy(token_probs)
- else:
- token_probs = None
-
- train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
- logger.info("Data loader created.")
-
- # STUDENT #
- logger.info(f"Loading student config from {args.student_config}")
- stu_architecture_config = student_config_class.from_pretrained(args.student_config)
- stu_architecture_config.output_hidden_states = True
-
- if args.student_pretrained_weights is not None:
- logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}")
- student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
- else:
- student = student_model_class(stu_architecture_config)
-
- if args.n_gpu > 0:
- student.to(f"cuda:{args.local_rank}")
- logger.info("Student loaded.")
-
- # TEACHER #
- teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
- if args.n_gpu > 0:
- teacher.to(f"cuda:{args.local_rank}")
- logger.info(f"Teacher loaded from {args.teacher_name}.")
-
- # FREEZING #
- if args.freeze_pos_embs:
- freeze_pos_embeddings(student, args)
- if args.freeze_token_type_embds:
- freeze_token_type_embeddings(student, args)
-
- # SANITY CHECKS #
- assert student.config.vocab_size == teacher.config.vocab_size
- assert student.config.hidden_size == teacher.config.hidden_size
- assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
- if args.mlm:
- assert token_probs.size(0) == stu_architecture_config.vocab_size
-
- # DISTILLER #
- torch.cuda.empty_cache()
- distiller = Distiller(
- params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher
- )
- distiller.train()
- logger.info("Let's go get some drinks.")
-
-
-if __name__ == "__main__":
- main()
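
The `Distiller` class itself is not part of this diff, but the `--alpha_*` and `--temperature` arguments above describe the objective it optimises: a weighted sum of a temperature-scaled soft-target (KL) term between teacher and student logits and the student's own LM loss, plus optional MSE and cosine terms. A rough, illustrative sketch of that combination; the function and variable names here are made up for the example and are not the Distiller's API:

```python
import torch
import torch.nn.functional as F


def distillation_loss(student_logits, teacher_logits, lm_loss,
                      alpha_ce=0.5, alpha_lm=0.5, temperature=2.0):
    # Soft-target term: KL between temperature-softened teacher and student distributions,
    # rescaled by T^2 so gradients keep a comparable magnitude (Hinton et al.).
    ce = F.kl_div(
        F.log_softmax(student_logits / temperature, dim=-1),
        F.softmax(teacher_logits / temperature, dim=-1),
        reduction="batchmean",
    ) * temperature ** 2
    return alpha_ce * ce + alpha_lm * lm_loss


# Toy example with random logits and a dummy LM loss value.
student = torch.randn(8, 100)
teacher = torch.randn(8, 100)
print(distillation_loss(student, teacher, lm_loss=torch.tensor(3.2)))
```
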
diff --git a/spaces/chendl/compositional_test/transformers/examples/research_projects/jax-projects/HOW_TO_PROPOSE_PROJECT.md b/spaces/chendl/compositional_test/transformers/examples/research_projects/jax-projects/HOW_TO_PROPOSE_PROJECT.md
deleted file mode 100644
index 08e05f38931943134ac8c4457ded19da7d41abc4..0000000000000000000000000000000000000000
--- a/spaces/chendl/compositional_test/transformers/examples/research_projects/jax-projects/HOW_TO_PROPOSE_PROJECT.md
+++ /dev/null
@@ -1,109 +0,0 @@
-# How to propose a Flax/JAX + Transformers project
-
-Great that you've opened this document!
-While we at 🤗 are proposing a couple of projects, we strongly
-believe that the community can come up with much more **creative**, **fun**, and
-**impactful** projects on their own. This being said, we are really looking forward
-to seeing your project proposal!
-
-## What a project should be about
-
-The proposed project should fall into the machine learning fields of **Natural Language Processing (NLP)** and/or **Computer Vision (CV)** (possibly also **Speech Recognition (ASR)** depending on whether Speech Recognition models are available in Flax in due time) and aim at solving a specific task.
-Possible tasks can belong to:
-
- * text classification
- * text generation
- * image recognition
- * image processing
- * image captioning
- * audio classification
- * and other tasks you can think of!
-
-The clearer a task is defined, the better your project proposal is.
-*E.g.* "Using a T5 model to learn grammar correction in French" or "Adapting a pre-trained CLIP model for zero-shot image classification in Spanish" are **well-defined and clear** project proposals, while something like "Train a language model" or "Image classification" are **too vague**.
-
-There is no limit to your creativity as long as the project is feasible and ethical.
-The more creative & specific your project proposal, the more interesting it will be,
-and the more likely you will be to find motivated team members to work on your project!
-To get an idea of how to formulate your project proposals, you can browse through
-existing project proposals on the [forum](https://discuss.huggingface.co/c/flax-jax-projects/22).
-
-## How to submit a project proposal
-
-First, you should make sure that you are [logged in](https://huggingface.co/login?sso=bm9uY2U9OTRlNjZjZmZhYjMwMmJmMWMyYjc5MmFiMTMyMzY5ODYmcmV0dXJuX3Nzb191cmw9aHR0cHMlM0ElMkYlMkZkaXNjdXNzLmh1Z2dpbmdmYWNlLmNvJTJGc2Vzc2lvbiUyRnNzb19sb2dpbg%3D%3D&sig=429ad8924bcb33c40f9823027ea749abb55d393f4f58924f36a2dba3ab0a48da) with your Hugging Face account on the forum.
-
-Second, make sure that your project idea doesn't already exist by checking [existing projects](https://discuss.huggingface.co/c/flax-jax-projects/22).
-If your project already exists - great! This means that you can comment and improve
-the existing idea and join the project to form a team! If your project idea already
-exists for a different language, feel free to submit the same project idea, just in
-a different language.
-
-Third, having ensured that your project doesn't exist, click on the *"New Topic"*
-button on the [Flax/JAX Projects Forum category](https://discuss.huggingface.co/c/flax-jax-projects/22) to create a new project proposal.
-
-Fourth, make sure that your project proposal includes the following information:
-
-1. *A clear description of the project*
-2. *In which language should the project be conducted?* English, German, Chinese, ...? It can also be a multi-lingual project
-3. *Which model should be used?* If you want to adapt an existing model, you can add the link to one of the 4000 available checkpoints in JAX [here](https://huggingface.co/models?filter=jax). If you want to train a model from scratch, you can simply state the model architecture to be used, *e.g.* BERT, CLIP, etc. You can also base your project on a model that is not part of transformers. For an overview of libraries based on JAX, you can take a look at [awesome-jax](https://github.com/n2cholas/awesome-jax#awesome-jax-). **Note** that for a project that is not based on Transformers, it will be more difficult for the 🤗 team to help you. Also have a look at the section [Quickstart Flax & Jax in Transformers](https://github.com/huggingface/transformers/tree/main/examples/research_projects/jax-projects#quickstart-flax-and-jax-in-transformers) to see what model architectures are currently supported in 🤗 Transformers.
-4. *What data should be used?* It is important to state at least what kind of data you would like to use. Ideally, you can already point to publicly available data or a dataset in the 🤗 Datasets library.
-5. *Are similar training scripts available in Flax/JAX?* It would be important to find similar training scripts that already exist in Flax/JAX. *E.g.* if you are working on a Seq-to-Seq task, you can make use of the [`run_summarization_flax.py`](https://github.com/huggingface/transformers/blob/main/examples/flax/summarization/run_summarization_flax.py) script which is very similar to any seq2seq training. Also have a look at the section [Quickstart Flax & Jax in Transformers](https://github.com/huggingface/transformers/tree/main/examples/research_projects/jax-projects#quickstart-flax-and-jax-in-transformers) to see what training scripts are currently supported in 🤗 Transformers.
-6. *(Optionally) What are possible challenges?* List possible difficulties with your project. *E.g.* If you know that training convergence usually takes a lot of time, it is worth stating this here!
-7. *(Optionally) What is the desired project outcome?* - How would you like to demo your project? One could *e.g.* create a Streamlit application.
-8. *(Optionally) Links to read upon* - Can you provide any links that would help the reader to better understand your project idea?
-
-Feel free to copy-paste the following format for your project proposal and fill out the respective sections:
-
-```
-# <FILL ME: Project title>
-
-<FILL ME: A clear description of the project>
-
-## 2. Language
-
-The model will be trained in <FILL ME: which language?>.
-
-## 3. Model
-
-<FILL ME: 3. Which model should be used?>
-
-## 4. Datasets
-
-<FILL ME: 4. What data should be used?>
-
-Possible links to publicly available datasets include:
-- <FILL ME: Link 1>
-- <FILL ME: Link 2>
-- <FILL ME: Link 3>
-
-## 5. Training scripts
-
-<FILL ME: 5. Are similar training scripts available in Flax/JAX?>
-
-We can make use of <FILL ME: which training script?> to train the model.
-
-## 6. (Optional) Challenges
-
-<(Optionally) FILL ME: 6. What are possible challenges?>
-
-## 7. (Optional) Desired project outcome
-
-<(Optionally) FILL ME: 7. What is the desired project outcome? A demo?>
-
-## 8. (Optional) Reads
-
-The following links can be useful to better understand the project and
-what has previously been done.
-
-- <FILL ME: Link 1>
-- <FILL ME: Link 2>
-- <FILL ME: Link 3>
-```
-
-To see what a proposed project looks like, please have a look at submitted project
-proposals [here](https://discuss.huggingface.co/c/flax-jax-projects/22).
-
-## Will my project proposal be selected?
-
-Having submitted a project proposal, you can now promote your idea in the Slack channel `#flax-jax-community-week` to try to convince other participants to join your project!
-Once other people have joined your project, one of the organizers (`@Suzana, @valhalla, @osanseviero, @patrickvonplaten`) will officially create a team for your project and add your project to [this google sheet](https://docs.google.com/spreadsheets/d/1GpHebL7qrwJOc9olTpIPgjf8vOS0jNb6zR_B8x_Jtik/edit?usp=sharing).
diff --git a/spaces/chendl/compositional_test/transformers/examples/research_projects/rag/lightning_base.py b/spaces/chendl/compositional_test/transformers/examples/research_projects/rag/lightning_base.py
deleted file mode 100644
index e78a758239587536bc256a130a6c98d3a85dd96e..0000000000000000000000000000000000000000
--- a/spaces/chendl/compositional_test/transformers/examples/research_projects/rag/lightning_base.py
+++ /dev/null
@@ -1,404 +0,0 @@
-import argparse
-import logging
-import os
-from pathlib import Path
-from typing import Any, Dict
-
-import pytorch_lightning as pl
-from pytorch_lightning.utilities import rank_zero_info
-
-from transformers import (
- AdamW,
- AutoConfig,
- AutoModel,
- AutoModelForPreTraining,
- AutoModelForQuestionAnswering,
- AutoModelForSeq2SeqLM,
- AutoModelForSequenceClassification,
- AutoModelForTokenClassification,
- AutoModelWithLMHead,
- AutoTokenizer,
- PretrainedConfig,
- PreTrainedTokenizer,
-)
-from transformers.optimization import (
- Adafactor,
- get_cosine_schedule_with_warmup,
- get_cosine_with_hard_restarts_schedule_with_warmup,
- get_linear_schedule_with_warmup,
- get_polynomial_decay_schedule_with_warmup,
-)
-from transformers.utils.versions import require_version
-
-
-logger = logging.getLogger(__name__)
-
-require_version("pytorch_lightning>=1.0.4")
-
-MODEL_MODES = {
- "base": AutoModel,
- "sequence-classification": AutoModelForSequenceClassification,
- "question-answering": AutoModelForQuestionAnswering,
- "pretraining": AutoModelForPreTraining,
- "token-classification": AutoModelForTokenClassification,
- "language-modeling": AutoModelWithLMHead,
- "summarization": AutoModelForSeq2SeqLM,
- "translation": AutoModelForSeq2SeqLM,
-}
-
-
-# update this and the import above to support new schedulers from transformers.optimization
-arg_to_scheduler = {
- "linear": get_linear_schedule_with_warmup,
- "cosine": get_cosine_schedule_with_warmup,
- "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
- "polynomial": get_polynomial_decay_schedule_with_warmup,
- # '': get_constant_schedule, # not supported for now
- # '': get_constant_schedule_with_warmup, # not supported for now
-}
-arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
-arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}"
-
-
-class BaseTransformer(pl.LightningModule):
- def __init__(
- self,
- hparams: argparse.Namespace,
- num_labels=None,
- mode="base",
- config=None,
- tokenizer=None,
- model=None,
- **config_kwargs,
- ):
- """Initialize a model, tokenizer and config."""
- super().__init__()
- # TODO: move to self.save_hyperparameters()
- # self.save_hyperparameters()
- # can also expand arguments into trainer signature for easier reading
-
- self.save_hyperparameters(hparams)
- self.step_count = 0
- self.output_dir = Path(self.hparams.output_dir)
- cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
- if config is None:
- self.config = AutoConfig.from_pretrained(
- self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path,
- **({"num_labels": num_labels} if num_labels is not None else {}),
- cache_dir=cache_dir,
- **config_kwargs,
- )
- else:
- self.config: PretrainedConfig = config
-
- extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
- for p in extra_model_params:
- if getattr(self.hparams, p, None):
- assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute"
- setattr(self.config, p, getattr(self.hparams, p))
-
- if tokenizer is None:
- self.tokenizer = AutoTokenizer.from_pretrained(
- self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path,
- cache_dir=cache_dir,
- )
- else:
- self.tokenizer: PreTrainedTokenizer = tokenizer
- self.model_type = MODEL_MODES[mode]
- if model is None:
- self.model = self.model_type.from_pretrained(
- self.hparams.model_name_or_path,
- from_tf=bool(".ckpt" in self.hparams.model_name_or_path),
- config=self.config,
- cache_dir=cache_dir,
- )
- else:
- self.model = model
-
- def load_hf_checkpoint(self, *args, **kwargs):
- self.model = self.model_type.from_pretrained(*args, **kwargs)
-
- def get_lr_scheduler(self):
- get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
- scheduler = get_schedule_func(
- self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps()
- )
- scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
- return scheduler
-
- def configure_optimizers(self):
- """Prepare optimizer and schedule (linear warmup and decay)"""
- model = self.model
- no_decay = ["bias", "LayerNorm.weight"]
- optimizer_grouped_parameters = [
- {
- "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
- "weight_decay": self.hparams.weight_decay,
- },
- {
- "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
- "weight_decay": 0.0,
- },
- ]
- if self.hparams.adafactor:
- optimizer = Adafactor(
- optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False
- )
-
- else:
- optimizer = AdamW(
- optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon
- )
- self.opt = optimizer
-
- scheduler = self.get_lr_scheduler()
-
- return [optimizer], [scheduler]
-
- def test_step(self, batch, batch_nb):
- return self.validation_step(batch, batch_nb)
-
- def test_epoch_end(self, outputs):
- return self.validation_end(outputs)
-
- def total_steps(self) -> int:
- """The number of total training steps that will be run. Used for lr scheduler purposes."""
- num_devices = max(1, self.hparams.gpus) # TODO: consider num_tpu_cores
- effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
- return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
-
- def setup(self, stage):
- if stage == "test":
- self.dataset_size = len(self.test_dataloader().dataset)
- else:
- self.train_loader = self.get_dataloader("train", self.hparams.train_batch_size, shuffle=True)
- self.dataset_size = len(self.train_dataloader().dataset)
-
- def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False):
- raise NotImplementedError("You must implement this for your task")
-
- def train_dataloader(self):
- return self.train_loader
-
- def val_dataloader(self):
- return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=False)
-
- def test_dataloader(self):
- return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=False)
-
- def _feature_file(self, mode):
- return os.path.join(
- self.hparams.data_dir,
- "cached_{}_{}_{}".format(
- mode,
- list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(),
- str(self.hparams.max_seq_length),
- ),
- )
-
- @pl.utilities.rank_zero_only
- def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
- save_path = self.output_dir.joinpath("best_tfmr")
- self.model.config.save_step = self.step_count
- self.model.save_pretrained(save_path)
- self.tokenizer.save_pretrained(save_path)
-
- @staticmethod
- def add_model_specific_args(parser, root_dir):
- parser.add_argument(
- "--model_name_or_path",
- default=None,
- type=str,
- required=True,
- help="Path to pretrained model or model identifier from huggingface.co/models",
- )
- parser.add_argument(
- "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
- )
- parser.add_argument(
- "--tokenizer_name",
- default=None,
- type=str,
- help="Pretrained tokenizer name or path if not the same as model_name",
- )
- parser.add_argument(
- "--cache_dir",
- default="",
- type=str,
- help="Where do you want to store the pre-trained models downloaded from huggingface.co",
- )
- parser.add_argument(
- "--encoder_layerdrop",
- type=float,
- help="Encoder layer dropout probability (Optional). Goes into model.config",
- )
- parser.add_argument(
- "--decoder_layerdrop",
- type=float,
- help="Decoder layer dropout probability (Optional). Goes into model.config",
- )
- parser.add_argument(
- "--dropout",
- type=float,
- help="Dropout probability (Optional). Goes into model.config",
- )
- parser.add_argument(
- "--attention_dropout",
- type=float,
- help="Attention dropout probability (Optional). Goes into model.config",
- )
- parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
- parser.add_argument(
- "--lr_scheduler",
- default="linear",
- choices=arg_to_scheduler_choices,
- metavar=arg_to_scheduler_metavar,
- type=str,
- help="Learning rate scheduler",
- )
- parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
- parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
- parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
- parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader")
- parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=int)
- parser.add_argument("--train_batch_size", default=32, type=int)
- parser.add_argument("--eval_batch_size", default=32, type=int)
- parser.add_argument("--adafactor", action="store_true")
-
-
-class InitCallback(pl.Callback):
- # This method is better than using a custom DDP plugin with the latest pytorch-lightning (@shamanez)
- def on_sanity_check_start(self, trainer, pl_module):
- if (
- trainer.is_global_zero and trainer.global_rank == 0
- ): # we initialize the retriever only on the master worker when using Ray. In newer pytorch-lightning versions, accelerators have been removed.
- pl_module.model.rag.retriever.init_retrieval() # better to use hook functions.
-
-
-class LoggingCallback(pl.Callback):
- def on_batch_end(self, trainer, pl_module):
- lr_scheduler = trainer.lr_schedulers[0]["scheduler"]
- lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())}
- pl_module.logger.log_metrics(lrs)
-
- def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
- rank_zero_info("***** Validation results *****")
- metrics = trainer.callback_metrics
- # Log results
- for key in sorted(metrics):
- if key not in ["log", "progress_bar"]:
- rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
-
- def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
- rank_zero_info("***** Test results *****")
- metrics = trainer.callback_metrics
- # Log and save results to file
- output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt")
- with open(output_test_results_file, "w") as writer:
- for key in sorted(metrics):
- if key not in ["log", "progress_bar"]:
- rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
- writer.write("{} = {}\n".format(key, str(metrics[key])))
-
-
-def add_generic_args(parser, root_dir) -> None:
- # To allow all pl args uncomment the following line
- # parser = pl.Trainer.add_argparse_args(parser)
- parser.add_argument(
- "--output_dir",
- default=None,
- type=str,
- required=True,
- help="The output directory where the model predictions and checkpoints will be written.",
- )
- parser.add_argument(
- "--fp16",
- action="store_true",
- help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
- )
-
- parser.add_argument(
- "--fp16_opt_level",
- type=str,
- default="O2",
- help=(
- "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
- "See details at https://nvidia.github.io/apex/amp.html"
- ),
- )
- parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int)
- parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm")
- parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
- parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
- parser.add_argument(
- "--gradient_accumulation_steps",
- dest="accumulate_grad_batches",
- type=int,
- default=1,
- help="Number of updates steps to accumulate before performing a backward/update pass.",
- )
- parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
- parser.add_argument(
- "--data_dir",
- default=None,
- type=str,
- required=True,
- help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.",
- )
-
-
-def generic_train(
- model: BaseTransformer,
- args: argparse.Namespace,
- early_stopping_callback=None,
- logger=True, # can pass WandbLogger() here
- custom_ddp_plugin=None,
- extra_callbacks=[],
- checkpoint_callback=None,
- logging_callback=None,
- **extra_train_kwargs,
-):
- pl.seed_everything(args.seed)
-
- # init model
- odir = Path(model.hparams.output_dir)
- odir.mkdir(exist_ok=True)
-
- # add custom checkpoints
- if checkpoint_callback is None:
- checkpoint_callback = pl.callbacks.ModelCheckpoint(
- filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1
- )
- if early_stopping_callback:
- extra_callbacks.append(early_stopping_callback)
- if logging_callback is None:
- logging_callback = LoggingCallback()
-
- train_params = {}
-
- # TODO: remove with PyTorch 1.6 since pl uses native amp
- if args.fp16:
- train_params["precision"] = 16
- # train_params["amp_level"] = args.fp16_opt_level
-
- if args.gpus > 1:
- train_params["accelerator"] = "auto" # "ddp"
- train_params["strategy"] = "ddp"
-
- train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
- train_params["profiler"] = None # extra_train_kwargs.get("profiler", None) #get unwanted logs
- train_params["devices"] = "auto"
-
- trainer = pl.Trainer.from_argparse_args(
- args,
- weights_summary=None,
- callbacks=[logging_callback] + extra_callbacks + [checkpoint_callback] + [InitCallback()],
- # plugins=[custom_ddp_plugin],
- logger=logger,
- **train_params,
- )
-
- if args.do_train:
- trainer.fit(model)
-
- return trainer
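-
-
-# Usage sketch (hypothetical task module; the real fine-tuning scripts wire this up
-# with their own BaseTransformer subclasses):
-#
-#   parser = argparse.ArgumentParser()
-#   add_generic_args(parser, os.getcwd())
-#   parser = MyTaskTransformer.add_model_specific_args(parser, os.getcwd())
-#   args = parser.parse_args()
-#   trainer = generic_train(MyTaskTransformer(args), args)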
diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/chromadb/test/db/test_system.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/chromadb/test/db/test_system.py
deleted file mode 100644
index 02ff97895b8df62b924e990b6d27390f0c4d4cd8..0000000000000000000000000000000000000000
--- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/chromadb/test/db/test_system.py
+++ /dev/null
@@ -1,315 +0,0 @@
-import pytest
-from typing import Generator, List, Callable, Dict, Union
-from chromadb.types import Collection, Segment, SegmentScope
-from chromadb.db.impl.sqlite import SqliteDB
-from chromadb.config import System, Settings
-from chromadb.db.system import SysDB
-from chromadb.db.base import NotFoundError, UniqueConstraintError
-from pytest import FixtureRequest
-import uuid
-
-
-def sqlite() -> Generator[SysDB, None, None]:
- """Fixture generator for sqlite DB"""
- db = SqliteDB(System(Settings(sqlite_database=":memory:", allow_reset=True)))
- db.start()
- yield db
- db.stop()
-
-
-def db_fixtures() -> List[Callable[[], Generator[SysDB, None, None]]]:
- return [sqlite]
-
-
-@pytest.fixture(scope="module", params=db_fixtures())
-def sysdb(request: FixtureRequest) -> Generator[SysDB, None, None]:
- yield next(request.param())
-
-
-sample_collections = [
- Collection(
- id=uuid.uuid4(),
- name="test_collection_1",
- topic="test_topic_1",
- metadata={"test_str": "str1", "test_int": 1, "test_float": 1.3},
- dimension=128,
- ),
- Collection(
- id=uuid.uuid4(),
- name="test_collection_2",
- topic="test_topic_2",
- metadata={"test_str": "str2", "test_int": 2, "test_float": 2.3},
- dimension=None,
- ),
- Collection(
- id=uuid.uuid4(),
- name="test_collection_3",
- topic="test_topic_3",
- metadata={"test_str": "str3", "test_int": 3, "test_float": 3.3},
- dimension=None,
- ),
-]
-
-
-def test_create_get_delete_collections(sysdb: SysDB) -> None:
- sysdb.reset_state()
-
- for collection in sample_collections:
- sysdb.create_collection(collection)
-
- results = sysdb.get_collections()
- results = sorted(results, key=lambda c: c["name"])
-
- assert sorted(results, key=lambda c: c["name"]) == sample_collections
-
- # Duplicate create fails
- with pytest.raises(UniqueConstraintError):
- sysdb.create_collection(sample_collections[0])
-
- # Find by name
- for collection in sample_collections:
- result = sysdb.get_collections(name=collection["name"])
- assert result == [collection]
-
- # Find by topic
- for collection in sample_collections:
- result = sysdb.get_collections(topic=collection["topic"])
- assert result == [collection]
-
- # Find by id
- for collection in sample_collections:
- result = sysdb.get_collections(id=collection["id"])
- assert result == [collection]
-
- # Find by id and topic (positive case)
- for collection in sample_collections:
- result = sysdb.get_collections(id=collection["id"], topic=collection["topic"])
- assert result == [collection]
-
- # find by id and topic (negative case)
- for collection in sample_collections:
- result = sysdb.get_collections(id=collection["id"], topic="other_topic")
- assert result == []
-
- # Delete
- c1 = sample_collections[0]
- sysdb.delete_collection(c1["id"])
-
- results = sysdb.get_collections()
- assert c1 not in results
- assert len(results) == len(sample_collections) - 1
- assert sorted(results, key=lambda c: c["name"]) == sample_collections[1:]
-
- by_id_result = sysdb.get_collections(id=c1["id"])
- assert by_id_result == []
-
- # Duplicate delete throws an exception
- with pytest.raises(NotFoundError):
- sysdb.delete_collection(c1["id"])
-
-
-def test_update_collections(sysdb: SysDB) -> None:
- metadata: Dict[str, Union[str, int, float]] = {
- "test_str": "str1",
- "test_int": 1,
- "test_float": 1.3,
- }
- coll = Collection(
- id=uuid.uuid4(),
- name="test_collection_1",
- topic="test_topic_1",
- metadata=metadata,
- dimension=None,
- )
-
- sysdb.reset_state()
-
- sysdb.create_collection(coll)
-
- # Update name
- coll["name"] = "new_name"
- sysdb.update_collection(coll["id"], name=coll["name"])
- result = sysdb.get_collections(name=coll["name"])
- assert result == [coll]
-
- # Update topic
- coll["topic"] = "new_topic"
- sysdb.update_collection(coll["id"], topic=coll["topic"])
- result = sysdb.get_collections(topic=coll["topic"])
- assert result == [coll]
-
- # Update dimension
- coll["dimension"] = 128
- sysdb.update_collection(coll["id"], dimension=coll["dimension"])
- result = sysdb.get_collections(id=coll["id"])
- assert result == [coll]
-
- # Reset the metadata
- coll["metadata"] = {"test_str2": "str2"}
- sysdb.update_collection(coll["id"], metadata=coll["metadata"])
- result = sysdb.get_collections(id=coll["id"])
- assert result == [coll]
-
- # Delete all metadata keys
- coll["metadata"] = None
- sysdb.update_collection(coll["id"], metadata=None)
- result = sysdb.get_collections(id=coll["id"])
- assert result == [coll]
-
-
-sample_segments = [
- Segment(
- id=uuid.UUID("00000000-d7d7-413b-92e1-731098a6e492"),
- type="test_type_a",
- scope=SegmentScope.VECTOR,
- topic=None,
- collection=sample_collections[0]["id"],
- metadata={"test_str": "str1", "test_int": 1, "test_float": 1.3},
- ),
- Segment(
- id=uuid.UUID("11111111-d7d7-413b-92e1-731098a6e492"),
- type="test_type_b",
- topic="test_topic_2",
- scope=SegmentScope.VECTOR,
- collection=sample_collections[1]["id"],
- metadata={"test_str": "str2", "test_int": 2, "test_float": 2.3},
- ),
- Segment(
- id=uuid.UUID("22222222-d7d7-413b-92e1-731098a6e492"),
- type="test_type_b",
- topic="test_topic_3",
- scope=SegmentScope.METADATA,
- collection=None,
- metadata={"test_str": "str3", "test_int": 3, "test_float": 3.3},
- ),
-]
-
-
-def test_create_get_delete_segments(sysdb: SysDB) -> None:
- sysdb.reset_state()
-
- for collection in sample_collections:
- sysdb.create_collection(collection)
-
- for segment in sample_segments:
- sysdb.create_segment(segment)
-
- results = sysdb.get_segments()
- results = sorted(results, key=lambda c: c["id"])
-
- assert results == sample_segments
-
- # Duplicate create fails
- with pytest.raises(UniqueConstraintError):
- sysdb.create_segment(sample_segments[0])
-
- # Find by id
- for segment in sample_segments:
- result = sysdb.get_segments(id=segment["id"])
- assert result == [segment]
-
- # Find by type
- result = sysdb.get_segments(type="test_type_a")
- assert result == sample_segments[:1]
-
- result = sysdb.get_segments(type="test_type_b")
- assert result == sample_segments[1:]
-
- # Find by collection ID
- result = sysdb.get_segments(collection=sample_collections[0]["id"])
- assert result == sample_segments[:1]
-
- # Find by type and collection ID (positive case)
- result = sysdb.get_segments(
- type="test_type_a", collection=sample_collections[0]["id"]
- )
- assert result == sample_segments[:1]
-
- # Find by type and collection ID (negative case)
- result = sysdb.get_segments(
- type="test_type_b", collection=sample_collections[0]["id"]
- )
- assert result == []
-
- # Delete
- s1 = sample_segments[0]
- sysdb.delete_segment(s1["id"])
-
- results = sysdb.get_segments()
- assert s1 not in results
- assert len(results) == len(sample_segments) - 1
- assert sorted(results, key=lambda c: c["type"]) == sample_segments[1:]
-
- # Duplicate delete throws an exception
- with pytest.raises(NotFoundError):
- sysdb.delete_segment(s1["id"])
-
-
-def test_update_segment(sysdb: SysDB) -> None:
- metadata: Dict[str, Union[str, int, float]] = {
- "test_str": "str1",
- "test_int": 1,
- "test_float": 1.3,
- }
- segment = Segment(
- id=uuid.uuid4(),
- type="test_type_a",
- scope=SegmentScope.VECTOR,
- topic="test_topic_a",
- collection=sample_collections[0]["id"],
- metadata=metadata,
- )
-
- sysdb.reset_state()
- for c in sample_collections:
- sysdb.create_collection(c)
-
- sysdb.create_segment(segment)
-
- # Update topic to new value
- segment["topic"] = "new_topic"
- sysdb.update_segment(segment["id"], topic=segment["topic"])
- result = sysdb.get_segments(id=segment["id"])
- assert result == [segment]
-
- # Update topic to None
- segment["topic"] = None
- sysdb.update_segment(segment["id"], topic=segment["topic"])
- result = sysdb.get_segments(id=segment["id"])
- assert result == [segment]
-
- # Update collection to new value
- segment["collection"] = sample_collections[1]["id"]
- sysdb.update_segment(segment["id"], collection=segment["collection"])
- result = sysdb.get_segments(id=segment["id"])
- assert result == [segment]
-
- # Update collection to None
- segment["collection"] = None
- sysdb.update_segment(segment["id"], collection=segment["collection"])
- result = sysdb.get_segments(id=segment["id"])
- assert result == [segment]
-
- # Add a new metadata key
- metadata["test_str2"] = "str2"
- sysdb.update_segment(segment["id"], metadata={"test_str2": "str2"})
- result = sysdb.get_segments(id=segment["id"])
- assert result == [segment]
-
- # Update a metadata key
- metadata["test_str"] = "str3"
- sysdb.update_segment(segment["id"], metadata={"test_str": "str3"})
- result = sysdb.get_segments(id=segment["id"])
- assert result == [segment]
-
- # Delete a metadata key
- del metadata["test_str"]
- sysdb.update_segment(segment["id"], metadata={"test_str": None})
- result = sysdb.get_segments(id=segment["id"])
- assert result == [segment]
-
- # Delete all metadata keys
- segment["metadata"] = None
- sysdb.update_segment(segment["id"], metadata=None)
- result = sysdb.get_segments(id=segment["id"])
- assert result == [segment]
diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/cryptography/hazmat/primitives/ciphers/aead.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/cryptography/hazmat/primitives/ciphers/aead.py
deleted file mode 100644
index 957b2d221b62741490044fe114e8c0bc4c90e693..0000000000000000000000000000000000000000
--- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/cryptography/hazmat/primitives/ciphers/aead.py
+++ /dev/null
@@ -1,378 +0,0 @@
-# This file is dual licensed under the terms of the Apache License, Version
-# 2.0, and the BSD License. See the LICENSE file in the root of this repository
-# for complete details.
-
-from __future__ import annotations
-
-import os
-import typing
-
-from cryptography import exceptions, utils
-from cryptography.hazmat.backends.openssl import aead
-from cryptography.hazmat.backends.openssl.backend import backend
-from cryptography.hazmat.bindings._rust import FixedPool
-
-
-class ChaCha20Poly1305:
- _MAX_SIZE = 2**31 - 1
-
- def __init__(self, key: bytes):
- if not backend.aead_cipher_supported(self):
- raise exceptions.UnsupportedAlgorithm(
- "ChaCha20Poly1305 is not supported by this version of OpenSSL",
- exceptions._Reasons.UNSUPPORTED_CIPHER,
- )
- utils._check_byteslike("key", key)
-
- if len(key) != 32:
- raise ValueError("ChaCha20Poly1305 key must be 32 bytes.")
-
- self._key = key
- self._pool = FixedPool(self._create_fn)
-
- @classmethod
- def generate_key(cls) -> bytes:
- return os.urandom(32)
-
- def _create_fn(self):
- return aead._aead_create_ctx(backend, self, self._key)
-
- def encrypt(
- self,
- nonce: bytes,
- data: bytes,
- associated_data: typing.Optional[bytes],
- ) -> bytes:
- if associated_data is None:
- associated_data = b""
-
- if len(data) > self._MAX_SIZE or len(associated_data) > self._MAX_SIZE:
- # This is OverflowError to match what cffi would raise
- raise OverflowError(
- "Data or associated data too long. Max 2**31 - 1 bytes"
- )
-
- self._check_params(nonce, data, associated_data)
- with self._pool.acquire() as ctx:
- return aead._encrypt(
- backend, self, nonce, data, [associated_data], 16, ctx
- )
-
- def decrypt(
- self,
- nonce: bytes,
- data: bytes,
- associated_data: typing.Optional[bytes],
- ) -> bytes:
- if associated_data is None:
- associated_data = b""
-
- self._check_params(nonce, data, associated_data)
- with self._pool.acquire() as ctx:
- return aead._decrypt(
- backend, self, nonce, data, [associated_data], 16, ctx
- )
-
- def _check_params(
- self,
- nonce: bytes,
- data: bytes,
- associated_data: bytes,
- ) -> None:
- utils._check_byteslike("nonce", nonce)
- utils._check_byteslike("data", data)
- utils._check_byteslike("associated_data", associated_data)
- if len(nonce) != 12:
- raise ValueError("Nonce must be 12 bytes")
-
-
-class AESCCM:
- _MAX_SIZE = 2**31 - 1
-
- def __init__(self, key: bytes, tag_length: int = 16):
- utils._check_byteslike("key", key)
- if len(key) not in (16, 24, 32):
- raise ValueError("AESCCM key must be 128, 192, or 256 bits.")
-
- self._key = key
- if not isinstance(tag_length, int):
- raise TypeError("tag_length must be an integer")
-
- if tag_length not in (4, 6, 8, 10, 12, 14, 16):
- raise ValueError("Invalid tag_length")
-
- self._tag_length = tag_length
-
- if not backend.aead_cipher_supported(self):
- raise exceptions.UnsupportedAlgorithm(
- "AESCCM is not supported by this version of OpenSSL",
- exceptions._Reasons.UNSUPPORTED_CIPHER,
- )
-
- @classmethod
- def generate_key(cls, bit_length: int) -> bytes:
- if not isinstance(bit_length, int):
- raise TypeError("bit_length must be an integer")
-
- if bit_length not in (128, 192, 256):
- raise ValueError("bit_length must be 128, 192, or 256")
-
- return os.urandom(bit_length // 8)
-
- def encrypt(
- self,
- nonce: bytes,
- data: bytes,
- associated_data: typing.Optional[bytes],
- ) -> bytes:
- if associated_data is None:
- associated_data = b""
-
- if len(data) > self._MAX_SIZE or len(associated_data) > self._MAX_SIZE:
- # This is OverflowError to match what cffi would raise
- raise OverflowError(
- "Data or associated data too long. Max 2**31 - 1 bytes"
- )
-
- self._check_params(nonce, data, associated_data)
- self._validate_lengths(nonce, len(data))
- return aead._encrypt(
- backend, self, nonce, data, [associated_data], self._tag_length
- )
-
- def decrypt(
- self,
- nonce: bytes,
- data: bytes,
- associated_data: typing.Optional[bytes],
- ) -> bytes:
- if associated_data is None:
- associated_data = b""
-
- self._check_params(nonce, data, associated_data)
- return aead._decrypt(
- backend, self, nonce, data, [associated_data], self._tag_length
- )
-
- def _validate_lengths(self, nonce: bytes, data_len: int) -> None:
- # For information about computing this, see
- # https://tools.ietf.org/html/rfc3610#section-2.1
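- # e.g. a 13-byte nonce leaves l_val = 2, capping the payload at 2**16 = 65536 bytes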
- l_val = 15 - len(nonce)
- if 2 ** (8 * l_val) < data_len:
- raise ValueError("Data too long for nonce")
-
- def _check_params(
- self, nonce: bytes, data: bytes, associated_data: bytes
- ) -> None:
- utils._check_byteslike("nonce", nonce)
- utils._check_byteslike("data", data)
- utils._check_byteslike("associated_data", associated_data)
- if not 7 <= len(nonce) <= 13:
- raise ValueError("Nonce must be between 7 and 13 bytes")
-
-
-class AESGCM:
- _MAX_SIZE = 2**31 - 1
-
- def __init__(self, key: bytes):
- utils._check_byteslike("key", key)
- if len(key) not in (16, 24, 32):
- raise ValueError("AESGCM key must be 128, 192, or 256 bits.")
-
- self._key = key
-
- @classmethod
- def generate_key(cls, bit_length: int) -> bytes:
- if not isinstance(bit_length, int):
- raise TypeError("bit_length must be an integer")
-
- if bit_length not in (128, 192, 256):
- raise ValueError("bit_length must be 128, 192, or 256")
-
- return os.urandom(bit_length // 8)
-
- def encrypt(
- self,
- nonce: bytes,
- data: bytes,
- associated_data: typing.Optional[bytes],
- ) -> bytes:
- if associated_data is None:
- associated_data = b""
-
- if len(data) > self._MAX_SIZE or len(associated_data) > self._MAX_SIZE:
- # This is OverflowError to match what cffi would raise
- raise OverflowError(
- "Data or associated data too long. Max 2**31 - 1 bytes"
- )
-
- self._check_params(nonce, data, associated_data)
- return aead._encrypt(backend, self, nonce, data, [associated_data], 16)
-
- def decrypt(
- self,
- nonce: bytes,
- data: bytes,
- associated_data: typing.Optional[bytes],
- ) -> bytes:
- if associated_data is None:
- associated_data = b""
-
- self._check_params(nonce, data, associated_data)
- return aead._decrypt(backend, self, nonce, data, [associated_data], 16)
-
- def _check_params(
- self,
- nonce: bytes,
- data: bytes,
- associated_data: bytes,
- ) -> None:
- utils._check_byteslike("nonce", nonce)
- utils._check_byteslike("data", data)
- utils._check_byteslike("associated_data", associated_data)
- if len(nonce) < 8 or len(nonce) > 128:
- raise ValueError("Nonce must be between 8 and 128 bytes")
-
-
-class AESOCB3:
- _MAX_SIZE = 2**31 - 1
-
- def __init__(self, key: bytes):
- utils._check_byteslike("key", key)
- if len(key) not in (16, 24, 32):
- raise ValueError("AESOCB3 key must be 128, 192, or 256 bits.")
-
- self._key = key
-
- if not backend.aead_cipher_supported(self):
- raise exceptions.UnsupportedAlgorithm(
- "OCB3 is not supported by this version of OpenSSL",
- exceptions._Reasons.UNSUPPORTED_CIPHER,
- )
-
- @classmethod
- def generate_key(cls, bit_length: int) -> bytes:
- if not isinstance(bit_length, int):
- raise TypeError("bit_length must be an integer")
-
- if bit_length not in (128, 192, 256):
- raise ValueError("bit_length must be 128, 192, or 256")
-
- return os.urandom(bit_length // 8)
-
- def encrypt(
- self,
- nonce: bytes,
- data: bytes,
- associated_data: typing.Optional[bytes],
- ) -> bytes:
- if associated_data is None:
- associated_data = b""
-
- if len(data) > self._MAX_SIZE or len(associated_data) > self._MAX_SIZE:
- # This is OverflowError to match what cffi would raise
- raise OverflowError(
- "Data or associated data too long. Max 2**31 - 1 bytes"
- )
-
- self._check_params(nonce, data, associated_data)
- return aead._encrypt(backend, self, nonce, data, [associated_data], 16)
-
- def decrypt(
- self,
- nonce: bytes,
- data: bytes,
- associated_data: typing.Optional[bytes],
- ) -> bytes:
- if associated_data is None:
- associated_data = b""
-
- self._check_params(nonce, data, associated_data)
- return aead._decrypt(backend, self, nonce, data, [associated_data], 16)
-
- def _check_params(
- self,
- nonce: bytes,
- data: bytes,
- associated_data: bytes,
- ) -> None:
- utils._check_byteslike("nonce", nonce)
- utils._check_byteslike("data", data)
- utils._check_byteslike("associated_data", associated_data)
- if len(nonce) < 12 or len(nonce) > 15:
- raise ValueError("Nonce must be between 12 and 15 bytes")
-
-
-class AESSIV:
- _MAX_SIZE = 2**31 - 1
-
- def __init__(self, key: bytes):
- utils._check_byteslike("key", key)
- if len(key) not in (32, 48, 64):
- raise ValueError("AESSIV key must be 256, 384, or 512 bits.")
-
- self._key = key
-
- if not backend.aead_cipher_supported(self):
- raise exceptions.UnsupportedAlgorithm(
- "AES-SIV is not supported by this version of OpenSSL",
- exceptions._Reasons.UNSUPPORTED_CIPHER,
- )
-
- @classmethod
- def generate_key(cls, bit_length: int) -> bytes:
- if not isinstance(bit_length, int):
- raise TypeError("bit_length must be an integer")
-
- if bit_length not in (256, 384, 512):
- raise ValueError("bit_length must be 256, 384, or 512")
-
- return os.urandom(bit_length // 8)
-
- def encrypt(
- self,
- data: bytes,
- associated_data: typing.Optional[typing.List[bytes]],
- ) -> bytes:
- if associated_data is None:
- associated_data = []
-
- self._check_params(data, associated_data)
-
- if len(data) > self._MAX_SIZE or any(
- len(ad) > self._MAX_SIZE for ad in associated_data
- ):
- # This is OverflowError to match what cffi would raise
- raise OverflowError(
- "Data or associated data too long. Max 2**31 - 1 bytes"
- )
-
- return aead._encrypt(backend, self, b"", data, associated_data, 16)
-
- def decrypt(
- self,
- data: bytes,
- associated_data: typing.Optional[typing.List[bytes]],
- ) -> bytes:
- if associated_data is None:
- associated_data = []
-
- self._check_params(data, associated_data)
-
- return aead._decrypt(backend, self, b"", data, associated_data, 16)
-
- def _check_params(
- self,
- data: bytes,
- associated_data: typing.List[bytes],
- ) -> None:
- utils._check_byteslike("data", data)
- if len(data) == 0:
- raise ValueError("data must not be zero length")
-
- if not isinstance(associated_data, list):
- raise TypeError(
- "associated_data must be a list of bytes-like objects or None"
- )
- for x in associated_data:
- utils._check_byteslike("associated_data elements", x)
diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fsspec/implementations/__init__.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fsspec/implementations/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/cihyFjudo/fairness-paper-search/Ansoftfix.exe Free Download Everything You Need to Know About the Ansys Fixer.md b/spaces/cihyFjudo/fairness-paper-search/Ansoftfix.exe Free Download Everything You Need to Know About the Ansys Fixer.md
deleted file mode 100644
index 804b199b6725a49e362c1f6ccc9e35454a15df90..0000000000000000000000000000000000000000
--- a/spaces/cihyFjudo/fairness-paper-search/Ansoftfix.exe Free Download Everything You Need to Know About the Ansys Fixer.md
+++ /dev/null
@@ -1,6 +0,0 @@
-ansoftfix.exe free download
Download Zip ⇒⇒⇒ https://tinurli.com/2uwiQd
-
- aaccfb2cb3
-
-
-
diff --git a/spaces/cihyFjudo/fairness-paper-search/Chakravyuh Telugu Dubbed Movie Free Download Stepmania Casinos St What You Need to Know About the Movie and Its Cast.md b/spaces/cihyFjudo/fairness-paper-search/Chakravyuh Telugu Dubbed Movie Free Download Stepmania Casinos St What You Need to Know About the Movie and Its Cast.md
deleted file mode 100644
index 9135e16ab3fef415c0b5a56b8c7756ca4f7f7aaa..0000000000000000000000000000000000000000
--- a/spaces/cihyFjudo/fairness-paper-search/Chakravyuh Telugu Dubbed Movie Free Download Stepmania Casinos St What You Need to Know About the Movie and Its Cast.md
+++ /dev/null
@@ -1,6 +0,0 @@
-Chakravyuh Telugu Dubbed Movie Free Download stepmania casinos st
Download File ✺✺✺ https://tinurli.com/2uwk2C
-
- aaccfb2cb3
-
-
-
diff --git a/spaces/cihyFjudo/fairness-paper-search/Ummy Video Downloader 1.10.5.3 Crack With Serial Key A Must-Have Tool for Video Lovers.md b/spaces/cihyFjudo/fairness-paper-search/Ummy Video Downloader 1.10.5.3 Crack With Serial Key A Must-Have Tool for Video Lovers.md
deleted file mode 100644
index 68b14ad805e9d5dfa93e20bb8a6550181febd750..0000000000000000000000000000000000000000
--- a/spaces/cihyFjudo/fairness-paper-search/Ummy Video Downloader 1.10.5.3 Crack With Serial Key A Must-Have Tool for Video Lovers.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-It comes with a simple download procedure that does not have to waste your time while you learn it. Simply open the video you want to download, copy the link in this software and you will be ready to download it.
-Compared to other software, this software is better because of its speed and efficiency. In addition, the crack of the Ummy video downloader is available on any PC and laptop. If you have many files to download, the process can be a bit slow, but if you can download individual files, you can do it at high speed. Before downloading videos, remember not to download videos.
-Ummy Video Downloader 1.10.5.3 Crack With Serial Key Free Download
Download File ———>>> https://tinurli.com/2uwkOA
aaccfb2cb3
-
-
\ No newline at end of file
diff --git a/spaces/colakin/video-generater/public/ffmpeg/doc/Makefile b/spaces/colakin/video-generater/public/ffmpeg/doc/Makefile
deleted file mode 100644
index 67586e4b7488de6dc310f1192c55b2e9d56ffc4f..0000000000000000000000000000000000000000
--- a/spaces/colakin/video-generater/public/ffmpeg/doc/Makefile
+++ /dev/null
@@ -1,158 +0,0 @@
-LIBRARIES-$(CONFIG_AVUTIL) += libavutil
-LIBRARIES-$(CONFIG_SWSCALE) += libswscale
-LIBRARIES-$(CONFIG_SWRESAMPLE) += libswresample
-LIBRARIES-$(CONFIG_AVCODEC) += libavcodec
-LIBRARIES-$(CONFIG_AVFORMAT) += libavformat
-LIBRARIES-$(CONFIG_AVDEVICE) += libavdevice
-LIBRARIES-$(CONFIG_AVFILTER) += libavfilter
-
-COMPONENTS-$(CONFIG_AVUTIL) += ffmpeg-utils
-COMPONENTS-$(CONFIG_SWSCALE) += ffmpeg-scaler
-COMPONENTS-$(CONFIG_SWRESAMPLE) += ffmpeg-resampler
-COMPONENTS-$(CONFIG_AVCODEC) += ffmpeg-codecs ffmpeg-bitstream-filters
-COMPONENTS-$(CONFIG_AVFORMAT) += ffmpeg-formats ffmpeg-protocols
-COMPONENTS-$(CONFIG_AVDEVICE) += ffmpeg-devices
-COMPONENTS-$(CONFIG_AVFILTER) += ffmpeg-filters
-
-MANPAGES1 = $(AVPROGS-yes:%=doc/%.1) $(AVPROGS-yes:%=doc/%-all.1) $(COMPONENTS-yes:%=doc/%.1)
-MANPAGES3 = $(LIBRARIES-yes:%=doc/%.3)
-MANPAGES = $(MANPAGES1) $(MANPAGES3)
-PODPAGES = $(AVPROGS-yes:%=doc/%.pod) $(AVPROGS-yes:%=doc/%-all.pod) $(COMPONENTS-yes:%=doc/%.pod) $(LIBRARIES-yes:%=doc/%.pod)
-HTMLPAGES = $(AVPROGS-yes:%=doc/%.html) $(AVPROGS-yes:%=doc/%-all.html) $(COMPONENTS-yes:%=doc/%.html) $(LIBRARIES-yes:%=doc/%.html) \
- doc/community.html \
- doc/developer.html \
- doc/faq.html \
- doc/fate.html \
- doc/general.html \
- doc/git-howto.html \
- doc/mailing-list-faq.html \
- doc/nut.html \
- doc/platform.html \
- $(SRC_PATH)/doc/bootstrap.min.css \
- $(SRC_PATH)/doc/style.min.css \
- $(SRC_PATH)/doc/default.css \
-
-TXTPAGES = doc/fate.txt \
-
-
-DOCS-$(CONFIG_HTMLPAGES) += $(HTMLPAGES)
-DOCS-$(CONFIG_PODPAGES) += $(PODPAGES)
-DOCS-$(CONFIG_MANPAGES) += $(MANPAGES)
-DOCS-$(CONFIG_TXTPAGES) += $(TXTPAGES)
-DOCS = $(DOCS-yes)
-
-all-$(CONFIG_DOC): doc
-
-doc: documentation
-
-apidoc: doc/doxy/html
-documentation: $(DOCS)
-
-TEXIDEP = perl $(SRC_PATH)/doc/texidep.pl $(SRC_PATH) $< $@ >$(@:%=%.d)
-
-doc/%.txt: TAG = TXT
-doc/%.txt: doc/%.texi
- $(Q)$(TEXIDEP)
- $(M)makeinfo --force --no-headers -o $@ $< 2>/dev/null
-
-GENTEXI = format codec
-GENTEXI := $(GENTEXI:%=doc/avoptions_%.texi)
-
-$(GENTEXI): TAG = GENTEXI
-$(GENTEXI): doc/avoptions_%.texi: doc/print_options$(HOSTEXESUF)
- $(M)doc/print_options $* > $@
-
-doc/%.html: TAG = HTML
-doc/%-all.html: TAG = HTML
-
-ifdef HAVE_MAKEINFO_HTML
-doc/%.html: doc/%.texi $(SRC_PATH)/doc/t2h.pm $(GENTEXI)
- $(Q)$(TEXIDEP)
- $(M)makeinfo --html -I doc --no-split -D config-not-all --init-file=$(SRC_PATH)/doc/t2h.pm --output $@ $<
-
-doc/%-all.html: doc/%.texi $(SRC_PATH)/doc/t2h.pm $(GENTEXI)
- $(Q)$(TEXIDEP)
- $(M)makeinfo --html -I doc --no-split -D config-all --init-file=$(SRC_PATH)/doc/t2h.pm --output $@ $<
-else
-doc/%.html: doc/%.texi $(SRC_PATH)/doc/t2h.init $(GENTEXI)
- $(Q)$(TEXIDEP)
- $(M)texi2html -I doc -monolithic --D=config-not-all --init-file $(SRC_PATH)/doc/t2h.init --output $@ $<
-
-doc/%-all.html: doc/%.texi $(SRC_PATH)/doc/t2h.init $(GENTEXI)
- $(Q)$(TEXIDEP)
- $(M)texi2html -I doc -monolithic --D=config-all --init-file $(SRC_PATH)/doc/t2h.init --output $@ $<
-endif
-
-doc/%.pod: TAG = POD
-doc/%.pod: doc/%.texi $(SRC_PATH)/doc/texi2pod.pl $(GENTEXI)
- $(Q)$(TEXIDEP)
- $(M)perl $(SRC_PATH)/doc/texi2pod.pl -Dconfig-not-all=yes -Idoc $< $@
-
-doc/%-all.pod: TAG = POD
-doc/%-all.pod: doc/%.texi $(SRC_PATH)/doc/texi2pod.pl $(GENTEXI)
- $(Q)$(TEXIDEP)
- $(M)perl $(SRC_PATH)/doc/texi2pod.pl -Dconfig-all=yes -Idoc $< $@
-
-doc/%.1 doc/%.3: TAG = MAN
-doc/%.1: doc/%.pod $(GENTEXI)
- $(M)pod2man --section=1 --center=" " --release=" " --date=" " $< > $@
-doc/%.3: doc/%.pod $(GENTEXI)
- $(M)pod2man --section=3 --center=" " --release=" " --date=" " $< > $@
-
-$(DOCS) doc/doxy/html: | doc/
-
-DOXY_INPUT = $(INSTHEADERS)
-DOXY_INPUT_DEPS = $(addprefix $(SRC_PATH)/, $(DOXY_INPUT)) ffbuild/config.mak
-
-doc/doxy/html: TAG = DOXY
-doc/doxy/html: $(SRC_PATH)/doc/Doxyfile $(SRC_PATH)/doc/doxy-wrapper.sh $(DOXY_INPUT_DEPS)
- $(M)$(SRC_PATH)/doc/doxy-wrapper.sh $$PWD/doc/doxy $(SRC_PATH) doc/Doxyfile $(DOXYGEN) $(DOXY_INPUT);
-
-install-doc: install-html install-man
-
-install-html:
-
-install-man:
-
-ifdef CONFIG_HTMLPAGES
-install-progs-$(CONFIG_DOC): install-html
-
-install-html: $(HTMLPAGES)
- $(Q)mkdir -p "$(DOCDIR)"
- $(INSTALL) -m 644 $(HTMLPAGES) "$(DOCDIR)"
-endif
-
-ifdef CONFIG_MANPAGES
-install-progs-$(CONFIG_DOC): install-man
-
-install-man: $(MANPAGES)
- $(Q)mkdir -p "$(MANDIR)/man1"
- $(INSTALL) -m 644 $(MANPAGES1) "$(MANDIR)/man1"
- $(Q)mkdir -p "$(MANDIR)/man3"
- $(INSTALL) -m 644 $(MANPAGES3) "$(MANDIR)/man3"
-endif
-
-uninstall: uninstall-doc
-
-uninstall-doc: uninstall-html uninstall-man
-
-uninstall-html:
- $(RM) -r "$(DOCDIR)"
-
-uninstall-man:
- $(RM) $(addprefix "$(MANDIR)/man1/",$(AVPROGS-yes:%=%.1) $(AVPROGS-yes:%=%-all.1) $(COMPONENTS-yes:%=%.1))
- $(RM) $(addprefix "$(MANDIR)/man3/",$(LIBRARIES-yes:%=%.3))
-
-clean:: docclean
-
-distclean:: docclean
- $(RM) doc/config.texi
-
-docclean::
- $(RM) $(CLEANSUFFIXES:%=doc/%)
- $(RM) $(TXTPAGES) doc/*.html doc/*.pod doc/*.1 doc/*.3 doc/avoptions_*.texi
- $(RM) -r doc/doxy/html
-
--include $(wildcard $(DOCS:%=%.d))
-
-.PHONY: apidoc doc documentation
diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/adx_parser.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/adx_parser.c
deleted file mode 100644
index 52aa14b7ad377fbe94d7a015fd910adc360cbe9e..0000000000000000000000000000000000000000
--- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/adx_parser.c
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Copyright (c) 2011 Justin Ruggles
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-/**
- * @file
- * ADX audio parser
- *
- * Splits packets into individual blocks.
- */
-
-#include "libavutil/intreadwrite.h"
-#include "parser.h"
-#include "adx.h"
-
-typedef struct ADXParseContext {
- ParseContext pc;
- int header_size;
- int block_size;
- int remaining;
-} ADXParseContext;
-
-static int adx_parse(AVCodecParserContext *s1,
- AVCodecContext *avctx,
- const uint8_t **poutbuf, int *poutbuf_size,
- const uint8_t *buf, int buf_size)
-{
- ADXParseContext *s = s1->priv_data;
- ParseContext *pc = &s->pc;
- int next = END_NOT_FOUND;
- int i;
- uint64_t state = pc->state64;
-
- if (!s->header_size) {
- for (i = 0; i < buf_size; i++) {
- state = (state << 8) | buf[i];
- /* check for fixed fields in ADX header for possible match */
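- /* the mask pins the 0x80 0x00 magic and the fixed 0x03 0x12 0x04 encoding bytes;
- * the masked-out bytes encode the header size and channel count, extracted just below */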
- if ((state & 0xFFFF0000FFFFFF00) == 0x8000000003120400ULL) {
- int channels = state & 0xFF;
- int header_size = ((state >> 32) & 0xFFFF) + 4;
- if (channels > 0 && header_size >= 8) {
- s->header_size = header_size;
- s->block_size = BLOCK_SIZE * channels;
- s->remaining = i - 7 + s->header_size + s->block_size;
- break;
- }
- }
- }
- pc->state64 = state;
- }
-
- if (s->header_size) {
- if (!s->remaining)
- s->remaining = s->block_size;
- if (s->remaining <= buf_size) {
- next = s->remaining;
- s->remaining = 0;
- } else
- s->remaining -= buf_size;
- }
-
- if (ff_combine_frame(pc, next, &buf, &buf_size) < 0 || !buf_size) {
- *poutbuf = NULL;
- *poutbuf_size = 0;
- return buf_size;
- }
-
- s1->duration = BLOCK_SAMPLES;
-
- *poutbuf = buf;
- *poutbuf_size = buf_size;
- return next;
-}
-
-const AVCodecParser ff_adx_parser = {
- .codec_ids = { AV_CODEC_ID_ADPCM_ADX },
- .priv_data_size = sizeof(ADXParseContext),
- .parser_parse = adx_parse,
- .parser_close = ff_parse_close,
-};
diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/bitpacked_dec.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/bitpacked_dec.c
deleted file mode 100644
index a1ffef185ceab5a9e71e658dcb46f9e9490294b3..0000000000000000000000000000000000000000
--- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/bitpacked_dec.c
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
- * Unpack bit-packed streams to formats supported by FFmpeg
- * Copyright (c) 2017 Savoir-faire Linux, Inc
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-/* Development sponsored by CBC/Radio-Canada */
-
-/**
- * @file
- * Bitpacked
- */
-
-#include "avcodec.h"
-#include "codec_internal.h"
-#include "get_bits.h"
-#include "libavutil/imgutils.h"
-#include "thread.h"
-
-struct BitpackedContext {
- int (*decode)(AVCodecContext *avctx, AVFrame *frame,
- const AVPacket *pkt);
-};
-
-/* For this format, it's a simple passthrough */
-static int bitpacked_decode_uyvy422(AVCodecContext *avctx, AVFrame *frame,
- const AVPacket *avpkt)
-{
- int ret;
-
- /* there is no need to copy as the data already match
- * a known pixel format */
- frame->buf[0] = av_buffer_ref(avpkt->buf);
- if (!frame->buf[0]) {
- return AVERROR(ENOMEM);
- }
-
- ret = av_image_fill_arrays(frame->data, frame->linesize, avpkt->data,
- avctx->pix_fmt, avctx->width, avctx->height, 1);
- if (ret < 0) {
- av_buffer_unref(&frame->buf[0]);
- return ret;
- }
-
- return 0;
-}
-
-static int bitpacked_decode_yuv422p10(AVCodecContext *avctx, AVFrame *frame,
- const AVPacket *avpkt)
-{
- uint64_t frame_size = (uint64_t)avctx->width * (uint64_t)avctx->height * 20;
- uint64_t packet_size = (uint64_t)avpkt->size * 8;
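- /* 10-bit 4:2:2 averages 20 bits per pixel, so a packed frame needs width * height * 20 bits;
- * both frame_size and packet_size are measured in bits */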
- GetBitContext bc;
- uint16_t *y, *u, *v;
- int ret, i, j;
-
- ret = ff_thread_get_buffer(avctx, frame, 0);
- if (ret < 0)
- return ret;
-
- if (frame_size > packet_size)
- return AVERROR_INVALIDDATA;
-
- if (avctx->width % 2)
- return AVERROR_PATCHWELCOME;
-
- ret = init_get_bits(&bc, avpkt->data, avctx->width * avctx->height * 20);
- if (ret)
- return ret;
-
- for (i = 0; i < avctx->height; i++) {
- y = (uint16_t*)(frame->data[0] + i * frame->linesize[0]);
- u = (uint16_t*)(frame->data[1] + i * frame->linesize[1]);
- v = (uint16_t*)(frame->data[2] + i * frame->linesize[2]);
-
- for (j = 0; j < avctx->width; j += 2) {
- *u++ = get_bits(&bc, 10);
- *y++ = get_bits(&bc, 10);
- *v++ = get_bits(&bc, 10);
- *y++ = get_bits(&bc, 10);
- }
- }
-
- return 0;
-}
-
-static av_cold int bitpacked_init_decoder(AVCodecContext *avctx)
-{
- struct BitpackedContext *bc = avctx->priv_data;
-
- if (!avctx->codec_tag || !avctx->width || !avctx->height)
- return AVERROR_INVALIDDATA;
-
- if (avctx->codec_tag == MKTAG('U', 'Y', 'V', 'Y')) {
- if (avctx->bits_per_coded_sample == 16 &&
- avctx->pix_fmt == AV_PIX_FMT_UYVY422)
- bc->decode = bitpacked_decode_uyvy422;
- else if (avctx->bits_per_coded_sample == 20 &&
- avctx->pix_fmt == AV_PIX_FMT_YUV422P10)
- bc->decode = bitpacked_decode_yuv422p10;
- else
- return AVERROR_INVALIDDATA;
- } else {
- return AVERROR_INVALIDDATA;
- }
-
- return 0;
-}
-
-static int bitpacked_decode(AVCodecContext *avctx, AVFrame *frame,
- int *got_frame, AVPacket *avpkt)
-{
- struct BitpackedContext *bc = avctx->priv_data;
- int buf_size = avpkt->size;
- int res;
-
- res = bc->decode(avctx, frame, avpkt);
- if (res)
- return res;
-
- frame->pict_type = AV_PICTURE_TYPE_I;
- frame->key_frame = 1;
-
- *got_frame = 1;
- return buf_size;
-
-}
-
-const FFCodec ff_bitpacked_decoder = {
- .p.name = "bitpacked",
- CODEC_LONG_NAME("Bitpacked"),
- .p.type = AVMEDIA_TYPE_VIDEO,
- .p.id = AV_CODEC_ID_BITPACKED,
- .p.capabilities = AV_CODEC_CAP_FRAME_THREADS,
- .priv_data_size = sizeof(struct BitpackedContext),
- .init = bitpacked_init_decoder,
- FF_CODEC_DECODE_CB(bitpacked_decode),
- .codec_tags = (const uint32_t []){
- MKTAG('U', 'Y', 'V', 'Y'),
- FF_CODEC_TAGS_END,
- },
-};
diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/dsicinaudio.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/dsicinaudio.c
deleted file mode 100644
index aa14966c7b0ac13610e496077ff8f832aab2cf5d..0000000000000000000000000000000000000000
--- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/dsicinaudio.c
+++ /dev/null
@@ -1,133 +0,0 @@
-/*
- * Delphine Software International CIN audio decoder
- * Copyright (c) 2006 Gregory Montoir (cyx@users.sourceforge.net)
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-/**
- * @file
- * Delphine Software International CIN audio decoder
- */
-
-#include "libavutil/channel_layout.h"
-#include "libavutil/intreadwrite.h"
-
-#include "avcodec.h"
-#include "codec_internal.h"
-#include "decode.h"
-#include "mathops.h"
-
-typedef struct CinAudioContext {
- int initial_decode_frame;
- int delta;
-} CinAudioContext;
-
-
-/* table defining a geometric sequence with multiplier = 32767 ^ (1 / 128) */
-static const int16_t cinaudio_delta16_table[256] = {
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, -30210, -27853, -25680, -23677, -21829,
- -20126, -18556, -17108, -15774, -14543, -13408, -12362, -11398,
- -10508, -9689, -8933, -8236, -7593, -7001, -6455, -5951,
- -5487, -5059, -4664, -4300, -3964, -3655, -3370, -3107,
- -2865, -2641, -2435, -2245, -2070, -1908, -1759, -1622,
- -1495, -1379, -1271, -1172, -1080, -996, -918, -847,
- -781, -720, -663, -612, -564, -520, -479, -442,
- -407, -376, -346, -319, -294, -271, -250, -230,
- -212, -196, -181, -166, -153, -141, -130, -120,
- -111, -102, -94, -87, -80, -74, -68, -62,
- -58, -53, -49, -45, -41, -38, -35, -32,
- -30, -27, -25, -23, -21, -20, -18, -17,
- -15, -14, -13, -12, -11, -10, -9, -8,
- -7, -6, -5, -4, -3, -2, -1, 0,
- 0, 1, 2, 3, 4, 5, 6, 7,
- 8, 9, 10, 11, 12, 13, 14, 15,
- 17, 18, 20, 21, 23, 25, 27, 30,
- 32, 35, 38, 41, 45, 49, 53, 58,
- 62, 68, 74, 80, 87, 94, 102, 111,
- 120, 130, 141, 153, 166, 181, 196, 212,
- 230, 250, 271, 294, 319, 346, 376, 407,
- 442, 479, 520, 564, 612, 663, 720, 781,
- 847, 918, 996, 1080, 1172, 1271, 1379, 1495,
- 1622, 1759, 1908, 2070, 2245, 2435, 2641, 2865,
- 3107, 3370, 3655, 3964, 4300, 4664, 5059, 5487,
- 5951, 6455, 7001, 7593, 8236, 8933, 9689, 10508,
- 11398, 12362, 13408, 14543, 15774, 17108, 18556, 20126,
- 21829, 23677, 25680, 27853, 30210, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0
-};
-
-static av_cold int cinaudio_decode_init(AVCodecContext *avctx)
-{
- CinAudioContext *cin = avctx->priv_data;
-
- cin->initial_decode_frame = 1;
- cin->delta = 0;
- avctx->sample_fmt = AV_SAMPLE_FMT_S16;
- av_channel_layout_uninit(&avctx->ch_layout);
- avctx->ch_layout = (AVChannelLayout)AV_CHANNEL_LAYOUT_MONO;
-
- return 0;
-}
-
-static int cinaudio_decode_frame(AVCodecContext *avctx, AVFrame *frame,
- int *got_frame_ptr, AVPacket *avpkt)
-{
- const uint8_t *buf = avpkt->data;
- CinAudioContext *cin = avctx->priv_data;
- const uint8_t *buf_end = buf + avpkt->size;
- int16_t *samples;
- int delta, ret;
-
- /* get output buffer */
- frame->nb_samples = avpkt->size - cin->initial_decode_frame;
- if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
- return ret;
- samples = (int16_t *)frame->data[0];
-
- delta = cin->delta;
- if (cin->initial_decode_frame) {
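- /* the first packet starts with a raw little-endian 16-bit sample that seeds the
- * predictor; later packets continue from the saved cin->delta */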
- cin->initial_decode_frame = 0;
- delta = sign_extend(AV_RL16(buf), 16);
- buf += 2;
- *samples++ = delta;
- }
- while (buf < buf_end) {
- delta += cinaudio_delta16_table[*buf++];
- delta = av_clip_int16(delta);
- *samples++ = delta;
- }
- cin->delta = delta;
-
- *got_frame_ptr = 1;
-
- return avpkt->size;
-}
-
-const FFCodec ff_dsicinaudio_decoder = {
- .p.name = "dsicinaudio",
- CODEC_LONG_NAME("Delphine Software International CIN audio"),
- .p.type = AVMEDIA_TYPE_AUDIO,
- .p.id = AV_CODEC_ID_DSICINAUDIO,
- .priv_data_size = sizeof(CinAudioContext),
- .init = cinaudio_decode_init,
- FF_CODEC_DECODE_CB(cinaudio_decode_frame),
- .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_CHANNEL_CONF,
-};
diff --git a/spaces/congsaPfin/Manga-OCR/logs/808 TikTok Download How to Get the Best Sounds and Loops for Your Videos.md b/spaces/congsaPfin/Manga-OCR/logs/808 TikTok Download How to Get the Best Sounds and Loops for Your Videos.md
deleted file mode 100644
index 5996f8e1e91e61a42c50ac15dd720999eda47604..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/808 TikTok Download How to Get the Best Sounds and Loops for Your Videos.md
+++ /dev/null
@@ -1,150 +0,0 @@
-
-How to Edit TikTok Videos
-TikTok is a popular social media app that allows users to create and share short videos with music, filters, stickers, and other effects. Editing TikTok videos can be fun and easy if you follow some simple steps. In this article, we will show you how to edit TikTok videos using the in-app editor and some external tools or apps. Whether you want to make your videos more creative, engaging, or professional, we have got you covered.
- Step 1: Choose a video format and quality for TikTok
-The first step to edit TikTok videos is to choose the right video format and quality for the app. According to the official TikTok guidelines, the recommended video format is MP4 or MOV, the recommended resolution is 1080x1920 pixels, the recommended aspect ratio is 9:16 (portrait mode), and the maximum file size is 287.76 MB for iOS devices and 72 MB for Android devices. The maximum video length is 3 minutes, but shorter videos tend to perform better on the platform.
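-If your clip does not already match these specs, you can re-encode it before uploading. Below is a minimal sketch that assumes Python and the ffmpeg command-line tool are installed; the file names are placeholders. It scales the clip to fit 1080x1920, pads it to an exact 9:16 frame, and writes an MP4:
-```python
-import subprocess
-
-# Scale to fit inside 1080x1920, pad to the exact 9:16 frame, encode H.264/AAC.
-# "input.mov" and "tiktok_ready.mp4" are placeholder file names.
-subprocess.run([
-    "ffmpeg", "-i", "input.mov",
-    "-vf", "scale=1080:1920:force_original_aspect_ratio=decrease,"
-           "pad=1080:1920:(ow-iw)/2:(oh-ih)/2",
-    "-c:v", "libx264", "-crf", "23", "-preset", "fast",
-    "-c:a", "aac", "-movflags", "+faststart",
-    "tiktok_ready.mp4",
-], check=True)
-```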
- Step 2: Record or upload a video to TikTok
-The next step is to record or upload a video to TikTok. You can either use the default camera app on your phone or use a third-party app or device to record your video. Make sure your camera resolution is set to 1080p or higher for better quality. If you already have a video on your phone or computer that you want to use, you can upload it to TikTok by tapping the "+" button at the bottom of the screen and selecting "Upload" from the options. You can also add multiple videos from your library and merge them into one.
- Step 3: Use the TikTok in-app editor to trim, crop, rotate, adjust speed, add filters, stickers, text, and sound effects
-Once you have your video ready, you can use the TikTok in-app editor to edit it as you like. You will see a number of icons on the right side of the screen that represent various editing and customization options. Here are some of the things you can do with the in-app editor:
-
-- Trim: You can trim your video by dragging the white bars at the bottom of the screen. You can also split your video into segments by tapping the "scissors" icon and selecting "Split".
-- Crop: You can crop your video by tapping the "Adjust clips" icon and using the slider to zoom in or out. You can also change the aspect ratio of your video by tapping the "9:16" icon and choosing from different options.
-- Rotate: You can rotate your video by tapping the "Rotate" icon and choosing from 90 degrees clockwise or counterclockwise.
-- Adjust speed: You can adjust the speed of your video by tapping the "Speed" icon and choosing from 0.3x to 3x. You can also apply different speeds to different segments of your video by splitting it first.
-- Add filters: You can add filters to your video by tapping the "Filters" icon and choosing from different categories such as Portrait, Landscape, Food, Vibe, etc. You can also adjust the intensity of each filter by using the slider.
-- Add stickers: You can add stickers to your video by tapping the "Stickers" icon and choosing from different categories such as Emoji, Text, GIFs, Effects, etc. You can also resize, rotate, and move the stickers as you like. You can also add your own stickers by tapping the "Add sticker" icon and choosing from your gallery or camera.
-- Add text: You can add text to your video by tapping the "Text" icon and typing your message. You can also change the font, color, size, alignment, and animation of the text by tapping the "Aa" icon. You can also resize, rotate, and move the text as you like.
-- Add sound effects: You can add sound effects to your video by tapping the "Sounds" icon and choosing from different categories such as Trending, New, Effects, etc. You can also adjust the volume of each sound effect by using the slider. You can also record your own voice or add music from your library by tapping the "Voiceover" or "Music" icons respectively.
-
-After you have edited your video with the in-app editor, you can preview it by tapping the "Play" icon at the bottom of the screen. You can also undo or redo any changes by tapping the "Undo" or "Redo" icons at the top of the screen.
- Step 4: Use external tools or apps to enhance your TikTok video with more features and options
-If you want to edit your TikTok video with more features and options than the in-app editor offers, you can use some external tools or apps that are compatible with TikTok. Here are some of the best tools and apps for editing TikTok videos:
-
-
-| Tool/App | Features | Price |
-| --- | --- | --- |
-| InShot | Cut, trim, split, merge, crop, rotate, flip, and adjust videos; add transitions, filters, effects, stickers, text, music, and voiceovers; adjust brightness, contrast, saturation, hue, temperature, and more; change video speed and reverse video; add borders and backgrounds; export videos in HD quality | Free with in-app purchases |
-| VivaVideo | Cut, trim, merge, duplicate, and adjust videos; add transitions, filters, stickers, text, music, and sound effects; adjust brightness, contrast, saturation, blur, and more; change video speed and reverse video; add collage and slideshow templates; export videos in HD quality | Free with in-app purchases |
-| CapCut | Cut, trim, split, merge, crop, rotate, flip, and adjust videos; add transitions, filters, effects, stickers, text, music, and voiceovers; adjust brightness, contrast, saturation, hue, and more; change video speed and reverse video; add animated stickers and text; export videos in HD quality | Free |
-| FilmoraGo | Cut, trim, split, merge, crop, rotate, flip, and adjust videos; add transitions, filters, effects, stickers, text, music, and voiceovers; adjust brightness, contrast, saturation, temperature, tint, and more; change video speed and reverse video; add overlays and elements; export videos in HD quality | Free with in-app purchases |
-| Adobe Premiere Rush | Cut, trim, crop, rotate, flip, and adjust videos; add transitions, filters, effects, stickers, text, music, and voiceovers; adjust exposure, contrast, highlights, shadows, and more; change video speed and reverse video; add titles and graphics; export videos in HD quality | Free trial with subscription plans |
-
-
-To use these tools or apps to edit your TikTok video, you need to first export your video from the TikTok app by tapping the "Next" button at the bottom of the screen and then tapping the "Save" icon at the top right corner of the screen. Then you can import your video to the tool or app of your choice and edit it as you like. After you have finished editing your video with the external tool or app, you need to export it to your phone or computer and then upload it to TikTok by following the same steps as before.
- Step 5: Save and share your TikTok video with your followers and the world
-The final step to edit TikTok videos is to save and share your masterpiece with your followers and the world. After you have uploaded your video to TikTok by tapping the "+" button at the bottom of the screen and selecting "Upload" from the options, you can add some finishing touches to your video before posting it. Here are some of the things you can do before sharing your video:
-
-- Add a caption: You can add a caption to your video by typing in the text box at the bottom of the screen. You can also use hashtags and mentions to increase your visibility and engagement.
-- Add a cover: You can add a cover to your video by tapping the "Select cover" icon at the bottom left corner of the screen. You can either choose a frame from your video or upload an image from your gallery.
-- Add a sound: You can add a sound to your video by tapping the "Sounds" icon at the bottom right corner of the screen. You can either choose a song from the TikTok library or upload a song from your phone.
-- Adjust privacy settings: You can adjust the privacy settings of your video by tapping the "Who can view this video" icon at the bottom center of the screen. You can either choose "Public", "Friends", or "Private" depending on who you want to see your video.
-- Enable comments and duets: You can enable or disable comments and duets on your video by tapping the "More options" icon at the bottom right corner of the screen. You can also choose who can comment or duet with you by selecting "Everyone", "Friends", or "Off".
-
-After you have done all these things, you can tap the "Post" button at the bottom right corner of the screen to share your TikTok video with your followers and the world. You can also tap the "Drafts" button at the bottom left corner of the screen to save your TikTok video as a draft and post it later.
- Conclusion
-Editing TikTok videos can be a fun and creative way to express yourself and connect with others. By following the steps and tips in this article, you can edit TikTok videos like a pro and impress your followers and the world. Whether you use the in-app editor or some external tools or apps, you can make your TikTok videos more attractive, engaging, and professional. So what are you waiting for? Start editing your TikTok videos today and unleash your inner star.
- FAQs
-Here are some frequently asked questions about editing TikTok videos:
-How do I download TikTok videos?
-If you want to download TikTok videos to your phone or computer, you can use the built-in download feature on the app or some online tools or apps that allow you to download TikTok videos. Here are some of the ways to download TikTok videos:
-
-- Download feature on the app: You can download TikTok videos by tapping the "Share" icon on the bottom right corner of the video and then tapping the "Save video" icon. However, this option is only available for public videos and if the creator has enabled downloads.
-- Online tools: You can use some online tools such as TikTok Downloader, TikMate, or Snaptik to download TikTok videos. You just need to copy and paste the URL of the video into the tool and click the "Download" button.
-- Apps: You can use some apps such as Video Downloader for TikTok, TikTok Video Saver, or TikSave to download TikTok videos. You just need to install the app on your phone and follow the instructions.
-
-How do I edit TikTok videos on PC?
-If you want to edit TikTok videos on PC, you can use some software such as Filmora, Adobe Premiere Pro, or VSDC Free Video Editor to edit your videos. You just need to import your videos from your phone or computer to the software and edit them as you like. After you have finished editing your videos, you need to export them to your phone or computer and then upload them to TikTok by following the same steps as before.
-How do I add music to my TikTok videos?
-If you want to add music to your TikTok videos, you can use the built-in music feature on the app or some external tools or apps that allow you to add music to your videos. Here are some of the ways to add music to your TikTok videos:
-
-- Music feature on the app: You can add music to your TikTok videos by tapping the "Sounds" icon at the bottom of the screen before or after recording or uploading your video. You can choose from a variety of songs from different genres and moods on the TikTok library. You can also adjust the start and end points of the song by using the slider.
-- External tools or apps: You can use some external tools or apps such as InShot, VivaVideo, or CapCut to add music to your TikTok videos. You just need to import your video to the tool or app and choose a song from their library or your phone. You can also adjust the volume, duration, and position of the song.
-
-How do I make my TikTok videos more popular?
-If you want to make your TikTok videos more popular, you need to follow some best practices and strategies that can help you increase your visibility and engagement on the platform. Here are some of the tips to make your TikTok videos more popular:
-
-- Create original and creative content: Make content that showcases your personality, skills, talents, hobbies, and interests, and experiment with different formats, styles, themes, and topics that suit your niche and audience.
-- Use trending hashtags and sounds: Use trending hashtags and sounds that are relevant to your content and niche. You can find them on the "Discover" page in the app or by searching on Google and other platforms, and you can also create your own catchy, unique hashtags and sounds.
-- Engage with your followers and other creators: Like, comment on, share, and duet other creators' videos, respond to comments and messages, thank your followers for their support, and collaborate with creators whose niches and audiences complement your own.
-- Post consistently and at the right time: Post on a regular schedule, at times when your audience is active, to maintain your presence and reach on the platform. Review your analytics and insights to see what works best for you, and keep up with the trends and events happening in your niche and the wider world.
-
-How do I edit TikTok videos without watermark?
-If you want to edit TikTok videos without watermark, you need to use some tools or apps that can remove the watermark from the videos. Here are some of the tools or apps that can remove the watermark from TikTok videos:
-
-- Online tools: You can use some online tools such as MusicallyDown, TikTok Video Downloader, or TTDownloader to download TikTok videos without watermark. You just need to copy and paste the URL of the video into the tool and click the "Download" button.
-- Apps: You can use some apps such as Video Downloader for TikTok No Watermark, TikTok Video Downloader No Watermark, or Video Downloader for TikTok - No Watermark to download TikTok videos without watermark. You just need to install the app on your phone and follow the instructions.
-
-
-
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/Download 1.1.1.q The Latest Version of Java SE Development Kit.md b/spaces/congsaPfin/Manga-OCR/logs/Download 1.1.1.q The Latest Version of Java SE Development Kit.md
deleted file mode 100644
index d5d39acb6edc3cf629c69acfe44c05c216d65549..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/Download 1.1.1.q The Latest Version of Java SE Development Kit.md
+++ /dev/null
@@ -1,112 +0,0 @@
-
-Download 1.1.1.q: How to Update OpenSSL on Apache HTTPD
-If you are running a web server with Apache HTTPD, you probably rely on OpenSSL to provide secure communication over the internet. OpenSSL is a software library that offers various cryptographic functions and protocols, such as SSL/TLS, for encrypting and authenticating data. However, like any software, OpenSSL needs to be updated regularly to fix bugs, improve performance, and address security vulnerabilities.
-In this article, we will explain what OpenSSL is, why it is important, and how to download and install the latest version of OpenSSL 1.1.1, which is 1.1.1.q, on your Apache HTTPD server.
-What is OpenSSL and why is it important?
-OpenSSL is a software library that provides cryptographic functions and protocols for secure communication over the internet
-OpenSSL is an open source project that develops and maintains a software library that implements various cryptographic functions and protocols, such as SSL/TLS, RSA, AES, SHA, and many more. These functions and protocols are essential for enabling secure communication over the internet, as they allow web servers and clients to encrypt and authenticate data exchanged between them.
-OpenSSL is used by many web servers, including Apache HTTPD, to enable HTTPS and other security features
-One of the most common uses of OpenSSL is to enable HTTPS, which is the secure version of HTTP, the protocol used by web browsers and servers to communicate. HTTPS ensures that the data transmitted between a web browser and a web server is encrypted and authenticated, preventing eavesdropping, tampering, or impersonation by malicious parties.
-Many web servers, including Apache HTTPD, use OpenSSL to enable HTTPS and other security features, such as client certificates, mutual authentication, or OCSP stapling. By using OpenSSL, web servers can support multiple SSL/TLS protocol versions and cipher suites. OpenSSL's cryptographic primitives are also reused by other software, such as OpenSSH, although SSH and SFTP are separate protocols that OpenSSL itself does not implement.
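-To illustrate what this negotiation looks like from the client side, here is a minimal Python sketch (not part of the Apache setup itself) that uses the standard library's ssl module, which links against an OpenSSL build, to ask a server which TLS protocol versions it will accept. The hostname is only a placeholder; replace it with your own domain.
-
-import socket
-import ssl
-
-def probe(host: str, version: ssl.TLSVersion) -> str:
-    # Build a client context that allows exactly one TLS version, then try to handshake.
-    context = ssl.create_default_context()
-    context.minimum_version = version
-    context.maximum_version = version
-    try:
-        with socket.create_connection((host, 443), timeout=10) as sock:
-            with context.wrap_socket(sock, server_hostname=host) as tls:
-                return f"{version.name}: accepted, cipher {tls.cipher()[0]}"
-    except (ssl.SSLError, OSError) as exc:
-        return f"{version.name}: rejected ({exc.__class__.__name__})"
-
-host = "example.com"  # placeholder hostname
-for version in (ssl.TLSVersion.TLSv1_2, ssl.TLSVersion.TLSv1_3):
-    print(probe(host, version))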
-OpenSSL releases regular updates to fix bugs, improve performance, and address security vulnerabilities
-As with any software, OpenSSL is not perfect and may contain bugs or vulnerabilities that could compromise its functionality or security. Therefore, the OpenSSL project team releases regular updates to fix these issues and improve the performance and compatibility of the library.
-Some of these updates are minor and only address minor bugs or enhancements, while others are major and address critical vulnerabilities that could expose users to attacks or data breaches. For example, in 2014, a severe vulnerability known as Heartbleed was discovered in OpenSSL, which allowed attackers to steal sensitive information from web servers and clients using a specially crafted request. This vulnerability affected millions of websites and users and required urgent patching by the OpenSSL team and the web server administrators.
-Therefore, it is important to keep OpenSSL updated to the latest version available, as it ensures that your web server and clients are protected from known threats and benefit from the latest improvements and features of the library.
-What is 1.1.1.q and what are its benefits?
-1.1.1.q is a recent version of OpenSSL 1.1.1, which is a long-term support (LTS) branch that receives updates until September 2023
-OpenSSL has several release branches; the two relevant here are 1.0.2 and 1.1.1 (the newer 3.x series is covered briefly in the FAQ below). The 1.0.2 branch is the older one and reached its end of life (EOL) in December 2019, meaning that it no longer receives public updates or support from the OpenSSL team. The 1.1.1 branch is supported until September 2023 and continues to receive fixes for bugs and vulnerabilities until then.
-Within each branch, there are different versions that are released periodically, with a letter suffix indicating the order of release. For example, 1.0.2a was the first version of the 1.0.2 branch, followed by 1.0.2b, 1.0.2c, and so on. Similarly, 1.1.1a was the first version of the 1.1.1 branch, followed by 1.1.1b, 1.1.1c, and so on.
-The last public release of the 1.0.2 branch was 1.0.2u, issued in December 2019. The version covered in this article, 1.1.1.q, was released on the 1.1.1 branch in July 2022. Since the 1.0.2 branch has already reached its EOL, it is recommended to upgrade to the 1.1.1 branch, which offers more features and security than the older one.
-1.1.1.q fixes a bug that caused AES OCB encryption to fail to encrypt some data on 32-bit x86 platforms (CVE-2022-2097)
-AES OCB is a mode of operation for AES that provides both confidentiality and authenticity of data. It is fast and efficient compared with older constructions such as CBC combined with a separate MAC, although it was long encumbered by patents. OpenSSL has supported AES OCB since version 1.1.0, but a bug was found in the optimized AES-NI assembly implementation used on some 32-bit x86 platforms: OCB encryption could fail to encrypt the entirety of the input, so up to sixteen bytes of data that should have been encrypted could be left readable in the output.
-The bug was assigned the identifier CVE-2022-2097 and disclosed in July 2022. It was fixed in version 1.1.1.q on the 1.1.1 branch (and in 3.0.5 on the 3.0 branch). Therefore, users who run OpenSSL 1.1.1 with AES OCB encryption on a 32-bit x86 platform should upgrade to version 1.1.1.q as soon as possible to avoid this bug.
- 1.1.1.q also adds a missing header for memcmp that caused compilation failure on some platforms
-memcmp is a standard C library function that compares two blocks of memory and returns the difference between them. OpenSSL uses it in many places, for example to compare keys, hashes, or certificates. However, one of OpenSSL's source files used memcmp without including the header that declares it, which caused a compilation failure on some platforms, such as certain Solaris builds, when building OpenSSL from source.
-This issue was fixed in version 1.1.1.q by adding the missing header include. Therefore, users who hit this compilation failure when building OpenSSL from source on an affected platform should upgrade to version 1.1.1.q to avoid the issue.
-How to download and install 1.1.1.q on Apache HTTPD?
-Download the binary distribution of 1.1.1.q for Windows from [9](https://kb.firedaemon.com/support/solutions/articles/4000121705-openssl-3-0-and-1-1-1-binary-distributions-for-microsoft-windows)
-If you are running Apache HTTPD on Windows, you can download the binary distribution of 1.1.1.q from [9](https://kb.firedaemon.com/support/solutions/articles/4000121705-openssl-3-0-and-1-1-1-binary-distributions-for-microsoft-windows), which is a trusted source that provides pre-compiled versions of OpenSSL for Windows. The binary distribution contains the OpenSSL executable, libraries, and configuration files that you need to run OpenSSL on your web server.
-To download the binary distribution of 1.1.1.q for Windows, follow these steps:
-
-- Go to [9](https://kb.firedaemon.com/support/solutions/articles/4000121705-openssl-3-0-and-1-1-1-binary-distributions-for-microsoft-windows) and scroll down to the section "OpenSSL 3.x and 1.1.x Binary Distributions for Microsoft Windows"
-- Click on the download link for the OpenSSL 1.1.1.q package that matches your system architecture (64-bit or 32-bit); the 3.x packages listed on the same page belong to the newer OpenSSL 3 branch
-- Save the zip file to your preferred location on your computer
-- Extract the zip file using a tool such as WinZip or WinRAR
-
-Replace your existing bin directory with the one from the downloaded archive
-Once you have extracted the zip file, you will see a folder named "bin" that contains the OpenSSL files. You need to replace your existing bin directory with this one, as it contains the updated version of OpenSSL.
-To replace your existing bin directory with the one from the downloaded archive, follow these steps:
-
-- Locate your Apache HTTPD installation directory, which is usually C:\Apache24 or C:\Program Files\Apache Software Foundation\Apache2.4
-- Rename your existing bin directory to something else, such as bin.old or bin.backup
-- Copy the bin folder from the extracted zip file and paste it into your Apache HTTPD installation directory
-- Make sure that the permissions and ownership of the new bin folder are the same as the old one
-
-Restart Apache HTTPD and verify that the new version of OpenSSL is in use
-After replacing your bin directory with the new one, you need to restart your Apache HTTPD server to load the new version of OpenSSL. You can do this by using the Apache Monitor tool or by running the command "httpd -k restart" in a command prompt.
-To verify that the new version of OpenSSL is in use, you can use the command "openssl version" in a command prompt or check the error log file of your Apache HTTPD server, which is usually located in C:\Apache24\logs\error.log or C:\Program Files\Apache Software Foundation\Apache2.4\logs\error.log. You should see something like this:
- [Wed Jun 21 15:22:48 2023] [notice] Apache/2.4.51 (Win64) OpenSSL/1.1.1q configured -- resuming normal operations
- [Wed Jun 21 15:22:48 2023] [notice] Server built: Oct 12 2022 10:23:25
- [Wed Jun 21 15:22:48 2023] [notice] Parent: Created child process 1234
- [Wed Jun 21 15:22:49 2023] [notice] Child 1234: Initialized with server's OpenSSL library version
- [Wed Jun 21 15:22:49 2023] [notice] Child 1234: Acquired the start mutex.
- [Wed Jun 21 15:22:49 2023] [notice] Child 1234: Starting 64 worker threads.
-This indicates that your Apache HTTPD server has loaded OpenSSL 1.1.1q, the version of the 1.1.1 branch that this article installs.
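-If you prefer to script this check, the following minimal Python sketch does the same thing. The paths are assumptions based on the default C:\Apache24 layout mentioned above; adjust them to match your installation.
-
-import re
-import subprocess
-from pathlib import Path
-
-# Assumed default locations; change these if Apache is installed elsewhere.
-OPENSSL_EXE = Path(r"C:\Apache24\bin\openssl.exe")
-ERROR_LOG = Path(r"C:\Apache24\logs\error.log")
-
-# Ask the bundled openssl binary which version it reports.
-result = subprocess.run([str(OPENSSL_EXE), "version"], capture_output=True, text=True)
-print("openssl.exe reports:", result.stdout.strip())
-
-# Scan the Apache error log for the OpenSSL version recorded at the last startup.
-for line in reversed(ERROR_LOG.read_text(errors="ignore").splitlines()):
-    match = re.search(r"OpenSSL/[\w.]+", line)
-    if match:
-        print("error.log reports:", match.group(0))
-        break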
-Conclusion
-OpenSSL is a vital component of web security and should be kept up to date to ensure that your web server and clients are protected from known threats and benefit from the latest improvements and features of the library. In this article, we explained what OpenSSL is, why it is important, and how to download and install the latest version of OpenSSL 1.1.1, which is 1.1.1.q, on your Apache HTTPD server.
-By following the steps we outlined, you can easily upgrade your OpenSSL version and enjoy the benefits of 1.1.1.q, such as fixing a critical bug that caused AES OCB encryption to fail on some 32-bit x86 platforms and adding a missing header for memcmp that caused compilation failure on some platforms. We hope that this article was helpful and informative for you and that you learned something new today.
-FAQs
-What is the difference between OpenSSL 3.x and OpenSSL 1.1.1?
-OpenSSL 3.x is the next major version of OpenSSL that introduces significant changes and improvements to the library, such as a new provider-based architecture, a new configuration file format, a new command-line interface, and support for new algorithms and protocols. However, OpenSSL 3.x is not fully compatible with OpenSSL 1.1.1 and may require some modifications to existing applications and configurations to work properly.
-How can I check if my web server supports HTTPS?
-One way to check if your web server supports HTTPS is to use a web browser and enter the URL of your web server with https:// prefix. For example, https://example.com. If your web server supports HTTPS, you should see a padlock icon or a green bar in the address bar of your browser, indicating that the connection is secure. You can also click on the padlock icon or the green bar to view more details about the SSL/TLS certificate and encryption used by your web server.
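-If you would rather script this check than use a browser, a minimal Python sketch along the following lines opens a TLS connection and prints the negotiated protocol and the certificate expiry date. The hostname is a placeholder; replace it with your own domain.
-
-import socket
-import ssl
-
-host = "example.com"  # placeholder: use your own domain here
-context = ssl.create_default_context()
-
-# If the server does not support HTTPS, or presents an invalid certificate,
-# the handshake below raises an exception, which is itself the answer.
-with socket.create_connection((host, 443), timeout=10) as sock:
-    with context.wrap_socket(sock, server_hostname=host) as tls:
-        cert = tls.getpeercert()
-        print("TLS version:", tls.version())
-        print("Certificate valid until:", cert["notAfter"])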
-How can I update OpenSSL on other platforms or web servers?
-The steps to update OpenSSL on other platforms or web servers may vary depending on the specific system and configuration you are using. However, in general, you can follow these steps:
-
-- Download the source code or binary distribution of the latest version of OpenSSL from [10](https://www.openssl.org/source/)
-- Backup your existing OpenSSL files and configuration
-- Install or compile the new version of OpenSSL according to the instructions provided by the OpenSSL team or your platform or web server vendor
-- Restart your web server and verify that the new version of OpenSSL is in use
-
-How can I test if my web server is vulnerable to Heartbleed or other OpenSSL vulnerabilities?
-There are various tools and websites that can help you test if your web server is vulnerable to Heartbleed or other OpenSSL vulnerabilities. For example, you can use [11](https://filippo.io/Heartbleed/) to test for Heartbleed, [12](https://www.ssllabs.com/ssltest/) to test for various SSL/TLS vulnerabilities, or [13](https://www.cvedetails.com/vulnerability-list/vendor_id-217/product_id-383/version_id-206279/OpenSSL-OpenSSL-1.1.1q.html) to check for known CVEs affecting OpenSSL 1.1.1.q.
-Where can I find more information about OpenSSL?
-You can find more information about OpenSSL on its official website [14](https://www.openssl.org/), where you can access its documentation, source code, news, blog, mailing lists, and forums. You can also follow its official Twitter account [15](https://twitter.com/opensslorg) for updates and announcements.
-
-
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/Get Attack on Titan Wings of Freedom for PS3 [Full PKG Dlc] Download.md b/spaces/congsaPfin/Manga-OCR/logs/Get Attack on Titan Wings of Freedom for PS3 [Full PKG Dlc] Download.md
deleted file mode 100644
index 362edb145c0268d746253ddeaaa4e05a5174f053..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/Get Attack on Titan Wings of Freedom for PS3 [Full PKG Dlc] Download.md
+++ /dev/null
@@ -1,139 +0,0 @@
-
-Attack on Titan PS3 Download: How to Play the Epic Anime Game on Your PC
- Introduction
- If you are a fan of anime, you have probably heard of Attack on Titan, one of the most popular and acclaimed anime series of all time. Based on the manga series by Hajime Isayama, Attack on Titan is a dark fantasy story set in a post-apocalyptic world where humanity lives behind walls to protect themselves from giant humanoid creatures called Titans, who devour humans without mercy.
- Attack on Titan has been adapted into various media, including an anime television series, anime films, live-action films, video games, and more. In this article, we will focus on one of the video games based on the anime series, Attack on Titan PS3 game, and show you how to download and install it on your PC using a PS3 emulator.
- What is Attack on Titan?
- Attack on Titan is a Japanese manga series written and illustrated by Hajime Isayama. It began serialization in Kodansha's Bessatsu Shōnen Magazine in September 2009 and has been collected into 34 volumes as of June 2021. The manga has over 100 million copies in circulation worldwide and has won several awards, such as the Kodansha Manga Award, the Micheluzzi Award, and the Harvey Award.
- The manga follows the story of Eren Yeager, a young boy who lives in Shiganshina District, a town located on the outermost wall that protects humanity from the Titans. One day, a Colossal Titan breaches the wall and allows other Titans to invade the town, killing Eren's mother and many others. Eren vows to join the Survey Corps, an elite military unit that fights against the Titans outside the walls, and kill every single Titan he sees.
- The manga has been adapted into an anime television series by Wit Studio (seasons 1-3) and MAPPA (season 4). The first season aired from April to September 2013, the second season from April to June 2017, the third season from July 2018 to July 2019, and the fourth season from December 2020 to March 2021. A second part of the fourth season is scheduled to air in January 2022. The anime series has received critical acclaim for its animation, music, voice acting, story, and themes. It has also been a commercial success, selling millions of DVDs and Blu-rays, as well as merchandise and streaming rights.
- What is Attack on Titan PS3 game?
- Attack on Titan PS3 game is an action hack and slash video game based on the anime series of the same name. It was developed by Omega Force and published by Koei Tecmo for PlayStation 3, PlayStation 4, PlayStation Vita, Xbox One, and Microsoft Windows. It was released in Japan in February 2016 and in North America and Europe in August 2016.
- The game retells key moments from the anime series (chapters 1-33 of the manga), as well as some original scenarios involving the main characters. The game features ten playable characters: Eren Yeager, Mikasa Ackerman, Armin Arlert, Levi Ackerman, Hange Zoe, Erwin Smith, Jean Kirstein, Connie Springer, Sasha Braus, and Krista Lenz. The game also allows the player to create their own custom character and join the Survey Corps. The game also features online multiplayer modes, such as co-op missions and competitive modes.
- The gameplay of Attack on Titan PS3 game is similar to other hack and slash games by Omega Force, such as Dynasty Warriors and Samurai Warriors. The player controls a character who can use a device called the Omni-Directional Mobility Gear (ODM Gear) to move around the 3D environments and fight against the Titans. The ODM Gear allows the player to grapple onto buildings, trees, and other objects, as well as launch themselves into the air and perform aerial attacks. The player can also use blades, guns, and explosives to damage the Titans' weak points, such as their napes and limbs. The player can also transform into a Titan if they play as Eren or use a special item.
- The game received mixed to positive reviews from critics and fans. The game was praised for its faithful adaptation of the anime series, its fast-paced and exhilarating gameplay, its graphics and sound effects, and its replay value. However, the game was also criticized for its repetitive missions, its lack of variety in enemies and environments, its technical issues and glitches, and its difficulty spikes. The game sold over a million copies worldwide as of December 2016.
- Why play Attack on Titan PS3 game on PC?
- While Attack on Titan PS3 game is available for various platforms, some players may prefer to play it on PC for various reasons. Here are some of the benefits of playing Attack on Titan PS3 game on PC:
-
-- Better performance: Playing Attack on Titan PS3 game on PC can offer better performance than playing it on PS3 or other consoles. PC players can enjoy higher resolutions, smoother frame rates, faster loading times, and more graphical options. PC players can also use mods and cheats to enhance their gaming experience.
-- More convenience: Playing Attack on Titan PS3 game on PC can be more convenient than playing it on PS3 or other consoles. PC players do not need to buy or own a PS3 or other consoles, nor do they need to buy or insert physical discs. PC players can simply download and install the game on their computers and play it anytime they want.
-- More compatibility: Playing Attack on Titan PS3 game on PC gives you more choice of peripherals and tooling than a console does. PC players can use various controllers, keyboards, mice, and other input devices to play the game, and can use various software and hardware to record, stream, or share their gameplay with others.
-
- Of course, playing Attack on Titan PS3 game on PC also has some drawbacks, such as requiring a powerful PC to run the game smoothly, needing a reliable internet connection to download the game and access online features, risking malware or viruses from downloading unofficial sources, and facing legal issues from pirating the game. Therefore, playing Attack on Titan PS3 game on PC is not for everyone, but it can be a great option for some players who want to enjoy the epic anime game on their computers.
- How to download and install Attack on Titan PS3 game on PC
- If you are interested in playing Attack on Titan PS3 game on PC, you will need two things: a PS3 emulator for PC and the Attack on Titan PS3 game file for PC. In this section, we will show you how to download and install both of them step by step.
- Step 1: Download and install a PS3 emulator for PC
- A PS3 emulator is a software that allows you to run PS3 games on your PC by simulating the PS3 hardware and software. There are several PS3 emulators available for PC, but we recommend using RPCS3, which is the best and most popular one.
- What is RPCS3?
- RPCS3 is an open-source PS3 emulator for Windows, Linux, and BSD. It was first released in May 2011 by a team of programmers and hackers who wanted to create a free and accessible way to play PS3 games on PC. Since then, RPCS3 has been constantly updated and improved by its developers and contributors, making it one of the most advanced and compatible PS3 emulators in the world.
- RPCS3 can run over 5000 PS3 games at various levels of compatibility, ranging from playable to ingame to loadable to nothing. Some of the most popular games that RPCS3 can run include Persona 5, Demon's Souls, The Last of Us, God of War III, Uncharted 2: Among Thieves, Red Dead Redemption, and many more. RPCS3 can also run PS3 homebrews, demos, and PSN games. RPCS3 supports various features, such as high resolutions, custom configurations, save states, trophies, online multiplayer, and more.
- Which PS3 emulator to choose?
- As mentioned before, there are several PS3 emulators available for PC, but we recommend using RPCS3 for the following reasons:
-
-- Compatibility: RPCS3 is the most compatible PS3 emulator for PC, as it can run over 5000 PS3 games at various levels of playability. RPCS3 is also compatible with most PC hardware and software, as it supports Windows, Linux, and BSD operating systems, as well as various CPU and GPU architectures.
-- Performance: RPCS3 is the most performant PS3 emulator for PC, as it can run PS3 games at high resolutions, frame rates, and quality settings. RPCS3 also has various optimization options and features that can improve the performance of the games, such as Vulkan and DirectX 12 renderers, asynchronous shader compilation, thread scheduler, resolution scaling, and more.
-- Usability: RPCS3 is the most user-friendly PS3 emulator for PC, as it has a simple and intuitive interface that allows the user to easily manage their games, settings, and updates. RPCS3 also has a comprehensive wiki and a helpful community that can provide support and guidance to the user.
-- Legality: RPCS3 is the most legal PS3 emulator for PC, as it does not use any proprietary code or files from Sony or other parties. RPCS3 is an open-source project that is developed by volunteers who do not profit from their work. RPCS3 also does not distribute any PS3 games or firmware files, but rather requires the user to obtain them legally from their own PS3 console or other sources.
-
- Therefore, we believe that RPCS3 is the best PS3 emulator for PC and the best choice for playing Attack on Titan PS3 game on PC.
- How to download and install RPCS3, the best PS3 emulator for PC
- To download and install RPCS3 on your PC, you need to follow these steps:
-
-- Download the latest version of RPCS3: You can download the latest version of RPCS3 from its official website: https://rpcs3.net/download. You can choose between the Windows or Linux version depending on your operating system. You will get a ZIP file that contains the RPCS3 executable and other files.
-- Extract the ZIP file: You need to extract the ZIP file to a folder of your choice using a program like WinRAR or 7-Zip. You can name the folder anything you want, but we suggest naming it "RPCS3" for convenience. You should see a folder that contains the rpcs3.exe file and other files.
-- Run the rpcs3.exe file: You need to run the rpcs3.exe file by double-clicking on it or right-clicking on it and choosing "Run as administrator". You will see a window that shows the RPCS3 logo and some information. You may also see a pop-up window that asks you to install some prerequisites, such as Microsoft Visual C++ Redistributable or DirectX Runtime. You need to install them if you don't have them already.
-- Update RPCS3: You need to update RPCS3 to the latest version by clicking on "File" and then "Check for updates". You will see a pop-up window that shows you if there are any updates available. If there are, you need to click on "Yes" to download and install them. You may need to restart RPCS3 after updating.
-- Configure RPCS3: You need to configure RPCS3 to suit your preferences and system specifications by clicking on "Config" and then "Settings". You will see a window that shows you various tabs with different options. You can change the settings according to your needs, but we recommend following these general guidelines:
-
-- CPU: You can leave most of the settings in this tab as default, but you may want to enable "Thread Scheduler" if you have an AMD CPU or disable "SPU Loop Detection" if you encounter any issues with some games.
-- GPU: You can choose between Vulkan or DirectX 12 as your renderer depending on your GPU model and driver. Vulkan is usually faster and more stable than DirectX 12, but some games may work better with DirectX 12. You can also enable "Resolution Scale" to increase the resolution of the games, but this may affect the performance and stability of the emulator. You can also enable "Write Color Buffers" and "Write Depth Buffer" if some games require them to display graphics correctly.
-- Audio: You can choose between XAudio2 or OpenAL as your audio backend depending on your sound card and driver. XAudio2 is usually more compatible and stable than OpenAL, but some games may sound better with OpenAL. You can also adjust the volume and latency of the audio according to your preference.
-- Input/Output: You can configure your input devices, such as controllers, keyboards, mice, and other peripherals, by clicking on "Pads". You can choose between Keyboard, DualShock 3, DualShock 4, or XInput as your handler depending on your device type and driver. You can also map the buttons and axes of your device to the PS3 controller buttons and axes by clicking on the corresponding icons.
-- System: You can change the language and theme of the RPCS3 interface by clicking on "Language" and "Theme". You can also enable or disable some system features, such as "Enable Host Root", "Enable /host_root/", "Enable /dev_hdd1/", or "Automatically start games after boot". These features may affect the compatibility and functionality of some games, so you should only change them if you know what you are doing.
-- Network: You can enable or disable the network features of RPCS3 by clicking on "Network". You can choose between "Disconnected", "Connected", or "RPCN" as your network status depending on your internet connection and preference. RPCN is a custom online service that allows you to play some PS3 games online with other RPCS3 users. You need to create an account and configure some settings to use RPCN. You can find more information about RPCN here: https://wiki.rpcs3.net/index.php?title=Help:RPCN.
-
-- Save the settings: After you have configured RPCS3 to your liking, you need to save the settings by clicking on "Save" or "OK". You can also reset the settings to default by clicking on "Reset".
-
- Congratulations! You have successfully downloaded and installed RPCS3, the best PS3 emulator for PC. Now you are ready to download and install Attack on Titan PS3 game for PC using RPCS3.
- Step 2: Download and install Attack on Titan PS3 game for PC
- To download and install Attack on Titan PS3 game for PC, you need to follow these steps:
-
-- Download the Attack on Titan PS3 game file for PC: You need to download the Attack on Titan PS3 game file for PC from a reliable source. The game file is usually in the form of an ISO, PKG, or EBOOT file that contains the game data. You can find various websites that offer PS3 game files for PC, but you need to be careful as some of them may contain malware or viruses. You also need to make sure that the game file is compatible with RPCS3 and has a good reputation among other users. We recommend using this website as a source: https://downloadgameps3.com/attack-on-titan-ps3-iso/. This website provides a safe and verified link to download the Attack on Titan PS3 game file for PC in ISO format.
-- Extract the ISO file: After you have downloaded the Attack on Titan PS3 game file for PC in ISO format, you need to extract it to a folder of your choice using a program like WinRAR or 7-Zip. You should see a folder that contains a file named "BLJM61322.iso" and other files.
-- Install the Attack on Titan PS3 game file for PC using RPCS3: After you have extracted the ISO file, you need to install it using RPCS3 by following these steps:
-
-- Run RPCS3: You need to run RPCS3 by double-clicking on the rpcs3.exe file or right-clicking on it and choosing "Run as administrator". You will see a window that shows the RPCS3 logo and some information.
-- Add the ISO file to RPCS3: You need to add the ISO file to RPCS3 by clicking on "File" and then "Install .pkg / .rap". You will see a window that shows your computer's files and folders. You need to navigate to the folder where you extracted the ISO file and select the file named "BLJM61322.iso". You will see a pop-up window that shows the progress of the installation. You need to wait until the installation is complete.
-- Launch the game: After the installation is complete, you will see the game icon on the RPCS3 main window under the "Games" tab. You need to double-click on the game icon or right-click on it and choose "Boot". You will see a window that shows the game logo and some information. You may also see some messages or warnings on the RPCS3 log console, but you can ignore them as long as they are not fatal errors. You need to wait until the game loads and starts.
-
-
- Congratulations! You have successfully downloaded and installed Attack on Titan PS3 game for PC using RPCS3. Now you can enjoy playing the epic anime game on your PC with better performance, convenience, and compatibility.
- Conclusion
- In this article, we have shown you how to download and install Attack on Titan PS3 game for PC using RPCS3, the best PS3 emulator for PC. We have explained what Attack on Titan is, what Attack on Titan PS3 game is, why play Attack on Titan PS3 game on PC, how to download and install RPCS3, and how to download and install Attack on Titan PS3 game for PC. We hope that this article has been helpful and informative for you and that you have learned something new and useful from it.
- If you are a fan of anime and video games, you should definitely try playing Attack on Titan PS3 game on PC using RPCS3. It is a fun and exciting way to experience the thrilling story and action of Attack on Titan in a different medium. You will be able to play as your favorite characters, fight against the terrifying Titans, and explore the vast and beautiful world of Attack on Titan. You will also be able to enjoy the game with better graphics, sound, and performance than playing it on PS3 or other consoles.
- So what are you waiting for? Download and install Attack on Titan PS3 game for PC using RPCS3 today and join the fight for humanity's survival!
- FAQs
- Here are some of the frequently asked questions about playing Attack on Titan PS3 game on PC using RPCS3:
-
-- Is it legal to play Attack on Titan PS3 game on PC using RPCS3?
-It depends on where you live and how you obtain the game file. In general, it is legal to play Attack on Titan PS3 game on PC using RPCS3 if you own a legitimate copy of the game and a PS3 console. However, some countries may have different laws regarding emulation and piracy, so you should check your local laws before downloading or playing any PS3 games on PC using RPCS3.
-- Is it safe to play Attack on Titan PS3 game on PC using RPCS3?
-It is safe to play Attack on Titan PS3 game on PC using RPCS3 if you download and install the emulator and the game file from reliable sources. However, some websites may offer fake or malicious files that can harm your computer or steal your personal information. Therefore, you should always be careful when downloading or installing any files from unknown or untrusted sources.
-- Is it free to play Attack on Titan PS3 game on PC using RPCS3?
-It is free to play Attack on Titan PS3 game on PC using RPCS3 if you already own a legitimate copy of the game and a PS3 console. However, if you do not own them, you will need to buy them or find other ways to obtain them legally. You should not download or play any pirated or illegal copies of the game or any other PS3 games on PC using RPCS3.
-- How long does it take to download and install Attack on Titan PS3 game for PC using RPCS3?
-It depends on your internet speed and computer specifications. In general, it takes about 15 minutes to download the ISO file of Attack on Titan PS3 game for PC (about 12 GB) and about 5 minutes to install it using RPCS3. However, this may vary depending on your internet connection, download source, extraction program, installation process, and other factors.
-- Can I play other PS3 games on PC using RPCS3?
-Yes, you can play other PS3 games on PC using RPCS3 as long as they are compatible with the emulator. You can check the compatibility list of RPCS3 here https://rpcs3.net/compatibility. You can also request new games to be added to the compatibility list by following the instructions here: https://wiki.rpcs3.net/index.php?title=Help:Game_Patches. You can also find various guides and tutorials on how to play different PS3 games on PC using RPCS3 here: https://wiki.rpcs3.net/index.php?title=Help:Gameplay_Guides_and_Tutorials.
-
-
\ No newline at end of file
diff --git a/spaces/contluForse/HuggingGPT/assets/Evermotion Archmodels Vol 125 Torrentrar.md b/spaces/contluForse/HuggingGPT/assets/Evermotion Archmodels Vol 125 Torrentrar.md
deleted file mode 100644
index cfc0671390dc22b5428c7fd8c87c5f6a7800e79c..0000000000000000000000000000000000000000
--- a/spaces/contluForse/HuggingGPT/assets/Evermotion Archmodels Vol 125 Torrentrar.md
+++ /dev/null
@@ -1,10 +0,0 @@
-
-The technologies and materials used in this model are the same as in the previous Archmodels in the series. The materials are mostly the same as the standard Archmodels materials, but there are a few new ones. The default shading layer in Archmodels Vol 125 is different from the default shading layer in the EMC-105, which is why there are new shading layers in the head.
-This is Evermotion's latest Archmodel, the 124th model in their Archmodels series. The model uses the head from Evermotion's EMC-115; it is the Archmodel version of the default head. The model itself is mostly similar to the previous Archmodel in the series, but the head is changed and the eyes, glasses, and ears are given different shader materials. The model also comes with its own default shading layer, and a new material for the brim of the hat.
-This is a finished Evermotion Archmodels Vol 125 complete image. This Archmodels Vol 125 image contains all of the characters freely written by Evermotion, along with a fortifying medicine and similar characters. It was produced from a print-ready standard print version and created with the help of Evermotion Archmodels and a PC (not yet recommended).
-However, some additional support is required for use. It is sufficient if the printer runs a Windows version under Windows XP / Windows 2000 / Windows NT/2000 before this Archmodels Vol 125 is used.
-
-To use it, it is then necessary to add the Archmodels Vol 125 to a ZIP archive. This archive should then be placed in the printer's program folder under Windows XP / Windows 2000 / Windows NT/2000. Printing with the Archmodels Vol 125 can then begin. (It can presumably also be printed from other PC programs, for example Paint Tool SAI, but you will also find something better for that here.)
-
-
\ No newline at end of file
diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/openpose/body.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/openpose/body.py
deleted file mode 100644
index 025783a90f2075ab90067dedf18b375ffb52b32e..0000000000000000000000000000000000000000
--- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/openpose/body.py
+++ /dev/null
@@ -1,219 +0,0 @@
-import cv2
-import numpy as np
-import math
-import time
-from scipy.ndimage.filters import gaussian_filter
-import matplotlib.pyplot as plt
-import matplotlib
-import torch
-from torchvision import transforms
-
-from . import util
-from .model import bodypose_model
-
-class Body(object):
- def __init__(self, model_path):
- self.model = bodypose_model()
- if torch.cuda.is_available():
- self.model = self.model.cuda()
- print('cuda')
- model_dict = util.transfer(self.model, torch.load(model_path))
- self.model.load_state_dict(model_dict)
- self.model.eval()
-
- def __call__(self, oriImg):
- # scale_search = [0.5, 1.0, 1.5, 2.0]
- scale_search = [0.5]
- boxsize = 368
- stride = 8
- padValue = 128
- thre1 = 0.1
- thre2 = 0.05
- multiplier = [x * boxsize / oriImg.shape[0] for x in scale_search]
- heatmap_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 19))
- paf_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 38))
-
- for m in range(len(multiplier)):
- scale = multiplier[m]
- imageToTest = util.smart_resize_k(oriImg, fx=scale, fy=scale)
- imageToTest_padded, pad = util.padRightDownCorner(imageToTest, stride, padValue)
- im = np.transpose(np.float32(imageToTest_padded[:, :, :, np.newaxis]), (3, 2, 0, 1)) / 256 - 0.5
- im = np.ascontiguousarray(im)
-
- data = torch.from_numpy(im).float()
- if torch.cuda.is_available():
- data = data.cuda()
- # data = data.permute([2, 0, 1]).unsqueeze(0).float()
- with torch.no_grad():
- Mconv7_stage6_L1, Mconv7_stage6_L2 = self.model(data)
- Mconv7_stage6_L1 = Mconv7_stage6_L1.cpu().numpy()
- Mconv7_stage6_L2 = Mconv7_stage6_L2.cpu().numpy()
-
- # extract outputs, resize, and remove padding
- # heatmap = np.transpose(np.squeeze(net.blobs[output_blobs.keys()[1]].data), (1, 2, 0)) # output 1 is heatmaps
- heatmap = np.transpose(np.squeeze(Mconv7_stage6_L2), (1, 2, 0)) # output 1 is heatmaps
- heatmap = util.smart_resize_k(heatmap, fx=stride, fy=stride)
- heatmap = heatmap[:imageToTest_padded.shape[0] - pad[2], :imageToTest_padded.shape[1] - pad[3], :]
- heatmap = util.smart_resize(heatmap, (oriImg.shape[0], oriImg.shape[1]))
-
- # paf = np.transpose(np.squeeze(net.blobs[output_blobs.keys()[0]].data), (1, 2, 0)) # output 0 is PAFs
- paf = np.transpose(np.squeeze(Mconv7_stage6_L1), (1, 2, 0)) # output 0 is PAFs
- paf = util.smart_resize_k(paf, fx=stride, fy=stride)
- paf = paf[:imageToTest_padded.shape[0] - pad[2], :imageToTest_padded.shape[1] - pad[3], :]
- paf = util.smart_resize(paf, (oriImg.shape[0], oriImg.shape[1]))
-
- heatmap_avg = heatmap_avg + heatmap / len(multiplier)
- paf_avg = paf_avg + paf / len(multiplier)
-
- all_peaks = []
- peak_counter = 0
-
- for part in range(18):
- map_ori = heatmap_avg[:, :, part]
- one_heatmap = gaussian_filter(map_ori, sigma=3)
-
- map_left = np.zeros(one_heatmap.shape)
- map_left[1:, :] = one_heatmap[:-1, :]
- map_right = np.zeros(one_heatmap.shape)
- map_right[:-1, :] = one_heatmap[1:, :]
- map_up = np.zeros(one_heatmap.shape)
- map_up[:, 1:] = one_heatmap[:, :-1]
- map_down = np.zeros(one_heatmap.shape)
- map_down[:, :-1] = one_heatmap[:, 1:]
-
- peaks_binary = np.logical_and.reduce(
- (one_heatmap >= map_left, one_heatmap >= map_right, one_heatmap >= map_up, one_heatmap >= map_down, one_heatmap > thre1))
- peaks = list(zip(np.nonzero(peaks_binary)[1], np.nonzero(peaks_binary)[0])) # note reverse
- peaks_with_score = [x + (map_ori[x[1], x[0]],) for x in peaks]
- peak_id = range(peak_counter, peak_counter + len(peaks))
- peaks_with_score_and_id = [peaks_with_score[i] + (peak_id[i],) for i in range(len(peak_id))]
-
- all_peaks.append(peaks_with_score_and_id)
- peak_counter += len(peaks)
-
- # find connection in the specified sequence, center 29 is in the position 15
- limbSeq = [[2, 3], [2, 6], [3, 4], [4, 5], [6, 7], [7, 8], [2, 9], [9, 10], \
- [10, 11], [2, 12], [12, 13], [13, 14], [2, 1], [1, 15], [15, 17], \
- [1, 16], [16, 18], [3, 17], [6, 18]]
- # the middle joints heatmap correpondence
- mapIdx = [[31, 32], [39, 40], [33, 34], [35, 36], [41, 42], [43, 44], [19, 20], [21, 22], \
- [23, 24], [25, 26], [27, 28], [29, 30], [47, 48], [49, 50], [53, 54], [51, 52], \
- [55, 56], [37, 38], [45, 46]]
-
- connection_all = []
- special_k = []
- mid_num = 10
-
- for k in range(len(mapIdx)):
- score_mid = paf_avg[:, :, [x - 19 for x in mapIdx[k]]]
- candA = all_peaks[limbSeq[k][0] - 1]
- candB = all_peaks[limbSeq[k][1] - 1]
- nA = len(candA)
- nB = len(candB)
- indexA, indexB = limbSeq[k]
- if (nA != 0 and nB != 0):
- connection_candidate = []
- for i in range(nA):
- for j in range(nB):
- vec = np.subtract(candB[j][:2], candA[i][:2])
- norm = math.sqrt(vec[0] * vec[0] + vec[1] * vec[1])
- norm = max(0.001, norm)
- vec = np.divide(vec, norm)
-
- startend = list(zip(np.linspace(candA[i][0], candB[j][0], num=mid_num), \
- np.linspace(candA[i][1], candB[j][1], num=mid_num)))
-
- vec_x = np.array([score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 0] \
- for I in range(len(startend))])
- vec_y = np.array([score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 1] \
- for I in range(len(startend))])
-
- score_midpts = np.multiply(vec_x, vec[0]) + np.multiply(vec_y, vec[1])
- score_with_dist_prior = sum(score_midpts) / len(score_midpts) + min(
- 0.5 * oriImg.shape[0] / norm - 1, 0)
- criterion1 = len(np.nonzero(score_midpts > thre2)[0]) > 0.8 * len(score_midpts)
- criterion2 = score_with_dist_prior > 0
- if criterion1 and criterion2:
- connection_candidate.append(
- [i, j, score_with_dist_prior, score_with_dist_prior + candA[i][2] + candB[j][2]])
-
- connection_candidate = sorted(connection_candidate, key=lambda x: x[2], reverse=True)
- connection = np.zeros((0, 5))
- for c in range(len(connection_candidate)):
- i, j, s = connection_candidate[c][0:3]
- if (i not in connection[:, 3] and j not in connection[:, 4]):
- connection = np.vstack([connection, [candA[i][3], candB[j][3], s, i, j]])
- if (len(connection) >= min(nA, nB)):
- break
-
- connection_all.append(connection)
- else:
- special_k.append(k)
- connection_all.append([])
-
- # last number in each row is the total parts number of that person
- # the second last number in each row is the score of the overall configuration
- subset = -1 * np.ones((0, 20))
- candidate = np.array([item for sublist in all_peaks for item in sublist])
-
- for k in range(len(mapIdx)):
- if k not in special_k:
- partAs = connection_all[k][:, 0]
- partBs = connection_all[k][:, 1]
- indexA, indexB = np.array(limbSeq[k]) - 1
-
- for i in range(len(connection_all[k])): # = 1:size(temp,1)
- found = 0
- subset_idx = [-1, -1]
- for j in range(len(subset)): # 1:size(subset,1):
- if subset[j][indexA] == partAs[i] or subset[j][indexB] == partBs[i]:
- subset_idx[found] = j
- found += 1
-
- if found == 1:
- j = subset_idx[0]
- if subset[j][indexB] != partBs[i]:
- subset[j][indexB] = partBs[i]
- subset[j][-1] += 1
- subset[j][-2] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]
- elif found == 2: # if found 2 and disjoint, merge them
- j1, j2 = subset_idx
- membership = ((subset[j1] >= 0).astype(int) + (subset[j2] >= 0).astype(int))[:-2]
- if len(np.nonzero(membership == 2)[0]) == 0: # merge
- subset[j1][:-2] += (subset[j2][:-2] + 1)
- subset[j1][-2:] += subset[j2][-2:]
- subset[j1][-2] += connection_all[k][i][2]
- subset = np.delete(subset, j2, 0)
- else: # as like found == 1
- subset[j1][indexB] = partBs[i]
- subset[j1][-1] += 1
- subset[j1][-2] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]
-
- # if find no partA in the subset, create a new subset
- elif not found and k < 17:
- row = -1 * np.ones(20)
- row[indexA] = partAs[i]
- row[indexB] = partBs[i]
- row[-1] = 2
- row[-2] = sum(candidate[connection_all[k][i, :2].astype(int), 2]) + connection_all[k][i][2]
- subset = np.vstack([subset, row])
- # delete some rows of subset which has few parts occur
- deleteIdx = []
- for i in range(len(subset)):
- if subset[i][-1] < 4 or subset[i][-2] / subset[i][-1] < 0.4:
- deleteIdx.append(i)
- subset = np.delete(subset, deleteIdx, axis=0)
-
- # subset: n*20 array, 0-17 is the index in candidate, 18 is the total score, 19 is the total parts
- # candidate: x, y, score, id
- return candidate, subset
-
-if __name__ == "__main__":
- body_estimation = Body('../model/body_pose_model.pth')
-
- test_image = '../images/ski.jpg'
- oriImg = cv2.imread(test_image) # B,G,R order
- candidate, subset = body_estimation(oriImg)
- canvas = util.draw_bodypose(oriImg, candidate, subset)
- plt.imshow(canvas[:, :, [2, 1, 0]])
- plt.show()
diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/mmseg/models/decode_heads/dnl_head.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/mmseg/models/decode_heads/dnl_head.py
deleted file mode 100644
index 333280c5947066fd3c7ebcfe302a0e7ad65480d5..0000000000000000000000000000000000000000
--- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/mmseg/models/decode_heads/dnl_head.py
+++ /dev/null
@@ -1,131 +0,0 @@
-import torch
-from annotator.uniformer.mmcv.cnn import NonLocal2d
-from torch import nn
-
-from ..builder import HEADS
-from .fcn_head import FCNHead
-
-
-class DisentangledNonLocal2d(NonLocal2d):
- """Disentangled Non-Local Blocks.
-
- Args:
- temperature (float): Temperature to adjust attention. Default: 0.05
- """
-
- def __init__(self, *arg, temperature, **kwargs):
- super().__init__(*arg, **kwargs)
- self.temperature = temperature
- self.conv_mask = nn.Conv2d(self.in_channels, 1, kernel_size=1)
-
- def embedded_gaussian(self, theta_x, phi_x):
- """Embedded gaussian with temperature."""
-
- # NonLocal2d pairwise_weight: [N, HxW, HxW]
- pairwise_weight = torch.matmul(theta_x, phi_x)
- if self.use_scale:
- # theta_x.shape[-1] is `self.inter_channels`
- pairwise_weight /= theta_x.shape[-1]**0.5
- pairwise_weight /= self.temperature
- pairwise_weight = pairwise_weight.softmax(dim=-1)
- return pairwise_weight
-
- def forward(self, x):
- # x: [N, C, H, W]
- n = x.size(0)
-
- # g_x: [N, HxW, C]
- g_x = self.g(x).view(n, self.inter_channels, -1)
- g_x = g_x.permute(0, 2, 1)
-
- # theta_x: [N, HxW, C], phi_x: [N, C, HxW]
- if self.mode == 'gaussian':
- theta_x = x.view(n, self.in_channels, -1)
- theta_x = theta_x.permute(0, 2, 1)
- if self.sub_sample:
- phi_x = self.phi(x).view(n, self.in_channels, -1)
- else:
- phi_x = x.view(n, self.in_channels, -1)
- elif self.mode == 'concatenation':
- theta_x = self.theta(x).view(n, self.inter_channels, -1, 1)
- phi_x = self.phi(x).view(n, self.inter_channels, 1, -1)
- else:
- theta_x = self.theta(x).view(n, self.inter_channels, -1)
- theta_x = theta_x.permute(0, 2, 1)
- phi_x = self.phi(x).view(n, self.inter_channels, -1)
-
- # subtract mean
- theta_x -= theta_x.mean(dim=-2, keepdim=True)
- phi_x -= phi_x.mean(dim=-1, keepdim=True)
-
- pairwise_func = getattr(self, self.mode)
- # pairwise_weight: [N, HxW, HxW]
- pairwise_weight = pairwise_func(theta_x, phi_x)
-
- # y: [N, HxW, C]
- y = torch.matmul(pairwise_weight, g_x)
- # y: [N, C, H, W]
- y = y.permute(0, 2, 1).contiguous().reshape(n, self.inter_channels,
- *x.size()[2:])
-
- # unary_mask: [N, 1, HxW]
- unary_mask = self.conv_mask(x)
- unary_mask = unary_mask.view(n, 1, -1)
- unary_mask = unary_mask.softmax(dim=-1)
- # unary_x: [N, 1, C]
- unary_x = torch.matmul(unary_mask, g_x)
- # unary_x: [N, C, 1, 1]
- unary_x = unary_x.permute(0, 2, 1).contiguous().reshape(
- n, self.inter_channels, 1, 1)
-
- output = x + self.conv_out(y + unary_x)
-
- return output
-
-
-@HEADS.register_module()
-class DNLHead(FCNHead):
- """Disentangled Non-Local Neural Networks.
-
- This head is the implementation of `DNLNet
- `_.
-
- Args:
- reduction (int): Reduction factor of projection transform. Default: 2.
- use_scale (bool): Whether to scale pairwise_weight by
- sqrt(1/inter_channels). Default: False.
- mode (str): The nonlocal mode. Options are 'embedded_gaussian',
- 'dot_product'. Default: 'embedded_gaussian'.
- temperature (float): Temperature to adjust attention. Default: 0.05
- """
-
- def __init__(self,
- reduction=2,
- use_scale=True,
- mode='embedded_gaussian',
- temperature=0.05,
- **kwargs):
- super(DNLHead, self).__init__(num_convs=2, **kwargs)
- self.reduction = reduction
- self.use_scale = use_scale
- self.mode = mode
- self.temperature = temperature
- self.dnl_block = DisentangledNonLocal2d(
- in_channels=self.channels,
- reduction=self.reduction,
- use_scale=self.use_scale,
- conv_cfg=self.conv_cfg,
- norm_cfg=self.norm_cfg,
- mode=self.mode,
- temperature=self.temperature)
-
- def forward(self, inputs):
- """Forward function."""
- x = self._transform_inputs(inputs)
- output = self.convs[0](x)
- output = self.dnl_block(output)
- output = self.convs[1](output)
- if self.concat_input:
- output = self.conv_cat(torch.cat([x, output], dim=1))
- output = self.cls_seg(output)
- return output
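To make the forward pass above easier to follow, here is a minimal tensor-level sketch of the two branches it combines: whitened (mean-subtracted) pairwise attention with a temperature-sharpened softmax, plus a shared unary term. All tensors are random stand-ins for `theta(x)`, `phi(x)`, `g(x)` and `conv_mask(x)`, with illustrative shapes only.

```python
import torch
import torch.nn.functional as F

n, c, h, w = 2, 16, 8, 8
temperature = 0.05
theta = torch.randn(n, h * w, c)   # queries, [N, HxW, C]
phi = torch.randn(n, c, h * w)     # keys,    [N, C, HxW]
g = torch.randn(n, h * w, c)       # values,  [N, HxW, C]

# "Disentangle" the pairwise term by whitening queries and keys.
theta = theta - theta.mean(dim=-2, keepdim=True)
phi = phi - phi.mean(dim=-1, keepdim=True)

pairwise = torch.matmul(theta, phi) / (c ** 0.5)        # scaled dot product
pairwise = F.softmax(pairwise / temperature, dim=-1)    # temperature-sharpened
y = torch.matmul(pairwise, g)                           # [N, HxW, C]

# Unary branch: one attention map shared by every query position.
unary = F.softmax(torch.randn(n, 1, h * w), dim=-1)     # stands in for conv_mask(x)
unary_x = torch.matmul(unary, g)                        # [N, 1, C]

out = y + unary_x                                       # broadcast add, [N, HxW, C]
print(out.shape)
```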
diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/zoe/zoedepth/data/diml_outdoor_test.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/zoe/zoedepth/data/diml_outdoor_test.py
deleted file mode 100644
index 8670b48f5febafb819dac22848ad79ccb5dd5ae4..0000000000000000000000000000000000000000
--- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/zoe/zoedepth/data/diml_outdoor_test.py
+++ /dev/null
@@ -1,114 +0,0 @@
-# MIT License
-
-# Copyright (c) 2022 Intelligent Systems Lab Org
-
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the "Software"), to deal
-# in the Software without restriction, including without limitation the rights
-# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-
-# The above copyright notice and this permission notice shall be included in all
-# copies or substantial portions of the Software.
-
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-# SOFTWARE.
-
-# File author: Shariq Farooq Bhat
-
-import os
-
-import numpy as np
-import torch
-from PIL import Image
-from torch.utils.data import DataLoader, Dataset
-from torchvision import transforms
-
-
-class ToTensor(object):
- def __init__(self):
- # self.normalize = transforms.Normalize(
- # mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
- self.normalize = lambda x : x
-
- def __call__(self, sample):
- image, depth = sample['image'], sample['depth']
- image = self.to_tensor(image)
- image = self.normalize(image)
- depth = self.to_tensor(depth)
-
- return {'image': image, 'depth': depth, 'dataset': "diml_outdoor"}
-
- def to_tensor(self, pic):
-
- if isinstance(pic, np.ndarray):
- img = torch.from_numpy(pic.transpose((2, 0, 1)))
- return img
-
- # # handle PIL Image
- if pic.mode == 'I':
- img = torch.from_numpy(np.array(pic, np.int32, copy=False))
- elif pic.mode == 'I;16':
- img = torch.from_numpy(np.array(pic, np.int16, copy=False))
- else:
- img = torch.ByteTensor(
- torch.ByteStorage.from_buffer(pic.tobytes()))
- # PIL image mode: 1, L, P, I, F, RGB, YCbCr, RGBA, CMYK
- if pic.mode == 'YCbCr':
- nchannel = 3
- elif pic.mode == 'I;16':
- nchannel = 1
- else:
- nchannel = len(pic.mode)
- img = img.view(pic.size[1], pic.size[0], nchannel)
-
- img = img.transpose(0, 1).transpose(0, 2).contiguous()
- if isinstance(img, torch.ByteTensor):
- return img.float()
- else:
- return img
-
-
-class DIML_Outdoor(Dataset):
- def __init__(self, data_dir_root):
- import glob
-
- # image paths are of the form /{outleft, depthmap}/*.png
- self.image_files = glob.glob(os.path.join(
- data_dir_root, "*", 'outleft', '*.png'))
- self.depth_files = [r.replace("outleft", "depthmap")
- for r in self.image_files]
- self.transform = ToTensor()
-
- def __getitem__(self, idx):
- image_path = self.image_files[idx]
- depth_path = self.depth_files[idx]
-
- image = np.asarray(Image.open(image_path), dtype=np.float32) / 255.0
- depth = np.asarray(Image.open(depth_path),
- dtype='uint16') / 1000.0 # mm to meters
-
- # depth[depth > 8] = -1
- depth = depth[..., None]
-
- sample = dict(image=image, depth=depth, dataset="diml_outdoor")
-
- # return sample
- return self.transform(sample)
-
- def __len__(self):
- return len(self.image_files)
-
-
-def get_diml_outdoor_loader(data_dir_root, batch_size=1, **kwargs):
- dataset = DIML_Outdoor(data_dir_root)
- return DataLoader(dataset, batch_size, **kwargs)
-
-# get_diml_outdoor_loader(data_dir_root="datasets/diml/outdoor/test/HR")
-# get_diml_outdoor_loader(data_dir_root="datasets/diml/outdoor/test/LR")
diff --git a/spaces/crazyjetsai/finetuneai/app.py b/spaces/crazyjetsai/finetuneai/app.py
deleted file mode 100644
index 1b623130ff67c96d06160a49f03eec2a13bd1e43..0000000000000000000000000000000000000000
--- a/spaces/crazyjetsai/finetuneai/app.py
+++ /dev/null
@@ -1,37 +0,0 @@
-import gradio as gr
-from transformers import pipeline
-
-def generate(text,the_model,max_length,temperature,num_beams,top_k,top_p,repetition_penalty):
- generator = pipeline('text-generation', model=the_model)
- result = generator(text, num_return_sequences=3,
- max_length=max_length,
- temperature=temperature,
- num_beams=num_beams,
- top_k=top_k,
- top_p=top_p,
- repetition_penalty = repetition_penalty,
- no_repeat_ngram_size=2,early_stopping=False)
- return result[0]["generated_text"],result[1]["generated_text"],result[2]["generated_text"]
-
-demo = gr.Interface(
- fn=generate,
- inputs=[
- gr.Textbox(lines=5, label="Input Text"),
- gr.Dropdown(choices=['gpt2','gpt2-medium','gpt2-large','gpt2-xl'],value = 'gpt2',label="Choose model"),
- gr.Slider(value=50,label="Max Length",minimum=1,maximum=1000),
- gr.Slider(value=1.0,label="Temperature",minimum=0.0,maximum=1.0,step=0.05),
- gr.Slider(value=4,label="Num Beams",minimum=2,maximum=6,step=1),
- gr.Slider(value=90,label="Top-k",minimum=0,maximum=100),
- gr.Slider(value=0.9,label="Top-p",minimum=0.1,maximum=1,step=0.05),
- gr.Slider(value=1.1,label="Repetition penalty",minimum=0.2,maximum=2,step=0.1)
-
- ],
- outputs=[
- gr.Textbox(label="Generated Text 1"),
- gr.Textbox(label="Generated Text 2"),
- gr.Textbox(label="Generated Text 3")],
- title = "Text Generator GPT2 Pipeline",
- description = "Text Generator. \n Temperature control randomness, lowering results in less random completions. As approach the zero, the model becomes more repetitive."
-)
-
-demo.launch()
\ No newline at end of file
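Stripped of the Gradio UI, the app above reduces to a single `transformers` pipeline call. A minimal sketch with an example prompt and example sampling values (placeholders, not defaults from the app):

```python
from transformers import pipeline

# Example values only; the app exposes these via sliders and a model dropdown.
generator = pipeline("text-generation", model="gpt2")
outputs = generator(
    "Once upon a time",
    num_return_sequences=3,
    max_length=50,
    temperature=0.9,        # only takes effect when do_sample=True
    num_beams=4,
    top_k=90,               # only takes effect when do_sample=True
    top_p=0.9,              # only takes effect when do_sample=True
    repetition_penalty=1.1,
    no_repeat_ngram_size=2,
)
for i, out in enumerate(outputs, 1):
    print(f"--- candidate {i} ---")
    print(out["generated_text"])
```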
diff --git a/spaces/crobbi/LipNet/utils.py b/spaces/crobbi/LipNet/utils.py
deleted file mode 100644
index ec3c896e207ae8267099bd5df863bf47c73d7ab9..0000000000000000000000000000000000000000
--- a/spaces/crobbi/LipNet/utils.py
+++ /dev/null
@@ -1,53 +0,0 @@
-import tensorflow as tf
-from typing import List
-import numpy as np
-import cv2
-import os
-
-vocab = [x for x in "abcdefghijklmnopqrstuvwxyz'?!123456789 "]
-char_to_num = tf.keras.layers.StringLookup(vocabulary=vocab, oov_token="")
-# Mapping integers back to original characters
-num_to_char = tf.keras.layers.StringLookup(
- vocabulary=char_to_num.get_vocabulary(), oov_token="", invert=True
-)
-
-def load_video(path:str) -> List[float]:
- #print(path)
- cap = cv2.VideoCapture(path)
- frames = []
- for _ in range(int(cap.get(cv2.CAP_PROP_FRAME_COUNT))):
- ret, frame = cap.read()
- frame = tf.image.rgb_to_grayscale(frame)
- frames.append(frame[190:236,80:220,:])
- cap.release()
-
- mean = tf.math.reduce_mean(frames)
- std = tf.math.reduce_std(tf.cast(frames, tf.float32))
- return tf.cast((frames - mean), tf.float32) / std
-
-def load_alignments(path:str) -> List[str]:
- #print(path)
- with open(path, 'r') as f:
- lines = f.readlines()
- tokens = []
- for line in lines:
- line = line.split()
- if line[2] != 'sil':
- tokens = [*tokens,' ',line[2]]
- return char_to_num(tf.reshape(tf.strings.unicode_split(tokens, input_encoding='UTF-8'), (-1)))[1:]
-
-def load_data(path: str):
- path = bytes.decode(path.numpy())
- print(path)
- file_name = path.split('/')[-1].split('.')[0]
- # File name splitting for windows
- # file_name = path.split('\\')[-1].split('.')[0]
- video_path = os.path.join('data','s1',f'{file_name}.mpg')
- alignment_path = os.path.join('data','alignments','s1',f'{file_name}.align')
- frames = load_video(video_path)
- print(frames.shape)
- alignments = load_alignments(alignment_path)
- image_data = (frames * 255).numpy().astype(np.uint8)  # tf.Tensor -> NumPy before casting
- image_data = np.squeeze(image_data)
-
- return frames, alignments, image_data
\ No newline at end of file
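The `char_to_num` / `num_to_char` pair above is a reversible `StringLookup` vocabulary; a self-contained round-trip sketch (the sample sentence is arbitrary):

```python
import tensorflow as tf

vocab = [x for x in "abcdefghijklmnopqrstuvwxyz'?!123456789 "]
char_to_num = tf.keras.layers.StringLookup(vocabulary=vocab, oov_token="")
num_to_char = tf.keras.layers.StringLookup(
    vocabulary=char_to_num.get_vocabulary(), oov_token="", invert=True)

tokens = tf.strings.unicode_split("bin blue at f two now", input_encoding="UTF-8")
ids = char_to_num(tokens)                        # characters -> integer ids
text = tf.strings.reduce_join(num_to_char(ids))  # ids -> characters -> string
print(ids.numpy())
print(text.numpy().decode())
```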
diff --git a/spaces/cvlab/zero123-live/ldm/models/autoencoder.py b/spaces/cvlab/zero123-live/ldm/models/autoencoder.py
deleted file mode 100644
index 69d2f07145745cf2029304ea4614447b9c446cc7..0000000000000000000000000000000000000000
--- a/spaces/cvlab/zero123-live/ldm/models/autoencoder.py
+++ /dev/null
@@ -1,445 +0,0 @@
-import torch
-import numpy as np
-import pytorch_lightning as pl
-import torch.nn.functional as F
-from contextlib import contextmanager
-from packaging import version
-from torch.optim.lr_scheduler import LambdaLR
-
-import sys
-sys.path.append('../../../taming-transformers')
-from taming.modules.vqvae.quantize import VectorQuantizer2 as VectorQuantizer
-
-from ldm.modules.diffusionmodules.model import Encoder, Decoder
-from ldm.modules.distributions.distributions import DiagonalGaussianDistribution
-from ldm.modules.ema import LitEma  # assumed module path; needed by the use_ema branch below
-
-from ldm.util import instantiate_from_config
-
-
-class VQModel(pl.LightningModule):
- def __init__(self,
- ddconfig,
- lossconfig,
- n_embed,
- embed_dim,
- ckpt_path=None,
- ignore_keys=[],
- image_key="image",
- colorize_nlabels=None,
- monitor=None,
- batch_resize_range=None,
- scheduler_config=None,
- lr_g_factor=1.0,
- remap=None,
- sane_index_shape=False, # tell vector quantizer to return indices as bhw
- use_ema=False
- ):
- super().__init__()
- self.embed_dim = embed_dim
- self.n_embed = n_embed
- self.image_key = image_key
- self.encoder = Encoder(**ddconfig)
- self.decoder = Decoder(**ddconfig)
- self.loss = instantiate_from_config(lossconfig)
- self.quantize = VectorQuantizer(n_embed, embed_dim, beta=0.25,
- remap=remap,
- sane_index_shape=sane_index_shape)
- self.quant_conv = torch.nn.Conv2d(ddconfig["z_channels"], embed_dim, 1)
- self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1)
- if colorize_nlabels is not None:
- assert type(colorize_nlabels)==int
- self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1))
- if monitor is not None:
- self.monitor = monitor
- self.batch_resize_range = batch_resize_range
- if self.batch_resize_range is not None:
- print(f"{self.__class__.__name__}: Using per-batch resizing in range {batch_resize_range}.")
-
- self.use_ema = use_ema
- if self.use_ema:
- self.model_ema = LitEma(self)
- print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")
-
- if ckpt_path is not None:
- self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)
- self.scheduler_config = scheduler_config
- self.lr_g_factor = lr_g_factor
-
- @contextmanager
- def ema_scope(self, context=None):
- if self.use_ema:
- self.model_ema.store(self.parameters())
- self.model_ema.copy_to(self)
- if context is not None:
- print(f"{context}: Switched to EMA weights")
- try:
- yield None
- finally:
- if self.use_ema:
- self.model_ema.restore(self.parameters())
- if context is not None:
- print(f"{context}: Restored training weights")
-
- def init_from_ckpt(self, path, ignore_keys=list()):
- sd = torch.load(path, map_location="cpu")["state_dict"]
- keys = list(sd.keys())
- for k in keys:
- for ik in ignore_keys:
- if k.startswith(ik):
- print("Deleting key {} from state_dict.".format(k))
- del sd[k]
- missing, unexpected = self.load_state_dict(sd, strict=False)
- print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
- if len(missing) > 0:
- print(f"Missing Keys: {missing}")
- print(f"Unexpected Keys: {unexpected}")
-
- def on_train_batch_end(self, *args, **kwargs):
- if self.use_ema:
- self.model_ema(self)
-
- def encode(self, x):
- h = self.encoder(x)
- h = self.quant_conv(h)
- quant, emb_loss, info = self.quantize(h)
- return quant, emb_loss, info
-
- def encode_to_prequant(self, x):
- h = self.encoder(x)
- h = self.quant_conv(h)
- return h
-
- def decode(self, quant):
- quant = self.post_quant_conv(quant)
- dec = self.decoder(quant)
- return dec
-
- def decode_code(self, code_b):
- quant_b = self.quantize.embed_code(code_b)
- dec = self.decode(quant_b)
- return dec
-
- def forward(self, input, return_pred_indices=False):
- quant, diff, (_,_,ind) = self.encode(input)
- dec = self.decode(quant)
- if return_pred_indices:
- return dec, diff, ind
- return dec, diff
-
- def get_input(self, batch, k):
- x = batch[k]
- if len(x.shape) == 3:
- x = x[..., None]
- x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()
- if self.batch_resize_range is not None:
- lower_size = self.batch_resize_range[0]
- upper_size = self.batch_resize_range[1]
- if self.global_step <= 4:
- # do the first few batches with max size to avoid later oom
- new_resize = upper_size
- else:
- new_resize = np.random.choice(np.arange(lower_size, upper_size+16, 16))
- if new_resize != x.shape[2]:
- x = F.interpolate(x, size=new_resize, mode="bicubic")
- x = x.detach()
- return x
-
- def training_step(self, batch, batch_idx, optimizer_idx):
- # https://github.com/pytorch/pytorch/issues/37142
- # try not to fool the heuristics
- x = self.get_input(batch, self.image_key)
- xrec, qloss, ind = self(x, return_pred_indices=True)
-
- if optimizer_idx == 0:
- # autoencode
- aeloss, log_dict_ae = self.loss(qloss, x, xrec, optimizer_idx, self.global_step,
- last_layer=self.get_last_layer(), split="train",
- predicted_indices=ind)
-
- self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=True)
- return aeloss
-
- if optimizer_idx == 1:
- # discriminator
- discloss, log_dict_disc = self.loss(qloss, x, xrec, optimizer_idx, self.global_step,
- last_layer=self.get_last_layer(), split="train")
- self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=True)
- return discloss
-
- def validation_step(self, batch, batch_idx):
- log_dict = self._validation_step(batch, batch_idx)
- with self.ema_scope():
- log_dict_ema = self._validation_step(batch, batch_idx, suffix="_ema")
- return log_dict
-
- def _validation_step(self, batch, batch_idx, suffix=""):
- x = self.get_input(batch, self.image_key)
- xrec, qloss, ind = self(x, return_pred_indices=True)
- aeloss, log_dict_ae = self.loss(qloss, x, xrec, 0,
- self.global_step,
- last_layer=self.get_last_layer(),
- split="val"+suffix,
- predicted_indices=ind
- )
-
- discloss, log_dict_disc = self.loss(qloss, x, xrec, 1,
- self.global_step,
- last_layer=self.get_last_layer(),
- split="val"+suffix,
- predicted_indices=ind
- )
- rec_loss = log_dict_ae[f"val{suffix}/rec_loss"]
- self.log(f"val{suffix}/rec_loss", rec_loss,
- prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True)
- self.log(f"val{suffix}/aeloss", aeloss,
- prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True)
- if version.parse(pl.__version__) >= version.parse('1.4.0'):
- del log_dict_ae[f"val{suffix}/rec_loss"]
- self.log_dict(log_dict_ae)
- self.log_dict(log_dict_disc)
- return self.log_dict
-
- def configure_optimizers(self):
- lr_d = self.learning_rate
- lr_g = self.lr_g_factor*self.learning_rate
- print("lr_d", lr_d)
- print("lr_g", lr_g)
- opt_ae = torch.optim.Adam(list(self.encoder.parameters())+
- list(self.decoder.parameters())+
- list(self.quantize.parameters())+
- list(self.quant_conv.parameters())+
- list(self.post_quant_conv.parameters()),
- lr=lr_g, betas=(0.5, 0.9))
- opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),
- lr=lr_d, betas=(0.5, 0.9))
-
- if self.scheduler_config is not None:
- scheduler = instantiate_from_config(self.scheduler_config)
-
- print("Setting up LambdaLR scheduler...")
- scheduler = [
- {
- 'scheduler': LambdaLR(opt_ae, lr_lambda=scheduler.schedule),
- 'interval': 'step',
- 'frequency': 1
- },
- {
- 'scheduler': LambdaLR(opt_disc, lr_lambda=scheduler.schedule),
- 'interval': 'step',
- 'frequency': 1
- },
- ]
- return [opt_ae, opt_disc], scheduler
- return [opt_ae, opt_disc], []
-
- def get_last_layer(self):
- return self.decoder.conv_out.weight
-
- def log_images(self, batch, only_inputs=False, plot_ema=False, **kwargs):
- log = dict()
- x = self.get_input(batch, self.image_key)
- x = x.to(self.device)
- if only_inputs:
- log["inputs"] = x
- return log
- xrec, _ = self(x)
- if x.shape[1] > 3:
- # colorize with random projection
- assert xrec.shape[1] > 3
- x = self.to_rgb(x)
- xrec = self.to_rgb(xrec)
- log["inputs"] = x
- log["reconstructions"] = xrec
- if plot_ema:
- with self.ema_scope():
- xrec_ema, _ = self(x)
- if x.shape[1] > 3: xrec_ema = self.to_rgb(xrec_ema)
- log["reconstructions_ema"] = xrec_ema
- return log
-
- def to_rgb(self, x):
- assert self.image_key == "segmentation"
- if not hasattr(self, "colorize"):
- self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x))
- x = F.conv2d(x, weight=self.colorize)
- x = 2.*(x-x.min())/(x.max()-x.min()) - 1.
- return x
-
-
-class VQModelInterface(VQModel):
- def __init__(self, embed_dim, *args, **kwargs):
- super().__init__(embed_dim=embed_dim, *args, **kwargs)
- self.embed_dim = embed_dim
-
- def encode(self, x):
- h = self.encoder(x)
- h = self.quant_conv(h)
- return h
-
- def decode(self, h, force_not_quantize=False):
- # also go through quantization layer
- if not force_not_quantize:
- quant, emb_loss, info = self.quantize(h)
- else:
- quant = h
- quant = self.post_quant_conv(quant)
- dec = self.decoder(quant)
- return dec
-
-
-class AutoencoderKL(pl.LightningModule):
- def __init__(self,
- ddconfig,
- lossconfig,
- embed_dim,
- ckpt_path=None,
- ignore_keys=[],
- image_key="image",
- colorize_nlabels=None,
- monitor=None,
- ):
- super().__init__()
- self.image_key = image_key
- self.encoder = Encoder(**ddconfig)
- self.decoder = Decoder(**ddconfig)
- self.loss = instantiate_from_config(lossconfig)
- assert ddconfig["double_z"]
- self.quant_conv = torch.nn.Conv2d(2*ddconfig["z_channels"], 2*embed_dim, 1)
- self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1)
- self.embed_dim = embed_dim
- if colorize_nlabels is not None:
- assert type(colorize_nlabels)==int
- self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1))
- if monitor is not None:
- self.monitor = monitor
- if ckpt_path is not None:
- self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)
-
- def init_from_ckpt(self, path, ignore_keys=list()):
- sd = torch.load(path, map_location="cpu")["state_dict"]
- keys = list(sd.keys())
- for k in keys:
- for ik in ignore_keys:
- if k.startswith(ik):
- print("Deleting key {} from state_dict.".format(k))
- del sd[k]
- self.load_state_dict(sd, strict=False)
- print(f"Restored from {path}")
-
- def encode(self, x):
- h = self.encoder(x)
- moments = self.quant_conv(h)
- posterior = DiagonalGaussianDistribution(moments)
- return posterior
-
- def decode(self, z):
- z = self.post_quant_conv(z)
- dec = self.decoder(z)
- return dec
-
- def forward(self, input, sample_posterior=True):
- posterior = self.encode(input)
- if sample_posterior:
- z = posterior.sample()
- else:
- z = posterior.mode()
- dec = self.decode(z)
- return dec, posterior
-
- def get_input(self, batch, k):
- x = batch[k]
- if len(x.shape) == 3:
- x = x[..., None]
- x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()
- return x
-
- def training_step(self, batch, batch_idx, optimizer_idx):
- inputs = self.get_input(batch, self.image_key)
- reconstructions, posterior = self(inputs)
-
- if optimizer_idx == 0:
- # train encoder+decoder+logvar
- aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,
- last_layer=self.get_last_layer(), split="train")
- self.log("aeloss", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
- self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False)
- return aeloss
-
- if optimizer_idx == 1:
- # train the discriminator
- discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,
- last_layer=self.get_last_layer(), split="train")
-
- self.log("discloss", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
- self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False)
- return discloss
-
- def validation_step(self, batch, batch_idx):
- inputs = self.get_input(batch, self.image_key)
- reconstructions, posterior = self(inputs)
- aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step,
- last_layer=self.get_last_layer(), split="val")
-
- discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step,
- last_layer=self.get_last_layer(), split="val")
-
- self.log("val/rec_loss", log_dict_ae["val/rec_loss"])
- self.log_dict(log_dict_ae)
- self.log_dict(log_dict_disc)
- return self.log_dict
-
- def configure_optimizers(self):
- lr = self.learning_rate
- opt_ae = torch.optim.Adam(list(self.encoder.parameters())+
- list(self.decoder.parameters())+
- list(self.quant_conv.parameters())+
- list(self.post_quant_conv.parameters()),
- lr=lr, betas=(0.5, 0.9))
- opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),
- lr=lr, betas=(0.5, 0.9))
- return [opt_ae, opt_disc], []
-
- def get_last_layer(self):
- return self.decoder.conv_out.weight
-
- @torch.no_grad()
- def log_images(self, batch, only_inputs=False, **kwargs):
- log = dict()
- x = self.get_input(batch, self.image_key)
- x = x.to(self.device)
- if not only_inputs:
- xrec, posterior = self(x)
- if x.shape[1] > 3:
- # colorize with random projection
- assert xrec.shape[1] > 3
- x = self.to_rgb(x)
- xrec = self.to_rgb(xrec)
- log["samples"] = self.decode(torch.randn_like(posterior.sample()))
- log["reconstructions"] = xrec
- log["inputs"] = x
- return log
-
- def to_rgb(self, x):
- assert self.image_key == "segmentation"
- if not hasattr(self, "colorize"):
- self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x))
- x = F.conv2d(x, weight=self.colorize)
- x = 2.*(x-x.min())/(x.max()-x.min()) - 1.
- return x
-
-
-class IdentityFirstStage(torch.nn.Module):
- def __init__(self, *args, vq_interface=False, **kwargs):
- self.vq_interface = vq_interface # TODO: Should be true by default but check to not break older stuff
- super().__init__()
-
- def encode(self, x, *args, **kwargs):
- return x
-
- def decode(self, x, *args, **kwargs):
- return x
-
- def quantize(self, x, *args, **kwargs):
- if self.vq_interface:
- return x, None, [None, None, None]
- return x
-
- def forward(self, x, *args, **kwargs):
- return x
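To make the `sample_posterior` branch of `AutoencoderKL.forward` concrete, here is a standalone sketch of the diagonal-Gaussian reparameterisation performed by `DiagonalGaussianDistribution`; tensor sizes and the log-variance clamp bounds are illustrative assumptions rather than values taken from this file:

```python
import torch

embed_dim = 4
moments = torch.randn(1, 2 * embed_dim, 8, 8)    # stands in for quant_conv(encoder(x))
mean, logvar = torch.chunk(moments, 2, dim=1)    # split into mean and log-variance
logvar = torch.clamp(logvar, -30.0, 20.0)        # keep the variance numerically sane
std = torch.exp(0.5 * logvar)

z_sampled = mean + std * torch.randn_like(mean)  # sample_posterior=True
z_mode = mean                                    # sample_posterior=False (.mode())
# KL divergence of the diagonal Gaussian against a standard normal prior.
kl = 0.5 * torch.sum(mean.pow(2) + logvar.exp() - 1.0 - logvar, dim=[1, 2, 3])
print(z_sampled.shape, z_mode.shape, kl.shape)
```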
diff --git a/spaces/cvsys/upscale/app.py b/spaces/cvsys/upscale/app.py
deleted file mode 100644
index f8f1319ad327f33af1a3e9913b8ae3132caccac5..0000000000000000000000000000000000000000
--- a/spaces/cvsys/upscale/app.py
+++ /dev/null
@@ -1,134 +0,0 @@
-from sldl.video import VideoSR
-from sldl.image import ImageSR
-
-import gradio as gr
-import tempfile
-import shutil
-import torch
-import ffmpeg
-import time
-from PIL import Image
-
-cc = 2
-if torch.backends.mps.is_available():
- device = 'mps'
- cc = 5
-elif torch.cuda.is_available():
- device = 'cuda'
- cc = 10
-else:
- device = 'cpu'
-
-vbsrgan = VideoSR('BSRGAN').to(device)
-vresrgan = VideoSR('RealESRGAN').to(device)
-ibsrgan = ImageSR('BSRGAN').to(device)
-iresrgan = ImageSR('RealESRGAN').to(device)
-
-def upscale_video(input_video, output_video, progress, mname):
- modelname = mname.lower()
- model = vbsrgan
- if modelname == 'bsrgan (default)':
- # do nothing
- pass
- elif modelname == 'real esrgan':
- model = vresrgan
- model(input_video, output_video, progress.tqdm)
-
-def upscale_image(input_image, output_image, mname):
- modelname = mname.lower()
- model = ibsrgan
- if modelname == 'bsrgan (default)':
- # do nothing
- pass
- elif modelname == 'real esrgan':
- model = iresrgan
- shutil.copy(input_image, output_image)
- model(output_image)
-
-# Gradio interface
-def video_upscaling_interface(input_text, model_name, pinged, progress=gr.Progress()):
- if input_text:
- if pinged == True:
- temp_dir = tempfile.mkdtemp()
- input_video_path = f"{temp_dir}/input_video"
- output_video_path = f"{temp_dir}/output_video.mp4"
- ffmpeg.input(input_text).output(input_video_path + '.mp4').run()
-
- # Upscale the video
- upscale_video(input_video_path + '.mp4', output_video_path, progress, model_name)
-
- return [output_video_path, output_video_path]
- else:
- gr.Warning("Please ping Hugging Face about a GPU grant first!")
- return ["no_vid.mp4", "no_vid.mp4"]
- else:
- return ["no_vid.mp4", "no_vid.mp4"]
-
-
-def image_upscaling_interface(input_text, model_name):
- if input_text:
- temp_dir = tempfile.mkdtemp()
- input_image_path = f"{temp_dir}/input_image.jpg"
- output_image_path = f"{temp_dir}/output_image.jpg"
- input_text.save(input_image_path)
- upscale_image(input_image_path, output_image_path, model_name)
- return [output_image_path, output_image_path]
- else:
- return ["no_image.jpg", "no_image.jpg"]
-
-
-css = "footer {display: none !important;} .gradio-container {min-height: 0px !important;}"
-
-
-with gr.Blocks(css=css) as demo:
- gr.Markdown('''
-# Upscale
-## A CVSYS Project
-
-### NOTICE: This is running on a free Hugging Face Space, so it will be quite slow. Expect it to take _hours_ to upscale 5 minutes. Please be mindful and _DO NOT_ upscale videos longer than 15 seconds! Thank you!
-
-[Check out Upscale on GitHub!](https://github.com/cv-sys/upscale)
-
-## Want Faster Inference?
-
-Duplicate this space for faster inference! We recommend using an A10G or A100.
-
-We applied for a GPU grant in July but haven't heard back from Hugging Face yet. Please @ mention a Hugging Face employee in this thread if you find this useful and want faster inference :). Thanks!
-
-
-
-Please note that after you upload a video, it may take several minutes before the progress bar appears. This is because we first convert your video to ensure the correct format.
-''')
- # with gr.Tab("Image"):
- # with gr.Row():
- # with gr.Column():
- # iinp = gr.Image(label="Upload Image", interactive=True, type="pil")
- # imod = gr.Dropdown(
- # ["BSRGAN (Default)", "Real ESRGAN"],
- # value="BSRGAN (Default)",
- # interactive=True,
- # label="Model"
- # )
- # with gr.Column():
- # iout = gr.Image(label="View Image", interactive=False, type="filepath")
- # ifile = gr.File(label="Download Image", interactive=False)
- # ibtn = gr.Button(value="Upscale Image")
- with gr.Tab("Video"):
- with gr.Row():
- with gr.Column():
- vinp = gr.Video(label="Upload Video", interactive=True)
- vmod = gr.Dropdown(
- ["BSRGAN (Default)", "Real ESRGAN"],
- value="BSRGAN (Default)",
- interactive=True,
- label="Model"
- )
- pinged = gr.Checkbox(label="I pinged Hugging Face about the GPU grant", info="We applied for a GPU grant in July but haven't heard back from Hugging Face yet. Please refer to the above information for a link to ping Hugging Face.")
- with gr.Column():
- vout = gr.Video(label="Watch Video", interactive=False)
- vfile = gr.File(label="Download Video", interactive=False)
- vbtn = gr.Button(value="Upscale Video")
- # ibtn.click(image_upscaling_interface, [iinp, imod], outputs=[iout, ifile])
- vbtn.click(video_upscaling_interface, [vinp, vmod, pinged], outputs=[vout, vfile])
- demo.queue(concurrency_count=cc)
- demo.launch()
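For reference, the image path of the app above reduces to a copy-then-upscale-in-place call. A hypothetical standalone sketch, assuming the `sldl` package used above is installed; file names are placeholders:

```python
import shutil
from sldl.image import ImageSR

model = ImageSR("BSRGAN")          # or "RealESRGAN"; optionally .to("cuda") as in the app
shutil.copy("input.jpg", "output.jpg")
model("output.jpg")                # overwrites output.jpg with the upscaled image
```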
diff --git a/spaces/dachenchen/HiWantJoin/readme/README_en.md b/spaces/dachenchen/HiWantJoin/readme/README_en.md
deleted file mode 100644
index a906ecb3ebc411f5cdeb33d661266a489a20c3b0..0000000000000000000000000000000000000000
--- a/spaces/dachenchen/HiWantJoin/readme/README_en.md
+++ /dev/null
@@ -1,127 +0,0 @@
-
-
-川虎 Chat 🐯 Chuanhu Chat
-
-
-
-
-
-
-
- Lightweight and User-friendly Web-UI for LLMs including ChatGPT/ChatGLM/LLaMA
-
- Streaming / Unlimited conversations / Save history / Preset prompts / Chat with files / Web search
- LaTeX rendering / Table rendering / Code highlighting
- Auto dark mode / Adaptive web interface / WeChat-like theme
- Multi-parameters tuning / Multi-API-Key support / Multi-user support
- Compatible with GPT-4 / Local deployment for LLMs
-
- Video Tutorial
- ·
- 2.0 Introduction
- ·
- 3.0 Introduction & Tutorial
- ||
- Online trial
- ·
- One-Click deployment
-
-
-
-
-
-
-
-## Usage Tips
-
-- To better control ChatGPT, use the System Prompt.
-- To use a Prompt Template, select the Prompt Template Collection file first, and then choose a prompt from the drop-down menu.
-- To try again if the response is unsatisfactory, use the `🔄 Regenerate` button.
-- To start a new line in the input box, press Shift + Enter.
-- To quickly switch through your input history, press the ↑ and ↓ keys in the input box.
-- To deploy the program onto a server, change the last line of the program to `demo.launch(server_name="0.0.0.0", server_port=)`.
-- To get a public shared link, change the last line of the program to `demo.launch(share=True)`. Please note that the program must be running in order to be accessed via a public link.
-- To use it in Hugging Face Spaces: it is recommended to **Duplicate Space** and run the program in your own Space for a faster and more secure experience.
-
-## Installation
-
-```shell
-git clone https://github.com/GaiZhenbiao/ChuanhuChatGPT.git
-cd ChuanhuChatGPT
-pip install -r requirements.txt
-```
-
-Then make a copy of `config_example.json`, rename it to `config.json`, and then fill in your API-Key and other settings in the file.
-
-```shell
-python ChuanhuChatbot.py
-```
-
-A browser window will open and you will be able to chat with ChatGPT.
-
-> **Note**
->
-> Please check our [wiki page](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/使用教程) for detailed instructions.
-
-## Troubleshooting
-
-When you encounter problems, you should try manually pulling the latest changes of this project first. The steps are as follows:
-
-1. Download the latest code archive by clicking on `Download ZIP` on the webpage, or
- ```shell
- git pull https://github.com/GaiZhenbiao/ChuanhuChatGPT.git main -f
- ```
-2. Try installing the dependencies again (as this project may have introduced new dependencies)
- ```
- pip install -r requirements.txt
- ```
-3. Update Gradio
- ```
- pip install gradio --upgrade --force-reinstall
- ```
-
-Generally, you can solve most problems by following these steps.
-
-If the problem still exists, please refer to this page: [Frequently Asked Questions (FAQ)](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/常见问题)
-
-This page lists almost all the possible problems and solutions. Please read it carefully.
-
-## More Information
-
-More information can be found in our [wiki](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki):
-
-- [How to contribute a translation](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/Localization)
-- [How to make a contribution](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/贡献指南)
-- [How to cite the project](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/使用许可#如何引用该项目)
-- [Project changelog](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/更新日志)
-- [Project license](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/使用许可)
-
-## Starchart
-
-[Star History Chart](https://star-history.com/#GaiZhenbiao/ChuanhuChatGPT&Date)
-
-## Contributors
-
-
-
-
-
-## Sponsor
-
-🐯 If you find this project helpful, feel free to buy me a coke or a cup of coffee~
-
-
-
-
diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/aiohttp/connector.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/aiohttp/connector.py
deleted file mode 100644
index 2499a2dabe92a14413d7f4023477d4b9803da9bd..0000000000000000000000000000000000000000
--- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/aiohttp/connector.py
+++ /dev/null
@@ -1,1456 +0,0 @@
-import asyncio
-import functools
-import random
-import sys
-import traceback
-import warnings
-from collections import defaultdict, deque
-from contextlib import suppress
-from http.cookies import SimpleCookie
-from itertools import cycle, islice
-from time import monotonic
-from types import TracebackType
-from typing import (
- TYPE_CHECKING,
- Any,
- Awaitable,
- Callable,
- DefaultDict,
- Dict,
- Iterator,
- List,
- Optional,
- Set,
- Tuple,
- Type,
- Union,
- cast,
-)
-
-import attr
-
-from . import hdrs, helpers
-from .abc import AbstractResolver
-from .client_exceptions import (
- ClientConnectionError,
- ClientConnectorCertificateError,
- ClientConnectorError,
- ClientConnectorSSLError,
- ClientHttpProxyError,
- ClientProxyConnectionError,
- ServerFingerprintMismatch,
- UnixClientConnectorError,
- cert_errors,
- ssl_errors,
-)
-from .client_proto import ResponseHandler
-from .client_reqrep import ClientRequest, Fingerprint, _merge_ssl_params
-from .helpers import (
- PY_36,
- ceil_timeout,
- get_running_loop,
- is_ip_address,
- noop,
- sentinel,
-)
-from .http import RESPONSES
-from .locks import EventResultOrError
-from .resolver import DefaultResolver
-
-try:
- import ssl
-
- SSLContext = ssl.SSLContext
-except ImportError: # pragma: no cover
- ssl = None # type: ignore[assignment]
- SSLContext = object # type: ignore[misc,assignment]
-
-
-__all__ = ("BaseConnector", "TCPConnector", "UnixConnector", "NamedPipeConnector")
-
-
-if TYPE_CHECKING: # pragma: no cover
- from .client import ClientTimeout
- from .client_reqrep import ConnectionKey
- from .tracing import Trace
-
-
-class _DeprecationWaiter:
- __slots__ = ("_awaitable", "_awaited")
-
- def __init__(self, awaitable: Awaitable[Any]) -> None:
- self._awaitable = awaitable
- self._awaited = False
-
- def __await__(self) -> Any:
- self._awaited = True
- return self._awaitable.__await__()
-
- def __del__(self) -> None:
- if not self._awaited:
- warnings.warn(
- "Connector.close() is a coroutine, "
- "please use await connector.close()",
- DeprecationWarning,
- )
-
-
-class Connection:
-
- _source_traceback = None
- _transport = None
-
- def __init__(
- self,
- connector: "BaseConnector",
- key: "ConnectionKey",
- protocol: ResponseHandler,
- loop: asyncio.AbstractEventLoop,
- ) -> None:
- self._key = key
- self._connector = connector
- self._loop = loop
- self._protocol: Optional[ResponseHandler] = protocol
- self._callbacks: List[Callable[[], None]] = []
-
- if loop.get_debug():
- self._source_traceback = traceback.extract_stack(sys._getframe(1))
-
- def __repr__(self) -> str:
- return f"Connection<{self._key}>"
-
- def __del__(self, _warnings: Any = warnings) -> None:
- if self._protocol is not None:
- if PY_36:
- kwargs = {"source": self}
- else:
- kwargs = {}
- _warnings.warn(f"Unclosed connection {self!r}", ResourceWarning, **kwargs)
- if self._loop.is_closed():
- return
-
- self._connector._release(self._key, self._protocol, should_close=True)
-
- context = {"client_connection": self, "message": "Unclosed connection"}
- if self._source_traceback is not None:
- context["source_traceback"] = self._source_traceback
- self._loop.call_exception_handler(context)
-
- @property
- def loop(self) -> asyncio.AbstractEventLoop:
- warnings.warn(
- "connector.loop property is deprecated", DeprecationWarning, stacklevel=2
- )
- return self._loop
-
- @property
- def transport(self) -> Optional[asyncio.Transport]:
- if self._protocol is None:
- return None
- return self._protocol.transport
-
- @property
- def protocol(self) -> Optional[ResponseHandler]:
- return self._protocol
-
- def add_callback(self, callback: Callable[[], None]) -> None:
- if callback is not None:
- self._callbacks.append(callback)
-
- def _notify_release(self) -> None:
- callbacks, self._callbacks = self._callbacks[:], []
-
- for cb in callbacks:
- with suppress(Exception):
- cb()
-
- def close(self) -> None:
- self._notify_release()
-
- if self._protocol is not None:
- self._connector._release(self._key, self._protocol, should_close=True)
- self._protocol = None
-
- def release(self) -> None:
- self._notify_release()
-
- if self._protocol is not None:
- self._connector._release(
- self._key, self._protocol, should_close=self._protocol.should_close
- )
- self._protocol = None
-
- @property
- def closed(self) -> bool:
- return self._protocol is None or not self._protocol.is_connected()
-
-
-class _TransportPlaceholder:
- """placeholder for BaseConnector.connect function"""
-
- def close(self) -> None:
- pass
-
-
-class BaseConnector:
- """Base connector class.
-
- keepalive_timeout - (optional) Keep-alive timeout.
- force_close - Set to True to force close and do reconnect
- after each request (and between redirects).
- limit - The total number of simultaneous connections.
- limit_per_host - Number of simultaneous connections to one host.
- enable_cleanup_closed - Enables clean-up closed ssl transports.
- Disabled by default.
- loop - Optional event loop.
- """
-
- _closed = True # prevent AttributeError in __del__ if ctor was failed
- _source_traceback = None
-
- # abort transport after 2 seconds (cleanup broken connections)
- _cleanup_closed_period = 2.0
-
- def __init__(
- self,
- *,
- keepalive_timeout: Union[object, None, float] = sentinel,
- force_close: bool = False,
- limit: int = 100,
- limit_per_host: int = 0,
- enable_cleanup_closed: bool = False,
- loop: Optional[asyncio.AbstractEventLoop] = None,
- ) -> None:
-
- if force_close:
- if keepalive_timeout is not None and keepalive_timeout is not sentinel:
- raise ValueError(
- "keepalive_timeout cannot " "be set if force_close is True"
- )
- else:
- if keepalive_timeout is sentinel:
- keepalive_timeout = 15.0
-
- loop = get_running_loop(loop)
-
- self._closed = False
- if loop.get_debug():
- self._source_traceback = traceback.extract_stack(sys._getframe(1))
-
- self._conns: Dict[ConnectionKey, List[Tuple[ResponseHandler, float]]] = {}
- self._limit = limit
- self._limit_per_host = limit_per_host
- self._acquired: Set[ResponseHandler] = set()
- self._acquired_per_host: DefaultDict[
- ConnectionKey, Set[ResponseHandler]
- ] = defaultdict(set)
- self._keepalive_timeout = cast(float, keepalive_timeout)
- self._force_close = force_close
-
- # {host_key: FIFO list of waiters}
- self._waiters = defaultdict(deque) # type: ignore[var-annotated]
-
- self._loop = loop
- self._factory = functools.partial(ResponseHandler, loop=loop)
-
- self.cookies: SimpleCookie[str] = SimpleCookie()
-
- # start keep-alive connection cleanup task
- self._cleanup_handle: Optional[asyncio.TimerHandle] = None
-
- # start cleanup closed transports task
- self._cleanup_closed_handle: Optional[asyncio.TimerHandle] = None
- self._cleanup_closed_disabled = not enable_cleanup_closed
- self._cleanup_closed_transports: List[Optional[asyncio.Transport]] = []
- self._cleanup_closed()
-
- def __del__(self, _warnings: Any = warnings) -> None:
- if self._closed:
- return
- if not self._conns:
- return
-
- conns = [repr(c) for c in self._conns.values()]
-
- self._close()
-
- if PY_36:
- kwargs = {"source": self}
- else:
- kwargs = {}
- _warnings.warn(f"Unclosed connector {self!r}", ResourceWarning, **kwargs)
- context = {
- "connector": self,
- "connections": conns,
- "message": "Unclosed connector",
- }
- if self._source_traceback is not None:
- context["source_traceback"] = self._source_traceback
- self._loop.call_exception_handler(context)
-
- def __enter__(self) -> "BaseConnector":
- warnings.warn(
- '"with Connector():" is deprecated, '
- 'use "async with Connector():" instead',
- DeprecationWarning,
- )
- return self
-
- def __exit__(self, *exc: Any) -> None:
- self._close()
-
- async def __aenter__(self) -> "BaseConnector":
- return self
-
- async def __aexit__(
- self,
- exc_type: Optional[Type[BaseException]] = None,
- exc_value: Optional[BaseException] = None,
- exc_traceback: Optional[TracebackType] = None,
- ) -> None:
- await self.close()
-
- @property
- def force_close(self) -> bool:
- """Ultimately close connection on releasing if True."""
- return self._force_close
-
- @property
- def limit(self) -> int:
- """The total number for simultaneous connections.
-
- If limit is 0 the connector has no limit.
- The default limit size is 100.
- """
- return self._limit
-
- @property
- def limit_per_host(self) -> int:
- """The limit for simultaneous connections to the same endpoint.
-
- Endpoints are the same if they have an equal
- (host, port, is_ssl) triple.
- """
- return self._limit_per_host
-
- def _cleanup(self) -> None:
- """Cleanup unused transports."""
- if self._cleanup_handle:
- self._cleanup_handle.cancel()
- # _cleanup_handle should be unset, otherwise _release() will not
- # recreate it ever!
- self._cleanup_handle = None
-
- now = self._loop.time()
- timeout = self._keepalive_timeout
-
- if self._conns:
- connections = {}
- deadline = now - timeout
- for key, conns in self._conns.items():
- alive = []
- for proto, use_time in conns:
- if proto.is_connected():
- if use_time - deadline < 0:
- transport = proto.transport
- proto.close()
- if key.is_ssl and not self._cleanup_closed_disabled:
- self._cleanup_closed_transports.append(transport)
- else:
- alive.append((proto, use_time))
- else:
- transport = proto.transport
- proto.close()
- if key.is_ssl and not self._cleanup_closed_disabled:
- self._cleanup_closed_transports.append(transport)
-
- if alive:
- connections[key] = alive
-
- self._conns = connections
-
- if self._conns:
- self._cleanup_handle = helpers.weakref_handle(
- self, "_cleanup", timeout, self._loop
- )
-
- def _drop_acquired_per_host(
- self, key: "ConnectionKey", val: ResponseHandler
- ) -> None:
- acquired_per_host = self._acquired_per_host
- if key not in acquired_per_host:
- return
- conns = acquired_per_host[key]
- conns.remove(val)
- if not conns:
- del self._acquired_per_host[key]
-
- def _cleanup_closed(self) -> None:
- """Double confirmation for transport close.
-
- Some broken ssl servers may leave the socket open without a proper close.
- """
- if self._cleanup_closed_handle:
- self._cleanup_closed_handle.cancel()
-
- for transport in self._cleanup_closed_transports:
- if transport is not None:
- transport.abort()
-
- self._cleanup_closed_transports = []
-
- if not self._cleanup_closed_disabled:
- self._cleanup_closed_handle = helpers.weakref_handle(
- self, "_cleanup_closed", self._cleanup_closed_period, self._loop
- )
-
- def close(self) -> Awaitable[None]:
- """Close all opened transports."""
- self._close()
- return _DeprecationWaiter(noop())
-
- def _close(self) -> None:
- if self._closed:
- return
-
- self._closed = True
-
- try:
- if self._loop.is_closed():
- return
-
- # cancel cleanup task
- if self._cleanup_handle:
- self._cleanup_handle.cancel()
-
- # cancel cleanup close task
- if self._cleanup_closed_handle:
- self._cleanup_closed_handle.cancel()
-
- for data in self._conns.values():
- for proto, t0 in data:
- proto.close()
-
- for proto in self._acquired:
- proto.close()
-
- for transport in self._cleanup_closed_transports:
- if transport is not None:
- transport.abort()
-
- finally:
- self._conns.clear()
- self._acquired.clear()
- self._waiters.clear()
- self._cleanup_handle = None
- self._cleanup_closed_transports.clear()
- self._cleanup_closed_handle = None
-
- @property
- def closed(self) -> bool:
- """Is connector closed.
-
- A readonly property.
- """
- return self._closed
-
- def _available_connections(self, key: "ConnectionKey") -> int:
- """
- Return number of available connections.
-
- The limit, limit_per_host and the connection key are taken into account.
-
- If it returns less than 1, it means that there are no connections
- available.
- """
- if self._limit:
- # total calc available connections
- available = self._limit - len(self._acquired)
-
- # check limit per host
- if (
- self._limit_per_host
- and available > 0
- and key in self._acquired_per_host
- ):
- acquired = self._acquired_per_host.get(key)
- assert acquired is not None
- available = self._limit_per_host - len(acquired)
-
- elif self._limit_per_host and key in self._acquired_per_host:
- # check limit per host
- acquired = self._acquired_per_host.get(key)
- assert acquired is not None
- available = self._limit_per_host - len(acquired)
- else:
- available = 1
-
- return available
-
- async def connect(
- self, req: "ClientRequest", traces: List["Trace"], timeout: "ClientTimeout"
- ) -> Connection:
- """Get from pool or create new connection."""
- key = req.connection_key
- available = self._available_connections(key)
-
- # Wait if there are no available connections or if there are/were
- # waiters (i.e. don't steal connection from a waiter about to wake up)
- if available <= 0 or key in self._waiters:
- fut = self._loop.create_future()
-
- # This connection will now count towards the limit.
- self._waiters[key].append(fut)
-
- if traces:
- for trace in traces:
- await trace.send_connection_queued_start()
-
- try:
- await fut
- except BaseException as e:
- if key in self._waiters:
- # remove a waiter even if it was cancelled, normally it's
- # removed when it's notified
- try:
- self._waiters[key].remove(fut)
- except ValueError: # fut may no longer be in list
- pass
-
- raise e
- finally:
- if key in self._waiters and not self._waiters[key]:
- del self._waiters[key]
-
- if traces:
- for trace in traces:
- await trace.send_connection_queued_end()
-
- proto = self._get(key)
- if proto is None:
- placeholder = cast(ResponseHandler, _TransportPlaceholder())
- self._acquired.add(placeholder)
- self._acquired_per_host[key].add(placeholder)
-
- if traces:
- for trace in traces:
- await trace.send_connection_create_start()
-
- try:
- proto = await self._create_connection(req, traces, timeout)
- if self._closed:
- proto.close()
- raise ClientConnectionError("Connector is closed.")
- except BaseException:
- if not self._closed:
- self._acquired.remove(placeholder)
- self._drop_acquired_per_host(key, placeholder)
- self._release_waiter()
- raise
- else:
- if not self._closed:
- self._acquired.remove(placeholder)
- self._drop_acquired_per_host(key, placeholder)
-
- if traces:
- for trace in traces:
- await trace.send_connection_create_end()
- else:
- if traces:
- # Acquire the connection to prevent race conditions with limits
- placeholder = cast(ResponseHandler, _TransportPlaceholder())
- self._acquired.add(placeholder)
- self._acquired_per_host[key].add(placeholder)
- for trace in traces:
- await trace.send_connection_reuseconn()
- self._acquired.remove(placeholder)
- self._drop_acquired_per_host(key, placeholder)
-
- self._acquired.add(proto)
- self._acquired_per_host[key].add(proto)
- return Connection(self, key, proto, self._loop)
-
- def _get(self, key: "ConnectionKey") -> Optional[ResponseHandler]:
- try:
- conns = self._conns[key]
- except KeyError:
- return None
-
- t1 = self._loop.time()
- while conns:
- proto, t0 = conns.pop()
- if proto.is_connected():
- if t1 - t0 > self._keepalive_timeout:
- transport = proto.transport
- proto.close()
- # only for SSL transports
- if key.is_ssl and not self._cleanup_closed_disabled:
- self._cleanup_closed_transports.append(transport)
- else:
- if not conns:
- # The very last connection was reclaimed: drop the key
- del self._conns[key]
- return proto
- else:
- transport = proto.transport
- proto.close()
- if key.is_ssl and not self._cleanup_closed_disabled:
- self._cleanup_closed_transports.append(transport)
-
- # No more connections: drop the key
- del self._conns[key]
- return None
-
- def _release_waiter(self) -> None:
- """
- Iterates over all waiters until one to be released is found.
-
- The one to be released is not finished and
- belongs to a host that has available connections.
- """
- if not self._waiters:
- return
-
- # Shuffle the waiter keys so we do not iterate
- # in the same order on every call.
- queues = list(self._waiters.keys())
- random.shuffle(queues)
-
- for key in queues:
- if self._available_connections(key) < 1:
- continue
-
- waiters = self._waiters[key]
- while waiters:
- waiter = waiters.popleft()
- if not waiter.done():
- waiter.set_result(None)
- return
-
- def _release_acquired(self, key: "ConnectionKey", proto: ResponseHandler) -> None:
- if self._closed:
- # acquired connection is already released on connector closing
- return
-
- try:
- self._acquired.remove(proto)
- self._drop_acquired_per_host(key, proto)
- except KeyError: # pragma: no cover
- # this may be the result of a non-deterministic order of object
- # finalization due to garbage collection.
- pass
- else:
- self._release_waiter()
-
- def _release(
- self,
- key: "ConnectionKey",
- protocol: ResponseHandler,
- *,
- should_close: bool = False,
- ) -> None:
- if self._closed:
- # acquired connection is already released on connector closing
- return
-
- self._release_acquired(key, protocol)
-
- if self._force_close:
- should_close = True
-
- if should_close or protocol.should_close:
- transport = protocol.transport
- protocol.close()
-
- if key.is_ssl and not self._cleanup_closed_disabled:
- self._cleanup_closed_transports.append(transport)
- else:
- conns = self._conns.get(key)
- if conns is None:
- conns = self._conns[key] = []
- conns.append((protocol, self._loop.time()))
-
- if self._cleanup_handle is None:
- self._cleanup_handle = helpers.weakref_handle(
- self, "_cleanup", self._keepalive_timeout, self._loop
- )
-
- async def _create_connection(
- self, req: "ClientRequest", traces: List["Trace"], timeout: "ClientTimeout"
- ) -> ResponseHandler:
- raise NotImplementedError()
-
-
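The limit bookkeeping above is what an application configures indirectly through the public aiohttp API; a minimal client-side sketch with example limit values and a placeholder URL:

```python
import asyncio
import aiohttp

async def main() -> None:
    # Example limits: at most 100 pooled connections overall, 8 per host,
    # and idle keep-alive connections dropped after 15 seconds.
    connector = aiohttp.TCPConnector(limit=100, limit_per_host=8, keepalive_timeout=15.0)
    async with aiohttp.ClientSession(connector=connector) as session:
        async with session.get("https://example.com") as resp:
            print(resp.status)

asyncio.run(main())
```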
-class _DNSCacheTable:
- def __init__(self, ttl: Optional[float] = None) -> None:
- self._addrs_rr: Dict[Tuple[str, int], Tuple[Iterator[Dict[str, Any]], int]] = {}
- self._timestamps: Dict[Tuple[str, int], float] = {}
- self._ttl = ttl
-
- def __contains__(self, host: object) -> bool:
- return host in self._addrs_rr
-
- def add(self, key: Tuple[str, int], addrs: List[Dict[str, Any]]) -> None:
- self._addrs_rr[key] = (cycle(addrs), len(addrs))
-
- if self._ttl:
- self._timestamps[key] = monotonic()
-
- def remove(self, key: Tuple[str, int]) -> None:
- self._addrs_rr.pop(key, None)
-
- if self._ttl:
- self._timestamps.pop(key, None)
-
- def clear(self) -> None:
- self._addrs_rr.clear()
- self._timestamps.clear()
-
- def next_addrs(self, key: Tuple[str, int]) -> List[Dict[str, Any]]:
- loop, length = self._addrs_rr[key]
- addrs = list(islice(loop, length))
- # Consume one more element to shift internal state of `cycle`
- next(loop)
- return addrs
-
- def expired(self, key: Tuple[str, int]) -> bool:
- if self._ttl is None:
- return False
-
- return self._timestamps[key] + self._ttl < monotonic()
-
-
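A quick illustration of the round-robin behaviour of `_DNSCacheTable`, assuming the class definition above is in scope; the addresses are documentation placeholders:

```python
cache = _DNSCacheTable(ttl=10)
cache.add(("example.com", 80), [{"host": "192.0.2.1"}, {"host": "192.0.2.2"}])

# Each call returns the full address list rotated by one (via `cycle`),
# so successive connections spread across the resolved addresses.
print(cache.next_addrs(("example.com", 80)))
print(cache.next_addrs(("example.com", 80)))
print(cache.expired(("example.com", 80)))  # False until ttl seconds have elapsed
```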
-class TCPConnector(BaseConnector):
- """TCP connector.
-
- verify_ssl - Set to True to check ssl certifications.
- fingerprint - Pass the binary sha256
- digest of the expected certificate in DER format to verify
- that the certificate the server presents matches. See also
- https://en.wikipedia.org/wiki/Transport_Layer_Security#Certificate_pinning
- resolver - Enable DNS lookups and use this
- resolver
- use_dns_cache - Use memory cache for DNS lookups.
- ttl_dns_cache - Max seconds having cached a DNS entry, None forever.
- family - socket address family
- local_addr - local tuple of (host, port) to bind socket to
-
- keepalive_timeout - (optional) Keep-alive timeout.
- force_close - Set to True to force close and do reconnect
- after each request (and between redirects).
- limit - The total number of simultaneous connections.
- limit_per_host - Number of simultaneous connections to one host.
- enable_cleanup_closed - Enables clean-up closed ssl transports.
- Disabled by default.
- loop - Optional event loop.
- """
-
- def __init__(
- self,
- *,
- verify_ssl: bool = True,
- fingerprint: Optional[bytes] = None,
- use_dns_cache: bool = True,
- ttl_dns_cache: Optional[int] = 10,
- family: int = 0,
- ssl_context: Optional[SSLContext] = None,
- ssl: Union[None, bool, Fingerprint, SSLContext] = None,
- local_addr: Optional[Tuple[str, int]] = None,
- resolver: Optional[AbstractResolver] = None,
- keepalive_timeout: Union[None, float, object] = sentinel,
- force_close: bool = False,
- limit: int = 100,
- limit_per_host: int = 0,
- enable_cleanup_closed: bool = False,
- loop: Optional[asyncio.AbstractEventLoop] = None,
- ):
- super().__init__(
- keepalive_timeout=keepalive_timeout,
- force_close=force_close,
- limit=limit,
- limit_per_host=limit_per_host,
- enable_cleanup_closed=enable_cleanup_closed,
- loop=loop,
- )
-
- self._ssl = _merge_ssl_params(ssl, verify_ssl, ssl_context, fingerprint)
- if resolver is None:
- resolver = DefaultResolver(loop=self._loop)
- self._resolver = resolver
-
- self._use_dns_cache = use_dns_cache
- self._cached_hosts = _DNSCacheTable(ttl=ttl_dns_cache)
- self._throttle_dns_events: Dict[Tuple[str, int], EventResultOrError] = {}
- self._family = family
- self._local_addr = local_addr
-
- def close(self) -> Awaitable[None]:
- """Close all ongoing DNS calls."""
- for ev in self._throttle_dns_events.values():
- ev.cancel()
-
- return super().close()
-
- @property
- def family(self) -> int:
- """Socket family like AF_INET."""
- return self._family
-
- @property
- def use_dns_cache(self) -> bool:
- """True if local DNS caching is enabled."""
- return self._use_dns_cache
-
- def clear_dns_cache(
- self, host: Optional[str] = None, port: Optional[int] = None
- ) -> None:
- """Remove specified host/port or clear all dns local cache."""
- if host is not None and port is not None:
- self._cached_hosts.remove((host, port))
- elif host is not None or port is not None:
- raise ValueError("either both host and port " "or none of them are allowed")
- else:
- self._cached_hosts.clear()
-
- async def _resolve_host(
- self, host: str, port: int, traces: Optional[List["Trace"]] = None
- ) -> List[Dict[str, Any]]:
- if is_ip_address(host):
- return [
- {
- "hostname": host,
- "host": host,
- "port": port,
- "family": self._family,
- "proto": 0,
- "flags": 0,
- }
- ]
-
- if not self._use_dns_cache:
-
- if traces:
- for trace in traces:
- await trace.send_dns_resolvehost_start(host)
-
- res = await self._resolver.resolve(host, port, family=self._family)
-
- if traces:
- for trace in traces:
- await trace.send_dns_resolvehost_end(host)
-
- return res
-
- key = (host, port)
-
- if (key in self._cached_hosts) and (not self._cached_hosts.expired(key)):
- # get result early, before any await (#4014)
- result = self._cached_hosts.next_addrs(key)
-
- if traces:
- for trace in traces:
- await trace.send_dns_cache_hit(host)
- return result
-
- if key in self._throttle_dns_events:
- # get event early, before any await (#4014)
- event = self._throttle_dns_events[key]
- if traces:
- for trace in traces:
- await trace.send_dns_cache_hit(host)
- await event.wait()
- else:
- # update dict early, before any await (#4014)
- self._throttle_dns_events[key] = EventResultOrError(self._loop)
- if traces:
- for trace in traces:
- await trace.send_dns_cache_miss(host)
- try:
-
- if traces:
- for trace in traces:
- await trace.send_dns_resolvehost_start(host)
-
- addrs = await self._resolver.resolve(host, port, family=self._family)
- if traces:
- for trace in traces:
- await trace.send_dns_resolvehost_end(host)
-
- self._cached_hosts.add(key, addrs)
- self._throttle_dns_events[key].set()
- except BaseException as e:
- # Any DNS exception, regardless of the resolver implementation,
- # is propagated to the waiters so they raise the same exception.
- self._throttle_dns_events[key].set(exc=e)
- raise
- finally:
- self._throttle_dns_events.pop(key)
-
- return self._cached_hosts.next_addrs(key)
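
The caching and throttling above coalesce concurrent lookups for the same (host, port) key behind a single in-flight resolution. A standalone sketch of the same idea using plain asyncio primitives (illustrative only; full error propagation to waiters, which the real code handles via EventResultOrError, is simplified here):

import asyncio
from typing import Dict, List, Tuple

_events: Dict[Tuple[str, int], asyncio.Event] = {}
_cache: Dict[Tuple[str, int], List[str]] = {}

async def resolve_once(host: str, port: int) -> List[str]:
    key = (host, port)
    if key in _cache:
        return _cache[key]
    if key in _events:
        # Another task is already resolving this key; wait for its result.
        await _events[key].wait()
        if key not in _cache:
            raise OSError(f"resolution of {host!r} failed in another task")
        return _cache[key]
    _events[key] = asyncio.Event()
    try:
        infos = await asyncio.get_running_loop().getaddrinfo(host, port)
        _cache[key] = [info[4][0] for info in infos]
        return _cache[key]
    finally:
        _events.pop(key).set()
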
-
- async def _create_connection(
- self, req: "ClientRequest", traces: List["Trace"], timeout: "ClientTimeout"
- ) -> ResponseHandler:
- """Create connection.
-
- Has the same keyword arguments as BaseEventLoop.create_connection.
- """
- if req.proxy:
- _, proto = await self._create_proxy_connection(req, traces, timeout)
- else:
- _, proto = await self._create_direct_connection(req, traces, timeout)
-
- return proto
-
- @staticmethod
- @functools.lru_cache(None)
- def _make_ssl_context(verified: bool) -> SSLContext:
- if verified:
- return ssl.create_default_context()
- else:
- sslcontext = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
- sslcontext.options |= ssl.OP_NO_SSLv2
- sslcontext.options |= ssl.OP_NO_SSLv3
- sslcontext.check_hostname = False
- sslcontext.verify_mode = ssl.CERT_NONE
- try:
- sslcontext.options |= ssl.OP_NO_COMPRESSION
- except AttributeError as attr_err:
- warnings.warn(
- "{!s}: The Python interpreter is compiled "
- "against OpenSSL < 1.0.0. Ref: "
- "https://docs.python.org/3/library/ssl.html"
- "#ssl.OP_NO_COMPRESSION".format(attr_err),
- )
- sslcontext.set_default_verify_paths()
- return sslcontext
-
- def _get_ssl_context(self, req: "ClientRequest") -> Optional[SSLContext]:
- """Logic to get the correct SSL context
-
- 0. if req.ssl is false, return None
-
- 1. if ssl_context is specified in req, use it
- 2. if _ssl_context is specified in self, use it
- 3. otherwise:
- 1. if verify_ssl is not specified in req, use self.ssl_context
- (will generate a default context according to self.verify_ssl)
- 2. if verify_ssl is True in req, generate a default SSL context
- 3. if verify_ssl is False in req, generate a SSL context that
- won't verify
- """
- if req.is_ssl():
- if ssl is None: # pragma: no cover
- raise RuntimeError("SSL is not supported.")
- sslcontext = req.ssl
- if isinstance(sslcontext, ssl.SSLContext):
- return sslcontext
- if sslcontext is not None:
- # not verified or fingerprinted
- return self._make_ssl_context(False)
- sslcontext = self._ssl
- if isinstance(sslcontext, ssl.SSLContext):
- return sslcontext
- if sslcontext is not None:
- # not verified or fingerprinted
- return self._make_ssl_context(False)
- return self._make_ssl_context(True)
- else:
- return None
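
As noted in the docstring, the branch order above can be paraphrased as a small standalone helper (not part of aiohttp; `pick_ssl_context` is a made-up name, and it only mirrors the precedence, assuming the request is over SSL):

import ssl
from typing import Optional, Union

def pick_ssl_context(
    req_ssl: Union[None, bool, ssl.SSLContext, object],
    connector_ssl: Union[None, bool, ssl.SSLContext, object],
) -> Optional[ssl.SSLContext]:
    # Request-level setting wins over the connector-level one; SSLContext
    # objects are used verbatim; any other non-None value (False / Fingerprint)
    # yields a non-verifying context; the fallback is a verifying default.
    for candidate in (req_ssl, connector_ssl):
        if isinstance(candidate, ssl.SSLContext):
            return candidate
        if candidate is not None:
            ctx = ssl.create_default_context()
            ctx.check_hostname = False   # stand-in for _make_ssl_context(False)
            ctx.verify_mode = ssl.CERT_NONE
            return ctx
    return ssl.create_default_context()
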
-
- def _get_fingerprint(self, req: "ClientRequest") -> Optional["Fingerprint"]:
- ret = req.ssl
- if isinstance(ret, Fingerprint):
- return ret
- ret = self._ssl
- if isinstance(ret, Fingerprint):
- return ret
- return None
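
A Fingerprint can come from the request or from the connector; it pins the server certificate by the SHA-256 digest of its DER encoding. A minimal sketch with the public aiohttp API (the certificate bytes below are a placeholder):

import hashlib
import aiohttp

# SHA-256 digest of the server's DER-encoded certificate (placeholder bytes).
PINNED = aiohttp.Fingerprint(hashlib.sha256(b"<der-encoded certificate>").digest())

async def fetch_pinned(url: str) -> int:
    async with aiohttp.ClientSession() as session:
        async with session.get(url, ssl=PINNED) as resp:
            return resp.status
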
-
- async def _wrap_create_connection(
- self,
- *args: Any,
- req: "ClientRequest",
- timeout: "ClientTimeout",
- client_error: Type[Exception] = ClientConnectorError,
- **kwargs: Any,
- ) -> Tuple[asyncio.Transport, ResponseHandler]:
- try:
- async with ceil_timeout(timeout.sock_connect):
- return await self._loop.create_connection(*args, **kwargs) # type: ignore[return-value] # noqa
- except cert_errors as exc:
- raise ClientConnectorCertificateError(req.connection_key, exc) from exc
- except ssl_errors as exc:
- raise ClientConnectorSSLError(req.connection_key, exc) from exc
- except OSError as exc:
- if exc.errno is None and isinstance(exc, asyncio.TimeoutError):
- raise
- raise client_error(req.connection_key, exc) from exc
-
- def _fail_on_no_start_tls(self, req: "ClientRequest") -> None:
- """Raise a :py:exc:`RuntimeError` on missing ``start_tls()``.
-
- One case is that :py:meth:`asyncio.loop.start_tls` is not yet
- implemented under Python 3.6. It is necessary for TLS-in-TLS so
- that it is possible to send HTTPS queries through HTTPS proxies.
-
- This doesn't affect regular HTTP requests, though.
- """
- if not req.is_ssl():
- return
-
- proxy_url = req.proxy
- assert proxy_url is not None
- if proxy_url.scheme != "https":
- return
-
- self._check_loop_for_start_tls()
-
- def _check_loop_for_start_tls(self) -> None:
- try:
- self._loop.start_tls
- except AttributeError as attr_exc:
- raise RuntimeError(
- "An HTTPS request is being sent through an HTTPS proxy. "
- "This needs support for TLS in TLS but it is not implemented "
- "in your runtime for the stdlib asyncio.\n\n"
- "Please upgrade to Python 3.7 or higher. For more details, "
- "please see:\n"
- "* https://bugs.python.org/issue37179\n"
- "* https://github.com/python/cpython/pull/28073\n"
- "* https://docs.aiohttp.org/en/stable/"
- "client_advanced.html#proxy-support\n"
- "* https://github.com/aio-libs/aiohttp/discussions/6044\n",
- ) from attr_exc
-
- def _loop_supports_start_tls(self) -> bool:
- try:
- self._check_loop_for_start_tls()
- except RuntimeError:
- return False
- else:
- return True
-
- def _warn_about_tls_in_tls(
- self,
- underlying_transport: asyncio.Transport,
- req: "ClientRequest",
- ) -> None:
- """Issue a warning if the requested URL has HTTPS scheme."""
- if req.request_info.url.scheme != "https":
- return
-
- asyncio_supports_tls_in_tls = getattr(
- underlying_transport,
- "_start_tls_compatible",
- False,
- )
-
- if asyncio_supports_tls_in_tls:
- return
-
- warnings.warn(
- "An HTTPS request is being sent through an HTTPS proxy. "
- "This support for TLS in TLS is known to be disabled "
- "in the stdlib asyncio. This is why you'll probably see "
- "an error in the log below.\n\n"
- "It is possible to enable it via monkeypatching under "
- "Python 3.7 or higher. For more details, see:\n"
- "* https://bugs.python.org/issue37179\n"
- "* https://github.com/python/cpython/pull/28073\n\n"
- "You can temporarily patch this as follows:\n"
- "* https://docs.aiohttp.org/en/stable/client_advanced.html#proxy-support\n"
- "* https://github.com/aio-libs/aiohttp/discussions/6044\n",
- RuntimeWarning,
- source=self,
- # stacklevel: at least 3 of the calls in the stack originate
- # from the methods in this class.
- stacklevel=3,
- )
-
- async def _start_tls_connection(
- self,
- underlying_transport: asyncio.Transport,
- req: "ClientRequest",
- timeout: "ClientTimeout",
- client_error: Type[Exception] = ClientConnectorError,
- ) -> Tuple[asyncio.BaseTransport, ResponseHandler]:
- """Wrap the raw TCP transport with TLS."""
- tls_proto = self._factory() # Create a brand new proto for TLS
-
- # Safety of the `cast()` call here is based on the fact that
- # internally `_get_ssl_context()` only returns `None` when
- # `req.is_ssl()` evaluates to `False`, which never happens in this
- # code path. Admittedly this is fragile maintainability-wise, but
- # that is to be solved separately.
- sslcontext = cast(ssl.SSLContext, self._get_ssl_context(req))
-
- try:
- async with ceil_timeout(timeout.sock_connect):
- try:
- tls_transport = await self._loop.start_tls(
- underlying_transport,
- tls_proto,
- sslcontext,
- server_hostname=req.host,
- ssl_handshake_timeout=timeout.total,
- )
- except BaseException:
- # We need to close the underlying transport since
- # `start_tls()` probably failed before it had a
- # chance to do this:
- underlying_transport.close()
- raise
- except cert_errors as exc:
- raise ClientConnectorCertificateError(req.connection_key, exc) from exc
- except ssl_errors as exc:
- raise ClientConnectorSSLError(req.connection_key, exc) from exc
- except OSError as exc:
- if exc.errno is None and isinstance(exc, asyncio.TimeoutError):
- raise
- raise client_error(req.connection_key, exc) from exc
- except TypeError as type_err:
- # Example cause looks like this:
- # TypeError: transport is not supported by start_tls()
-
- raise ClientConnectionError(
- "Cannot initialize a TLS-in-TLS connection to host "
- f"{req.host!s}:{req.port:d} through an underlying connection "
- f"to an HTTPS proxy {req.proxy!s} ssl:{req.ssl or 'default'} "
- f"[{type_err!s}]"
- ) from type_err
- else:
- if tls_transport is None:
- msg = "Failed to start TLS (possibly caused by closing transport)"
- raise client_error(req.connection_key, OSError(msg))
- tls_proto.connection_made(
- tls_transport
- ) # Kick the state machine of the new TLS protocol
-
- return tls_transport, tls_proto
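
Outside of aiohttp, the same upgrade can be done directly against the asyncio API; a bare sketch of wrapping an already-connected transport/protocol pair (Python 3.7+):

import asyncio
import ssl

async def upgrade_to_tls(
    loop: asyncio.AbstractEventLoop,
    transport: asyncio.Transport,
    protocol: asyncio.Protocol,
    hostname: str,
) -> asyncio.Transport:
    ctx = ssl.create_default_context()
    # start_tls() returns a new TLS transport wrapping the plaintext one.
    tls_transport = await loop.start_tls(transport, protocol, ctx, server_hostname=hostname)
    if tls_transport is None:
        raise ConnectionError("TLS upgrade failed")
    return tls_transport
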
-
- async def _create_direct_connection(
- self,
- req: "ClientRequest",
- traces: List["Trace"],
- timeout: "ClientTimeout",
- *,
- client_error: Type[Exception] = ClientConnectorError,
- ) -> Tuple[asyncio.Transport, ResponseHandler]:
- sslcontext = self._get_ssl_context(req)
- fingerprint = self._get_fingerprint(req)
-
- host = req.url.raw_host
- assert host is not None
- port = req.port
- assert port is not None
- host_resolved = asyncio.ensure_future(
- self._resolve_host(host, port, traces=traces), loop=self._loop
- )
- try:
- # Cancelling this lookup should not cancel the underlying lookup
- # or else the cancel event will get broadcast to all the waiters
- # across all connections.
- hosts = await asyncio.shield(host_resolved)
- except asyncio.CancelledError:
-
- def drop_exception(fut: "asyncio.Future[List[Dict[str, Any]]]") -> None:
- with suppress(Exception, asyncio.CancelledError):
- fut.result()
-
- host_resolved.add_done_callback(drop_exception)
- raise
- except OSError as exc:
- if exc.errno is None and isinstance(exc, asyncio.TimeoutError):
- raise
- # In the proxy case this is not a ClientProxyConnectionError:
- # the failure is in resolving the proxy IP itself.
- raise ClientConnectorError(req.connection_key, exc) from exc
-
- last_exc: Optional[Exception] = None
-
- for hinfo in hosts:
- host = hinfo["host"]
- port = hinfo["port"]
-
- try:
- transp, proto = await self._wrap_create_connection(
- self._factory,
- host,
- port,
- timeout=timeout,
- ssl=sslcontext,
- family=hinfo["family"],
- proto=hinfo["proto"],
- flags=hinfo["flags"],
- server_hostname=hinfo["hostname"] if sslcontext else None,
- local_addr=self._local_addr,
- req=req,
- client_error=client_error,
- )
- except ClientConnectorError as exc:
- last_exc = exc
- continue
-
- if req.is_ssl() and fingerprint:
- try:
- fingerprint.check(transp)
- except ServerFingerprintMismatch as exc:
- transp.close()
- if not self._cleanup_closed_disabled:
- self._cleanup_closed_transports.append(transp)
- last_exc = exc
- continue
-
- return transp, proto
- else:
- assert last_exc is not None
- raise last_exc
-
- async def _create_proxy_connection(
- self, req: "ClientRequest", traces: List["Trace"], timeout: "ClientTimeout"
- ) -> Tuple[asyncio.BaseTransport, ResponseHandler]:
- self._fail_on_no_start_tls(req)
- runtime_has_start_tls = self._loop_supports_start_tls()
-
- headers: Dict[str, str] = {}
- if req.proxy_headers is not None:
- headers = req.proxy_headers # type: ignore[assignment]
- headers[hdrs.HOST] = req.headers[hdrs.HOST]
-
- url = req.proxy
- assert url is not None
- proxy_req = ClientRequest(
- hdrs.METH_GET,
- url,
- headers=headers,
- auth=req.proxy_auth,
- loop=self._loop,
- ssl=req.ssl,
- )
-
- # create connection to proxy server
- transport, proto = await self._create_direct_connection(
- proxy_req, [], timeout, client_error=ClientProxyConnectionError
- )
-
- # Many HTTP proxies have buggy keep-alive support. Do not reuse
- # the connection; close it after processing every response.
- proto.force_close()
-
- auth = proxy_req.headers.pop(hdrs.AUTHORIZATION, None)
- if auth is not None:
- if not req.is_ssl():
- req.headers[hdrs.PROXY_AUTHORIZATION] = auth
- else:
- proxy_req.headers[hdrs.PROXY_AUTHORIZATION] = auth
-
- if req.is_ssl():
- if runtime_has_start_tls:
- self._warn_about_tls_in_tls(transport, req)
-
- # For HTTPS requests over an HTTP proxy we must ask the proxy to
- # tunnel the connection, so we send a CONNECT request:
- # CONNECT www.python.org:443 HTTP/1.1
- # Host: www.python.org
- #
- # Next we must perform the TLS handshake and wrap the raw socket
- # into a secure one; asyncio handles this for us. (A raw-socket
- # sketch of this exchange follows after this method.)
- proxy_req.method = hdrs.METH_CONNECT
- proxy_req.url = req.url
- key = attr.evolve(
- req.connection_key, proxy=None, proxy_auth=None, proxy_headers_hash=None
- )
- conn = Connection(self, key, proto, self._loop)
- proxy_resp = await proxy_req.send(conn)
- try:
- protocol = conn._protocol
- assert protocol is not None
-
- # read_until_eof=True ensures the connection isn't closed once the
- # response is received and processed, allowing start_tls() to work
- # on the connection below.
- protocol.set_response_params(read_until_eof=runtime_has_start_tls)
- resp = await proxy_resp.start(conn)
- except BaseException:
- proxy_resp.close()
- conn.close()
- raise
- else:
- conn._protocol = None
- conn._transport = None
- try:
- if resp.status != 200:
- message = resp.reason
- if message is None:
- message = RESPONSES[resp.status][0]
- raise ClientHttpProxyError(
- proxy_resp.request_info,
- resp.history,
- status=resp.status,
- message=message,
- headers=resp.headers,
- )
- if not runtime_has_start_tls:
- rawsock = transport.get_extra_info("socket", default=None)
- if rawsock is None:
- raise RuntimeError(
- "Transport does not expose socket instance"
- )
- # Duplicate the socket, so now we can close proxy transport
- rawsock = rawsock.dup()
- except BaseException:
- # It shouldn't be closed in `finally` because it's fed to
- # `loop.start_tls()` and the docs say not to touch it after
- # passing there.
- transport.close()
- raise
- finally:
- if not runtime_has_start_tls:
- transport.close()
-
- if not runtime_has_start_tls:
- # HTTP proxy with support for upgrade to HTTPS
- sslcontext = self._get_ssl_context(req)
- return await self._wrap_create_connection(
- self._factory,
- timeout=timeout,
- ssl=sslcontext,
- sock=rawsock,
- server_hostname=req.host,
- req=req,
- )
-
- return await self._start_tls_connection(
- # Access the old transport for the last time before it's
- # closed and forgotten forever:
- transport,
- req=req,
- timeout=timeout,
- )
- finally:
- proxy_resp.close()
-
- return transport, proto
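
For reference, this is roughly what the CONNECT exchange sketched in the comments above looks like on the wire, as a raw-socket sketch (illustrative only: no proxy authentication, minimal error handling, and the proxy response is read naively):

import socket
import ssl

def open_tunnel(proxy_host: str, proxy_port: int, target_host: str, target_port: int) -> ssl.SSLSocket:
    sock = socket.create_connection((proxy_host, proxy_port))
    request = (
        f"CONNECT {target_host}:{target_port} HTTP/1.1\r\n"
        f"Host: {target_host}:{target_port}\r\n"
        "\r\n"
    )
    sock.sendall(request.encode("ascii"))
    status_line = sock.recv(4096).split(b"\r\n", 1)[0]
    if b" 200" not in status_line:
        raise OSError(f"proxy refused tunnel: {status_line!r}")
    # After the 200 response the socket is a raw byte pipe to the target,
    # so the TLS handshake happens end-to-end with the target host.
    ctx = ssl.create_default_context()
    return ctx.wrap_socket(sock, server_hostname=target_host)
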
-
-
-class UnixConnector(BaseConnector):
- """Unix socket connector.
-
- path - Unix socket path.
- keepalive_timeout - (optional) Keep-alive timeout.
- force_close - Set to True to force close and do reconnect
- after each request (and between redirects).
- limit - The total number of simultaneous connections.
- limit_per_host - Number of simultaneous connections to one host.
- loop - Optional event loop.
- """
-
- def __init__(
- self,
- path: str,
- force_close: bool = False,
- keepalive_timeout: Union[object, float, None] = sentinel,
- limit: int = 100,
- limit_per_host: int = 0,
- loop: Optional[asyncio.AbstractEventLoop] = None,
- ) -> None:
- super().__init__(
- force_close=force_close,
- keepalive_timeout=keepalive_timeout,
- limit=limit,
- limit_per_host=limit_per_host,
- loop=loop,
- )
- self._path = path
-
- @property
- def path(self) -> str:
- """Path to unix socket."""
- return self._path
-
- async def _create_connection(
- self, req: "ClientRequest", traces: List["Trace"], timeout: "ClientTimeout"
- ) -> ResponseHandler:
- try:
- async with ceil_timeout(timeout.sock_connect):
- _, proto = await self._loop.create_unix_connection(
- self._factory, self._path
- )
- except OSError as exc:
- if exc.errno is None and isinstance(exc, asyncio.TimeoutError):
- raise
- raise UnixClientConnectorError(self.path, req.connection_key, exc) from exc
-
- return cast(ResponseHandler, proto)
-
-
-class NamedPipeConnector(BaseConnector):
- """Named pipe connector.
-
- Only supported by the proactor event loop.
- See also: https://docs.python.org/3.7/library/asyncio-eventloop.html
-
- path - Windows named pipe path.
- keepalive_timeout - (optional) Keep-alive timeout.
- force_close - Set to True to force close and do reconnect
- after each request (and between redirects).
- limit - The total number of simultaneous connections.
- limit_per_host - Number of simultaneous connections to one host.
- loop - Optional event loop.
- """
-
- def __init__(
- self,
- path: str,
- force_close: bool = False,
- keepalive_timeout: Union[object, float, None] = sentinel,
- limit: int = 100,
- limit_per_host: int = 0,
- loop: Optional[asyncio.AbstractEventLoop] = None,
- ) -> None:
- super().__init__(
- force_close=force_close,
- keepalive_timeout=keepalive_timeout,
- limit=limit,
- limit_per_host=limit_per_host,
- loop=loop,
- )
- if not isinstance(
- self._loop, asyncio.ProactorEventLoop # type: ignore[attr-defined]
- ):
- raise RuntimeError(
- "Named Pipes are only available in the proactor event loop under Windows"
- )
- self._path = path
-
- @property
- def path(self) -> str:
- """Path to the named pipe."""
- return self._path
-
- async def _create_connection(
- self, req: "ClientRequest", traces: List["Trace"], timeout: "ClientTimeout"
- ) -> ResponseHandler:
- try:
- async with ceil_timeout(timeout.sock_connect):
- _, proto = await self._loop.create_pipe_connection( # type: ignore[attr-defined] # noqa: E501
- self._factory, self._path
- )
- # The drain is required so that connection_made() is called and the
- # transport is set; otherwise it would not be set before the
- # `assert conn.transport is not None`
- # in client.py's _request method.
- await asyncio.sleep(0)
- # Another option would be to set the transport manually, e.g.
- # `proto.transport = trans`
- except OSError as exc:
- if exc.errno is None and isinstance(exc, asyncio.TimeoutError):
- raise
- raise ClientConnectorError(req.connection_key, exc) from exc
-
- return cast(ResponseHandler, proto)
diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/dsv-576afacd.js b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/dsv-576afacd.js
deleted file mode 100644
index 832d450961d23fb14b577c045f0c24c61e74c4e6..0000000000000000000000000000000000000000
--- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/dsv-576afacd.js
+++ /dev/null
@@ -1,6 +0,0 @@
-var D={},A={},E=34,m=10,R=13;function I(r){return new Function("d","return {"+r.map(function(t,e){return JSON.stringify(t)+": d["+e+'] || ""'}).join(",")+"}")}function B(r,t){var e=I(r);return function(a,c){return t(e(a),c,r)}}function F(r){var t=Object.create(null),e=[];return r.forEach(function(a){for(var c in a)c in t||e.push(t[c]=c)}),e}function f(r,t){var e=r+"",a=e.length;return a9999?"+"+f(r,6):f(r,4)}function S(r){var t=r.getUTCHours(),e=r.getUTCMinutes(),a=r.getUTCSeconds(),c=r.getUTCMilliseconds();return isNaN(r)?"Invalid Date":L(r.getUTCFullYear())+"-"+f(r.getUTCMonth()+1,2)+"-"+f(r.getUTCDate(),2)+(c?"T"+f(t,2)+":"+f(e,2)+":"+f(a,2)+"."+f(c,3)+"Z":a?"T"+f(t,2)+":"+f(e,2)+":"+f(a,2)+"Z":e||t?"T"+f(t,2)+":"+f(e,2)+"Z":"")}function Z(r){var t=new RegExp('["'+r+`
-\r]`),e=r.charCodeAt(0);function a(n,o){var s,i,u=c(n,function(h,l){if(s)return s(h,l-1);i=h,s=o?B(h,o):I(h)});return u.columns=i||[],u}function c(n,o){var s=[],i=n.length,u=0,h=0,l,v=i<=0,C=!1;n.charCodeAt(i-1)===m&&--i,n.charCodeAt(i-1)===R&&--i;function w(){if(v)return A;if(C)return C=!1,D;var j,d=u,p;if(n.charCodeAt(d)===E){for(;u++=i?v=!0:(p=n.charCodeAt(u++))===m?C=!0:p===R&&(C=!0,n.charCodeAt(u)===m&&++u),n.slice(d+1,j-1).replace(/""/g,'"')}for(;u{"scale"in a&&_(0,u=a.scale),"gap"in a&&_(1,n=a.gap),"min_width"in a&&_(2,l=a.min_width),"elem_id"in a&&_(3,i=a.elem_id),"elem_classes"in a&&_(4,g=a.elem_classes),"visible"in a&&_(5,d=a.visible),"variant"in a&&_(6,h=a.variant),"$$scope"in a&&_(7,s=a.$$scope)},[u,n,l,i,g,d,h,s,m]}class F extends w{constructor(e){super(),b(this,e,D,B,S,{scale:0,gap:1,min_width:2,elem_id:3,elem_classes:4,visible:5,variant:6})}}export{F as S};
-//# sourceMappingURL=StaticColumn-ab6a4f96.js.map
diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/markdown_it/parser_core.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/markdown_it/parser_core.py
deleted file mode 100644
index ca5ab2566ba5bd00e654a7af39e3603717ae7194..0000000000000000000000000000000000000000
--- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/markdown_it/parser_core.py
+++ /dev/null
@@ -1,45 +0,0 @@
-"""
- * class Core
- *
- * Top-level rules executor. Glues block/inline parsers and does intermediate
- * transformations.
-"""
-from __future__ import annotations
-
-from typing import Callable
-
-from .ruler import Ruler
-from .rules_core import (
- block,
- inline,
- linkify,
- normalize,
- replace,
- smartquotes,
- text_join,
-)
-from .rules_core.state_core import StateCore
-
-RuleFuncCoreType = Callable[[StateCore], None]
-
-_rules: list[tuple[str, RuleFuncCoreType]] = [
- ("normalize", normalize),
- ("block", block),
- ("inline", inline),
- ("linkify", linkify),
- ("replacements", replace),
- ("smartquotes", smartquotes),
- ("text_join", text_join),
-]
-
-
-class ParserCore:
- def __init__(self) -> None:
- self.ruler = Ruler[RuleFuncCoreType]()
- for name, rule in _rules:
- self.ruler.push(name, rule)
-
- def process(self, state: StateCore) -> None:
- """Executes core chain rules."""
- for rule in self.ruler.getRules(""):
- rule(state)
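
This core chain is normally driven through the package's top-level API; a minimal sketch (standard markdown-it-py usage; render() parses via this core chain before rendering):

from markdown_it import MarkdownIt

md = MarkdownIt()
# Parsing runs the enabled core rules above in order (normalize, block,
# inline, ...; linkify/replacements/smartquotes depend on the chosen options).
print(md.render("# Title\n\nSome *emphasis* and a [link](https://example.org)."))
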
diff --git a/spaces/deelerb/3dselfie/PIFu/lib/data/TrainDataset.py b/spaces/deelerb/3dselfie/PIFu/lib/data/TrainDataset.py
deleted file mode 100644
index 47a639bc644ba7a26e0f2799ffb5f170eed93318..0000000000000000000000000000000000000000
--- a/spaces/deelerb/3dselfie/PIFu/lib/data/TrainDataset.py
+++ /dev/null
@@ -1,390 +0,0 @@
-from torch.utils.data import Dataset
-import numpy as np
-import os
-import random
-import torchvision.transforms as transforms
-from PIL import Image, ImageOps
-import cv2
-import torch
-from PIL.ImageFilter import GaussianBlur
-import trimesh
-import logging
-
-log = logging.getLogger('trimesh')
-log.setLevel(40)
-
-def load_trimesh(root_dir):
- folders = os.listdir(root_dir)
- meshs = {}
- for i, f in enumerate(folders):
- sub_name = f
- meshs[sub_name] = trimesh.load(os.path.join(root_dir, f, '%s_100k.obj' % sub_name))
-
- return meshs
-
-def save_samples_truncted_prob(fname, points, prob):
- '''
- Save a visualization of the sampling to a PLY file.
- Red points represent positive predictions.
- Green points represent negative predictions.
- :param fname: File name to save
- :param points: [N, 3] array of points
- :param prob: [N, 1] array of predictions in the range [0, 1]
- :return:
- '''
- r = (prob > 0.5).reshape([-1, 1]) * 255
- g = (prob < 0.5).reshape([-1, 1]) * 255
- b = np.zeros(r.shape)
-
- to_save = np.concatenate([points, r, g, b], axis=-1)
- return np.savetxt(fname,
- to_save,
- fmt='%.6f %.6f %.6f %d %d %d',
- comments='',
- header=(
- 'ply\nformat ascii 1.0\nelement vertex {:d}\nproperty float x\nproperty float y\nproperty float z\nproperty uchar red\nproperty uchar green\nproperty uchar blue\nend_header').format(
- points.shape[0])
- )
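
A quick, hypothetical smoke test for the helper above (file name and point counts are made up; it writes a PLY file that any point-cloud viewer can display):

import numpy as np

pts = np.random.rand(1000, 3) * 200.0 - 100.0   # random points roughly inside the bbox
probs = np.random.rand(1000, 1)                 # stand-in for network predictions
save_samples_truncted_prob('debug_samples.ply', pts, probs)
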
-
-
-class TrainDataset(Dataset):
- @staticmethod
- def modify_commandline_options(parser, is_train):
- return parser
-
- def __init__(self, opt, phase='train'):
- self.opt = opt
- self.projection_mode = 'orthogonal'
-
- # Path setup
- self.root = self.opt.dataroot
- self.RENDER = os.path.join(self.root, 'RENDER')
- self.MASK = os.path.join(self.root, 'MASK')
- self.PARAM = os.path.join(self.root, 'PARAM')
- self.UV_MASK = os.path.join(self.root, 'UV_MASK')
- self.UV_NORMAL = os.path.join(self.root, 'UV_NORMAL')
- self.UV_RENDER = os.path.join(self.root, 'UV_RENDER')
- self.UV_POS = os.path.join(self.root, 'UV_POS')
- self.OBJ = os.path.join(self.root, 'GEO', 'OBJ')
-
- self.B_MIN = np.array([-128, -28, -128])
- self.B_MAX = np.array([128, 228, 128])
-
- self.is_train = (phase == 'train')
- self.load_size = self.opt.loadSize
-
- self.num_views = self.opt.num_views
-
- self.num_sample_inout = self.opt.num_sample_inout
- self.num_sample_color = self.opt.num_sample_color
-
- self.yaw_list = list(range(0,360,1))
- self.pitch_list = [0]
- self.subjects = self.get_subjects()
-
- # PIL to tensor
- self.to_tensor = transforms.Compose([
- transforms.Resize(self.load_size),
- transforms.ToTensor(),
- transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
- ])
-
- # augmentation
- self.aug_trans = transforms.Compose([
- transforms.ColorJitter(brightness=opt.aug_bri, contrast=opt.aug_con, saturation=opt.aug_sat,
- hue=opt.aug_hue)
- ])
-
- self.mesh_dic = load_trimesh(self.OBJ)
-
- def get_subjects(self):
- all_subjects = os.listdir(self.RENDER)
- var_subjects = np.loadtxt(os.path.join(self.root, 'val.txt'), dtype=str)
- if len(var_subjects) == 0:
- return all_subjects
-
- if self.is_train:
- return sorted(list(set(all_subjects) - set(var_subjects)))
- else:
- return sorted(list(var_subjects))
-
- def __len__(self):
- return len(self.subjects) * len(self.yaw_list) * len(self.pitch_list)
-
- def get_render(self, subject, num_views, yid=0, pid=0, random_sample=False):
- '''
- Return the render data
- :param subject: subject name
- :param num_views: how many views to return
- :param yid: index of the first view's yaw. If random_sample is True, views are sampled randomly.
- :return:
- 'img': [num_views, C, W, H] images
- 'calib': [num_views, 4, 4] calibration matrix
- 'extrinsic': [num_views, 4, 4] extrinsic matrix
- 'mask': [num_views, 1, W, H] masks
- '''
- pitch = self.pitch_list[pid]
-
- # The ids are an even distribution of num_views around view_id
- view_ids = [self.yaw_list[(yid + len(self.yaw_list) // num_views * offset) % len(self.yaw_list)]
- for offset in range(num_views)]
- if random_sample:
- view_ids = np.random.choice(self.yaw_list, num_views, replace=False)
-
- calib_list = []
- render_list = []
- mask_list = []
- extrinsic_list = []
-
- for vid in view_ids:
- param_path = os.path.join(self.PARAM, subject, '%d_%d_%02d.npy' % (vid, pitch, 0))
- render_path = os.path.join(self.RENDER, subject, '%d_%d_%02d.jpg' % (vid, pitch, 0))
- mask_path = os.path.join(self.MASK, subject, '%d_%d_%02d.png' % (vid, pitch, 0))
-
- # loading calibration data
- param = np.load(param_path, allow_pickle=True)
- # pixel unit / world unit
- ortho_ratio = param.item().get('ortho_ratio')
- # world unit / model unit
- scale = param.item().get('scale')
- # camera center world coordinate
- center = param.item().get('center')
- # model rotation
- R = param.item().get('R')
-
- translate = -np.matmul(R, center).reshape(3, 1)
- extrinsic = np.concatenate([R, translate], axis=1)
- extrinsic = np.concatenate([extrinsic, np.array([0, 0, 0, 1]).reshape(1, 4)], 0)
- # Match camera space to image pixel space
- scale_intrinsic = np.identity(4)
- scale_intrinsic[0, 0] = scale / ortho_ratio
- scale_intrinsic[1, 1] = -scale / ortho_ratio
- scale_intrinsic[2, 2] = scale / ortho_ratio
- # Match image pixel space to image uv space
- uv_intrinsic = np.identity(4)
- uv_intrinsic[0, 0] = 1.0 / float(self.opt.loadSize // 2)
- uv_intrinsic[1, 1] = 1.0 / float(self.opt.loadSize // 2)
- uv_intrinsic[2, 2] = 1.0 / float(self.opt.loadSize // 2)
- # Transform under image pixel space
- trans_intrinsic = np.identity(4)
-
- mask = Image.open(mask_path).convert('L')
- render = Image.open(render_path).convert('RGB')
-
- if self.is_train:
- # Pad images
- pad_size = int(0.1 * self.load_size)
- render = ImageOps.expand(render, pad_size, fill=0)
- mask = ImageOps.expand(mask, pad_size, fill=0)
-
- w, h = render.size
- th, tw = self.load_size, self.load_size
-
- # random flip
- if self.opt.random_flip and np.random.rand() > 0.5:
- scale_intrinsic[0, 0] *= -1
- render = transforms.RandomHorizontalFlip(p=1.0)(render)
- mask = transforms.RandomHorizontalFlip(p=1.0)(mask)
-
- # random scale
- if self.opt.random_scale:
- rand_scale = random.uniform(0.9, 1.1)
- w = int(rand_scale * w)
- h = int(rand_scale * h)
- render = render.resize((w, h), Image.BILINEAR)
- mask = mask.resize((w, h), Image.NEAREST)
- scale_intrinsic *= rand_scale
- scale_intrinsic[3, 3] = 1
-
- # random translate in the pixel space
- if self.opt.random_trans:
- dx = random.randint(-int(round((w - tw) / 10.)),
- int(round((w - tw) / 10.)))
- dy = random.randint(-int(round((h - th) / 10.)),
- int(round((h - th) / 10.)))
- else:
- dx = 0
- dy = 0
-
- trans_intrinsic[0, 3] = -dx / float(self.opt.loadSize // 2)
- trans_intrinsic[1, 3] = -dy / float(self.opt.loadSize // 2)
-
- x1 = int(round((w - tw) / 2.)) + dx
- y1 = int(round((h - th) / 2.)) + dy
-
- render = render.crop((x1, y1, x1 + tw, y1 + th))
- mask = mask.crop((x1, y1, x1 + tw, y1 + th))
-
- render = self.aug_trans(render)
-
- # random blur
- if self.opt.aug_blur > 0.00001:
- blur = GaussianBlur(np.random.uniform(0, self.opt.aug_blur))
- render = render.filter(blur)
-
- intrinsic = np.matmul(trans_intrinsic, np.matmul(uv_intrinsic, scale_intrinsic))
- calib = torch.Tensor(np.matmul(intrinsic, extrinsic)).float()
- extrinsic = torch.Tensor(extrinsic).float()
-
- mask = transforms.Resize(self.load_size)(mask)
- mask = transforms.ToTensor()(mask).float()
- mask_list.append(mask)
-
- render = self.to_tensor(render)
- render = mask.expand_as(render) * render
-
- render_list.append(render)
- calib_list.append(calib)
- extrinsic_list.append(extrinsic)
-
- return {
- 'img': torch.stack(render_list, dim=0),
- 'calib': torch.stack(calib_list, dim=0),
- 'extrinsic': torch.stack(extrinsic_list, dim=0),
- 'mask': torch.stack(mask_list, dim=0)
- }
-
- def select_sampling_method(self, subject):
- if not self.is_train:
- random.seed(1991)
- np.random.seed(1991)
- torch.manual_seed(1991)
- mesh = self.mesh_dic[subject]
- surface_points, _ = trimesh.sample.sample_surface(mesh, 4 * self.num_sample_inout)
- sample_points = surface_points + np.random.normal(scale=self.opt.sigma, size=surface_points.shape)
-
- # add random points within image space
- length = self.B_MAX - self.B_MIN
- random_points = np.random.rand(self.num_sample_inout // 4, 3) * length + self.B_MIN
- sample_points = np.concatenate([sample_points, random_points], 0)
- np.random.shuffle(sample_points)
-
- inside = mesh.contains(sample_points)
- inside_points = sample_points[inside]
- outside_points = sample_points[np.logical_not(inside)]
-
- nin = inside_points.shape[0]
- inside_points = inside_points[
- :self.num_sample_inout // 2] if nin > self.num_sample_inout // 2 else inside_points
- outside_points = outside_points[
- :self.num_sample_inout // 2] if nin > self.num_sample_inout // 2 else outside_points[
- :(self.num_sample_inout - nin)]
-
- samples = np.concatenate([inside_points, outside_points], 0).T
- labels = np.concatenate([np.ones((1, inside_points.shape[0])), np.zeros((1, outside_points.shape[0]))], 1)
-
- # save_samples_truncted_prob('out.ply', samples.T, labels.T)
- # exit()
-
- samples = torch.Tensor(samples).float()
- labels = torch.Tensor(labels).float()
-
- del mesh
-
- return {
- 'samples': samples,
- 'labels': labels
- }
-
-
- def get_color_sampling(self, subject, yid, pid=0):
- yaw = self.yaw_list[yid]
- pitch = self.pitch_list[pid]
- uv_render_path = os.path.join(self.UV_RENDER, subject, '%d_%d_%02d.jpg' % (yaw, pitch, 0))
- uv_mask_path = os.path.join(self.UV_MASK, subject, '%02d.png' % (0))
- uv_pos_path = os.path.join(self.UV_POS, subject, '%02d.exr' % (0))
- uv_normal_path = os.path.join(self.UV_NORMAL, subject, '%02d.png' % (0))
-
- # Segmentation mask for the uv render.
- # [H, W] bool
- uv_mask = cv2.imread(uv_mask_path)
- uv_mask = uv_mask[:, :, 0] != 0
- # UV render. each pixel is the color of the point.
- # [H, W, 3] 0 ~ 1 float
- uv_render = cv2.imread(uv_render_path)
- uv_render = cv2.cvtColor(uv_render, cv2.COLOR_BGR2RGB) / 255.0
-
- # Normal render. each pixel is the surface normal of the point.
- # [H, W, 3] -1 ~ 1 float
- uv_normal = cv2.imread(uv_normal_path)
- uv_normal = cv2.cvtColor(uv_normal, cv2.COLOR_BGR2RGB) / 255.0
- uv_normal = 2.0 * uv_normal - 1.0
- # Position render. each pixel is the xyz coordinates of the point
- uv_pos = cv2.imread(uv_pos_path, 2 | 4)[:, :, ::-1]
-
- ### In these few lines we flatten the masks, positions, and normals
- uv_mask = uv_mask.reshape((-1))
- uv_pos = uv_pos.reshape((-1, 3))
- uv_render = uv_render.reshape((-1, 3))
- uv_normal = uv_normal.reshape((-1, 3))
-
- surface_points = uv_pos[uv_mask]
- surface_colors = uv_render[uv_mask]
- surface_normal = uv_normal[uv_mask]
-
- if self.num_sample_color:
- sample_list = random.sample(range(0, surface_points.shape[0] - 1), self.num_sample_color)
- surface_points = surface_points[sample_list].T
- surface_colors = surface_colors[sample_list].T
- surface_normal = surface_normal[sample_list].T
-
- # Samples are around the true surface with an offset
- normal = torch.Tensor(surface_normal).float()
- samples = torch.Tensor(surface_points).float() \
- + torch.normal(mean=torch.zeros((1, normal.size(1))), std=self.opt.sigma).expand_as(normal) * normal
-
- # Normalized to [-1, 1]
- rgbs_color = 2.0 * torch.Tensor(surface_colors).float() - 1.0
-
- return {
- 'color_samples': samples,
- 'rgbs': rgbs_color
- }
-
- def get_item(self, index):
- # In case of a missing file or IO error, switch to a random sample instead
- # try:
- sid = index % len(self.subjects)
- tmp = index // len(self.subjects)
- yid = tmp % len(self.yaw_list)
- pid = tmp // len(self.yaw_list)
-
- # name of the subject 'rp_xxxx_xxx'
- subject = self.subjects[sid]
- res = {
- 'name': subject,
- 'mesh_path': os.path.join(self.OBJ, subject + '.obj'),
- 'sid': sid,
- 'yid': yid,
- 'pid': pid,
- 'b_min': self.B_MIN,
- 'b_max': self.B_MAX,
- }
- render_data = self.get_render(subject, num_views=self.num_views, yid=yid, pid=pid,
- random_sample=self.opt.random_multiview)
- res.update(render_data)
-
- if self.opt.num_sample_inout:
- sample_data = self.select_sampling_method(subject)
- res.update(sample_data)
-
- # img = np.uint8((np.transpose(render_data['img'][0].numpy(), (1, 2, 0)) * 0.5 + 0.5)[:, :, ::-1] * 255.0)
- # rot = render_data['calib'][0,:3, :3]
- # trans = render_data['calib'][0,:3, 3:4]
- # pts = torch.addmm(trans, rot, sample_data['samples'][:, sample_data['labels'][0] > 0.5]) # [3, N]
- # pts = 0.5 * (pts.numpy().T + 1.0) * render_data['img'].size(2)
- # for p in pts:
- # img = cv2.circle(img, (p[0], p[1]), 2, (0,255,0), -1)
- # cv2.imshow('test', img)
- # cv2.waitKey(1)
-
- if self.num_sample_color:
- color_data = self.get_color_sampling(subject, yid=yid, pid=pid)
- res.update(color_data)
- return res
- # except Exception as e:
- # print(e)
- # return self.get_item(index=random.randint(0, self.__len__() - 1))
-
- def __getitem__(self, index):
- return self.get_item(index)
\ No newline at end of file
diff --git a/spaces/deeplearning/audioldm-text-to-audio-generation/audioldm/latent_diffusion/util.py b/spaces/deeplearning/audioldm-text-to-audio-generation/audioldm/latent_diffusion/util.py
deleted file mode 100644
index 8b289f6aa7f22a070870d8a706f944dc8547e936..0000000000000000000000000000000000000000
--- a/spaces/deeplearning/audioldm-text-to-audio-generation/audioldm/latent_diffusion/util.py
+++ /dev/null
@@ -1,295 +0,0 @@
-# adapted from
-# https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/gaussian_diffusion.py
-# and
-# https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
-# and
-# https://github.com/openai/guided-diffusion/blob/0ba878e517b276c45d1195eb29f6f5f72659a05b/guided_diffusion/nn.py
-#
-# thanks!
-
-
-import os
-import math
-import torch
-import torch.nn as nn
-import numpy as np
-from einops import repeat
-
-from audioldm.utils import instantiate_from_config
-
-
-def make_beta_schedule(
- schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3
-):
- if schedule == "linear":
- betas = (
- torch.linspace(
- linear_start**0.5, linear_end**0.5, n_timestep, dtype=torch.float64
- )
- ** 2
- )
-
- elif schedule == "cosine":
- timesteps = (
- torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s
- )
- alphas = timesteps / (1 + cosine_s) * np.pi / 2
- alphas = torch.cos(alphas).pow(2)
- alphas = alphas / alphas[0]
- betas = 1 - alphas[1:] / alphas[:-1]
- betas = np.clip(betas, a_min=0, a_max=0.999)
-
- elif schedule == "sqrt_linear":
- betas = torch.linspace(
- linear_start, linear_end, n_timestep, dtype=torch.float64
- )
- elif schedule == "sqrt":
- betas = (
- torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)
- ** 0.5
- )
- else:
- raise ValueError(f"schedule '{schedule}' unknown.")
- return betas.numpy()
-
-
-def make_ddim_timesteps(
- ddim_discr_method, num_ddim_timesteps, num_ddpm_timesteps, verbose=True
-):
- if ddim_discr_method == "uniform":
- c = num_ddpm_timesteps // num_ddim_timesteps
- ddim_timesteps = np.asarray(list(range(0, num_ddpm_timesteps, c)))
- elif ddim_discr_method == "quad":
- ddim_timesteps = (
- (np.linspace(0, np.sqrt(num_ddpm_timesteps * 0.8), num_ddim_timesteps)) ** 2
- ).astype(int)
- else:
- raise NotImplementedError(
- f'There is no ddim discretization method called "{ddim_discr_method}"'
- )
-
- # assert ddim_timesteps.shape[0] == num_ddim_timesteps
- # add one to get the final alpha values right (the ones from first scale to data during sampling)
- steps_out = ddim_timesteps + 1
- if verbose:
- print(f"Selected timesteps for ddim sampler: {steps_out}")
- return steps_out
-
-
-def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True):
- # select alphas for computing the variance schedule
- alphas = alphacums[ddim_timesteps]
- alphas_prev = np.asarray([alphacums[0]] + alphacums[ddim_timesteps[:-1]].tolist())
-
- # according to the formula provided in https://arxiv.org/abs/2010.02502
- sigmas = eta * np.sqrt(
- (1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev)
- )
- if verbose:
- print(
- f"Selected alphas for ddim sampler: a_t: {alphas}; a_(t-1): {alphas_prev}"
- )
- print(
- f"For the chosen value of eta, which is {eta}, "
- f"this results in the following sigma_t schedule for ddim sampler {sigmas}"
- )
- return sigmas, alphas, alphas_prev
-
-
-def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):
- """
- Create a beta schedule that discretizes the given alpha_t_bar function,
- which defines the cumulative product of (1-beta) over time for t in [0, 1].
- :param num_diffusion_timesteps: the number of betas to produce.
- :param alpha_bar: a lambda that takes an argument t from 0 to 1 and
- produces the cumulative product of (1-beta) up to that
- part of the diffusion process.
- :param max_beta: the maximum beta to use; use values lower than 1 to
- prevent singularities.
- """
- betas = []
- for i in range(num_diffusion_timesteps):
- t1 = i / num_diffusion_timesteps
- t2 = (i + 1) / num_diffusion_timesteps
- betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
- return np.array(betas)
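
For example, the cosine schedule of Nichol & Dhariwal (2021) is usually expressed through this helper with the following alpha_bar callback (illustrative parameters):

import math

betas = betas_for_alpha_bar(
    1000,                                                    # number of diffusion steps
    lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2,
)
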
-
-
-def extract_into_tensor(a, t, x_shape):
- b, *_ = t.shape
- out = a.gather(-1, t).contiguous()
- return out.reshape(b, *((1,) * (len(x_shape) - 1))).contiguous()
-
-
-def checkpoint(func, inputs, params, flag):
- """
- Evaluate a function without caching intermediate activations, allowing for
- reduced memory at the expense of extra compute in the backward pass.
- :param func: the function to evaluate.
- :param inputs: the argument sequence to pass to `func`.
- :param params: a sequence of parameters `func` depends on but does not
- explicitly take as arguments.
- :param flag: if False, disable gradient checkpointing.
- """
- if flag:
- args = tuple(inputs) + tuple(params)
- return CheckpointFunction.apply(func, len(inputs), *args)
- else:
- return func(*inputs)
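
A hedged sketch of how this helper is typically driven (the layer, sizes, and flag value are made up; in the diffusion model the flag usually comes from a use_checkpoint-style config option):

import torch
import torch.nn as nn

layer = nn.Sequential(nn.Linear(512, 2048), nn.GELU(), nn.Linear(2048, 512))
x = torch.randn(4, 512, requires_grad=True)

# Recompute the forward pass of `layer` during backward instead of storing activations.
out = checkpoint(layer, (x,), tuple(layer.parameters()), flag=True)
out.sum().backward()
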
-
-
-class CheckpointFunction(torch.autograd.Function):
- @staticmethod
- def forward(ctx, run_function, length, *args):
- ctx.run_function = run_function
- ctx.input_tensors = list(args[:length])
- ctx.input_params = list(args[length:])
-
- with torch.no_grad():
- output_tensors = ctx.run_function(*ctx.input_tensors)
- return output_tensors
-
- @staticmethod
- def backward(ctx, *output_grads):
- ctx.input_tensors = [x.detach().requires_grad_(True) for x in ctx.input_tensors]
- with torch.enable_grad():
- # Fixes a bug where the first op in run_function modifies the
- # Tensor storage in place, which is not allowed for detach()'d
- # Tensors.
- shallow_copies = [x.view_as(x) for x in ctx.input_tensors]
- output_tensors = ctx.run_function(*shallow_copies)
- input_grads = torch.autograd.grad(
- output_tensors,
- ctx.input_tensors + ctx.input_params,
- output_grads,
- allow_unused=True,
- )
- del ctx.input_tensors
- del ctx.input_params
- del output_tensors
- return (None, None) + input_grads
-
-
-def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False):
- """
- Create sinusoidal timestep embeddings.
- :param timesteps: a 1-D Tensor of N indices, one per batch element.
- These may be fractional.
- :param dim: the dimension of the output.
- :param max_period: controls the minimum frequency of the embeddings.
- :return: an [N x dim] Tensor of positional embeddings.
- """
- if not repeat_only:
- half = dim // 2
- freqs = torch.exp(
- -math.log(max_period)
- * torch.arange(start=0, end=half, dtype=torch.float32)
- / half
- ).to(device=timesteps.device)
- args = timesteps[:, None].float() * freqs[None]
- embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
- if dim % 2:
- embedding = torch.cat(
- [embedding, torch.zeros_like(embedding[:, :1])], dim=-1
- )
- else:
- embedding = repeat(timesteps, "b -> b d", d=dim)
- return embedding
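
A small sanity-check sketch of the embedding helper above (shapes only; values are random):

import torch

t = torch.randint(0, 1000, (8,))            # a batch of 8 diffusion timesteps
emb = timestep_embedding(t, dim=128)        # sinusoidal embedding per timestep
assert emb.shape == (8, 128)
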
-
-
-def zero_module(module):
- """
- Zero out the parameters of a module and return it.
- """
- for p in module.parameters():
- p.detach().zero_()
- return module
-
-
-def scale_module(module, scale):
- """
- Scale the parameters of a module and return it.
- """
- for p in module.parameters():
- p.detach().mul_(scale)
- return module
-
-
-def mean_flat(tensor):
- """
- Take the mean over all non-batch dimensions.
- """
- return tensor.mean(dim=list(range(1, len(tensor.shape))))
-
-
-def normalization(channels):
- """
- Make a standard normalization layer.
- :param channels: number of input channels.
- :return: an nn.Module for normalization.
- """
- return GroupNorm32(32, channels)
-
-
-# PyTorch 1.7 has SiLU, but we support PyTorch 1.5.
-class SiLU(nn.Module):
- def forward(self, x):
- return x * torch.sigmoid(x)
-
-
-class GroupNorm32(nn.GroupNorm):
- def forward(self, x):
- return super().forward(x.float()).type(x.dtype)
-
-
-def conv_nd(dims, *args, **kwargs):
- """
- Create a 1D, 2D, or 3D convolution module.
- """
- if dims == 1:
- return nn.Conv1d(*args, **kwargs)
- elif dims == 2:
- return nn.Conv2d(*args, **kwargs)
- elif dims == 3:
- return nn.Conv3d(*args, **kwargs)
- raise ValueError(f"unsupported dimensions: {dims}")
-
-
-def linear(*args, **kwargs):
- """
- Create a linear module.
- """
- return nn.Linear(*args, **kwargs)
-
-
-def avg_pool_nd(dims, *args, **kwargs):
- """
- Create a 1D, 2D, or 3D average pooling module.
- """
- if dims == 1:
- return nn.AvgPool1d(*args, **kwargs)
- elif dims == 2:
- return nn.AvgPool2d(*args, **kwargs)
- elif dims == 3:
- return nn.AvgPool3d(*args, **kwargs)
- raise ValueError(f"unsupported dimensions: {dims}")
-
-
-class HybridConditioner(nn.Module):
- def __init__(self, c_concat_config, c_crossattn_config):
- super().__init__()
- self.concat_conditioner = instantiate_from_config(c_concat_config)
- self.crossattn_conditioner = instantiate_from_config(c_crossattn_config)
-
- def forward(self, c_concat, c_crossattn):
- c_concat = self.concat_conditioner(c_concat)
- c_crossattn = self.crossattn_conditioner(c_crossattn)
- return {"c_concat": [c_concat], "c_crossattn": [c_crossattn]}
-
-
-def noise_like(shape, device, repeat=False):
- repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(
- shape[0], *((1,) * (len(shape) - 1))
- )
- noise = lambda: torch.randn(shape, device=device)
- return repeat_noise() if repeat else noise()
diff --git a/spaces/dev-andres/Caracola-app/models/model.py b/spaces/dev-andres/Caracola-app/models/model.py
deleted file mode 100644
index 0f5186d34a414585e5be03cb2a8235c646f43663..0000000000000000000000000000000000000000
--- a/spaces/dev-andres/Caracola-app/models/model.py
+++ /dev/null
@@ -1,78 +0,0 @@
-import re
-
-def sim_jac(s1, s2):
- # Jaccard similarity over character bigrams of s1 and s2.
-
- bigrams_s1 = []
- bigrams_s2 = []
-
- for i in range(len(s1) - 1):
- bigrams_s1.append(s1[i:i+2])
-
- for i in range(len(s2) - 1):
- bigrams_s2.append(s2[i:i+2])
-
- c_common = 0
-
- for i in bigrams_s1:
- if bigrams_s2.count(i) > 0:
- c_common += 1
-
- return c_common / ((len(s1) - 1) + (len(s2) - 1) - c_common)
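
A worked example of the bigram Jaccard similarity above (the measure operates on character bigrams, not whole words):

# "casa" -> bigrams [ca, as, sa]; "cosa" -> [co, os, sa]; one shared bigram ("sa").
# Jaccard = 1 / (3 + 3 - 1) = 0.2
print(sim_jac("casa", "cosa"))   # 0.2
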
-
-def encontrar_palabras(transcript,cjto_palabras):
- '''
- Takes a (lowercase) string and a set of words. Finds the first match of
- cjto_palabras in transcript and splits the string into:
- 1. The slice of the string before the first match (antes_palabra)
- 2. The matched substring itself (the cjto_palabras hit)
- 3. The slice of the string after the match (despues_palabra)
- '''
- inicio,final=list(re.finditer(r'|'.join(cjto_palabras),transcript))[0].span()
- antes_palabra=transcript[:inicio].strip()
- despues_palabra=transcript[final:].strip()
- palabra=transcript[inicio:final]
- return antes_palabra,palabra,despues_palabra
-
-
-def agregar_adentro(codigo, transcipcion):
- codigo2 = main(transcipcion)
-
- return codigo[:-1] + codigo2
-
-
-import numpy as np
-
-def main(instruccion):
- global bloque
-
- plantillas = [
- crear_funcion,
- crear_condicional,
- crear_condicional,
- asignar_variable,
- crear_variable,
- crear_llamada,
- crear_for,
- fin_de_bloque,
- crear_comentario,
- crear_regresa
- ]
-
- comandos = [set(['definir', 'funcion', 'parametros']),
- set(['mientras']),
- set(['si']), # "si" = if (the condition holds) / "mientras" = while (it holds)
- set(['asignar', 'con']),
- set(['definir', 'variable']),
- set(['ejecuta', 'argumentos']),
- set(['para', 'rango']),
- set(['terminar','bloque']),
- set(['comentario']),
- set(['regresa'])
-
- ]
-
- J = []
- for comando in comandos:
- J.append(len(set(instruccion.strip().split(' ')).intersection(comando)) / len(set(instruccion.strip().split(' ')).union(comando)))
- # print(J,np.argmax(J))
- pos_func=np.argmax(J)
- # print(pos_func)
- return plantillas[pos_func](instruccion)
\ No newline at end of file
diff --git a/spaces/dfhgfh/bingAI/Dockerfile b/spaces/dfhgfh/bingAI/Dockerfile
deleted file mode 100644
index b42e89f013f8d23d04673eb2ab69e30d0dd45e8d..0000000000000000000000000000000000000000
--- a/spaces/dfhgfh/bingAI/Dockerfile
+++ /dev/null
@@ -1,34 +0,0 @@
-# Build Stage
-# Use golang:alpine as the base image for the build stage
-FROM golang:alpine AS builder
-
-# Install git so the project can be cloned from GitHub later
-RUN apk --no-cache add git
-
-# Clone the go-proxy-bingai project from GitHub into /workspace/app
-RUN git clone https://github.com/Harry-zklcdc/go-proxy-bingai.git /workspace/app
-
-# Set the working directory to the cloned project directory
-WORKDIR /workspace/app
-
-# Build the Go project. -ldflags="-s -w" reduces the size of the compiled binary
-RUN go build -ldflags="-s -w" -tags netgo -trimpath -o go-proxy-bingai main.go
-
-# Runtime Stage
-# Use the lightweight alpine image as the runtime base image
-FROM alpine
-
-# Set the working directory
-WORKDIR /workspace/app
-
-# Copy the compiled binary from the build stage into the runtime image
-COPY --from=builder /workspace/app/go-proxy-bingai .
-
-# Set the environment variable; the value here is a random string
-ENV Go_Proxy_BingAI_USER_TOKEN_1="kJs8hD92ncMzLaoQWYtX9rG6bE3fZ4iO"
-
-# Expose port 8080
-EXPOSE 8080
-
-# Command to run when the container starts
-CMD ["/workspace/app/go-proxy-bingai"]
\ No newline at end of file
diff --git a/spaces/diacanFperku/AutoGPT/Libro El Secreto De Selena Descargar Gratis Pdf.md b/spaces/diacanFperku/AutoGPT/Libro El Secreto De Selena Descargar Gratis Pdf.md
deleted file mode 100644
index 9d23c3a912af112ab21fc74a8ac2fe29b3a0aa80..0000000000000000000000000000000000000000
--- a/spaces/diacanFperku/AutoGPT/Libro El Secreto De Selena Descargar Gratis Pdf.md
+++ /dev/null
@@ -1,7 +0,0 @@
-Libro El Secreto De Selena Descargar Gratis Pdf
Download File ►►► https://gohhs.com/2uFUua
-
-Apr 28, 2564 BE — Tag# Libro Pdf Español DescargarLibro Pdf Español Descargar,descargar ... en pdf gratis para leer,libros en pdf para descargar,libros epub ... libro pdf, libro pdf fotos libro pdf pics libro pdf pagina libro pdf fotos libro pdf pagina d ... libro pdf, libro pdf fotos libro pdf pagina libro pdf
-pag 8a78ff9644
-
-
-
diff --git a/spaces/diacanFperku/AutoGPT/Risk 2 Full Version Crack _VERIFIED_.md b/spaces/diacanFperku/AutoGPT/Risk 2 Full Version Crack _VERIFIED_.md
deleted file mode 100644
index 46efb5aa1fcb1ce85f4b1cb8465bf1b7719c5e6f..0000000000000000000000000000000000000000
--- a/spaces/diacanFperku/AutoGPT/Risk 2 Full Version Crack _VERIFIED_.md
+++ /dev/null
@@ -1,6 +0,0 @@
-risk 2 full version crack
DOWNLOAD ——— https://gohhs.com/2uFT3a
-
-(1) Public Windows 10 Keys. â—‡2. Crack Windows Activation via KMS. (1) What is ... Part 5. Disclaimer: Legal Risk in Windows Crack. Part 6. 1fdad05405
-
-
-
diff --git a/spaces/diagaiwei/ir_chinese_medqa/colbert/indexing/__init__.py b/spaces/diagaiwei/ir_chinese_medqa/colbert/indexing/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/diaoren/OpenSetObstacleDetection/opendet2/data/__init__.py b/spaces/diaoren/OpenSetObstacleDetection/opendet2/data/__init__.py
deleted file mode 100644
index e11521eda749775fc2b8f4e187769a919cdbd9fd..0000000000000000000000000000000000000000
--- a/spaces/diaoren/OpenSetObstacleDetection/opendet2/data/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-from .build import *
-from . import builtin
-from . import mybuiltin
-
-__all__ = [k for k in globals().keys() if not k.startswith("_")]
diff --git a/spaces/digitalxingtong/Bufeiyan-a-Bert-VITS2/train_ms.py b/spaces/digitalxingtong/Bufeiyan-a-Bert-VITS2/train_ms.py
deleted file mode 100644
index 5d109003d40497ea4493e7c73f47c1eb7370a81e..0000000000000000000000000000000000000000
--- a/spaces/digitalxingtong/Bufeiyan-a-Bert-VITS2/train_ms.py
+++ /dev/null
@@ -1,402 +0,0 @@
-import os
-import json
-import argparse
-import itertools
-import math
-import torch
-import shutil
-from torch import nn, optim
-from torch.nn import functional as F
-from torch.utils.data import DataLoader
-from torch.utils.tensorboard import SummaryWriter
-import torch.multiprocessing as mp
-import torch.distributed as dist
-from torch.nn.parallel import DistributedDataParallel as DDP
-from torch.cuda.amp import autocast, GradScaler
-from tqdm import tqdm
-import logging
-logging.getLogger('numba').setLevel(logging.WARNING)
-import commons
-import utils
-from data_utils import (
- TextAudioSpeakerLoader,
- TextAudioSpeakerCollate,
- DistributedBucketSampler
-)
-from models import (
- SynthesizerTrn,
- MultiPeriodDiscriminator,
- DurationDiscriminator,
-)
-from losses import (
- generator_loss,
- discriminator_loss,
- feature_loss,
- kl_loss
-)
-from mel_processing import mel_spectrogram_torch, spec_to_mel_torch
-from text.symbols import symbols
-
-torch.backends.cudnn.benchmark = True
-torch.backends.cuda.matmul.allow_tf32 = True
-torch.backends.cudnn.allow_tf32 = True
-torch.set_float32_matmul_precision('medium')
-global_step = 0
-
-
-def main():
- """Assume Single Node Multi GPUs Training Only"""
- assert torch.cuda.is_available(), "CPU training is not allowed."
-
- n_gpus = torch.cuda.device_count()
- os.environ['MASTER_ADDR'] = 'localhost'
- os.environ['MASTER_PORT'] = '65280'
-
- hps = utils.get_hparams()
- if not hps.cont:
- shutil.copy('./pretrained_models/D_0.pth','./logs/OUTPUT_MODEL/D_0.pth')
- shutil.copy('./pretrained_models/G_0.pth','./logs/OUTPUT_MODEL/G_0.pth')
- shutil.copy('./pretrained_models/DUR_0.pth','./logs/OUTPUT_MODEL/DUR_0.pth')
- mp.spawn(run, nprocs=n_gpus, args=(n_gpus, hps,))
-
-
-def run(rank, n_gpus, hps):
- global global_step
- if rank == 0:
- logger = utils.get_logger(hps.model_dir)
- logger.info(hps)
- utils.check_git_hash(hps.model_dir)
- writer = SummaryWriter(log_dir=hps.model_dir)
- writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval"))
-
- dist.init_process_group(backend= 'gloo' if os.name == 'nt' else 'nccl', init_method='env://', world_size=n_gpus, rank=rank)
- torch.manual_seed(hps.train.seed)
- torch.cuda.set_device(rank)
-
- train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps.data)
- train_sampler = DistributedBucketSampler(
- train_dataset,
- hps.train.batch_size,
- [32, 300, 400, 500, 600, 700, 800, 900, 1000],
- num_replicas=n_gpus,
- rank=rank,
- shuffle=True)
- collate_fn = TextAudioSpeakerCollate()
- train_loader = DataLoader(train_dataset, num_workers=2, shuffle=False, pin_memory=True,
- collate_fn=collate_fn, batch_sampler=train_sampler)
- if rank == 0:
- eval_dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps.data)
- eval_loader = DataLoader(eval_dataset, num_workers=0, shuffle=False,
- batch_size=1, pin_memory=True,
- drop_last=False, collate_fn=collate_fn)
- if "use_noise_scaled_mas" in hps.model.keys() and hps.model.use_noise_scaled_mas == True:
- print("Using noise scaled MAS for VITS2")
- use_noise_scaled_mas = True
- mas_noise_scale_initial = 0.01
- noise_scale_delta = 2e-6
- else:
- print("Using normal MAS for VITS1")
- use_noise_scaled_mas = False
- mas_noise_scale_initial = 0.0
- noise_scale_delta = 0.0
- if "use_duration_discriminator" in hps.model.keys() and hps.model.use_duration_discriminator == True:
- print("Using duration discriminator for VITS2")
- use_duration_discriminator = True
- net_dur_disc = DurationDiscriminator(
- hps.model.hidden_channels,
- hps.model.hidden_channels,
- 3,
- 0.1,
- gin_channels=hps.model.gin_channels if hps.data.n_speakers != 0 else 0,
- ).cuda(rank)
- else:
- net_dur_disc = None
- if "use_spk_conditioned_encoder" in hps.model.keys() and hps.model.use_spk_conditioned_encoder == True:
- if hps.data.n_speakers == 0:
- raise ValueError("n_speakers must be > 0 when using spk conditioned encoder to train multi-speaker model")
- use_spk_conditioned_encoder = True
- else:
- print("Using normal encoder for VITS1")
- use_spk_conditioned_encoder = False
-
- net_g = SynthesizerTrn(
- len(symbols),
- hps.data.filter_length // 2 + 1,
- hps.train.segment_size // hps.data.hop_length,
- n_speakers=hps.data.n_speakers,
- mas_noise_scale_initial = mas_noise_scale_initial,
- noise_scale_delta = noise_scale_delta,
- **hps.model).cuda(rank)
-
- freeze_enc = getattr(hps.model, "freeze_enc", False)
- if freeze_enc:
- print("freeze encoder !!!")
- for param in net_g.enc_p.parameters():
- param.requires_grad = False
-
- net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(rank)
- optim_g = torch.optim.AdamW(
- filter(lambda p: p.requires_grad, net_g.parameters()),
- hps.train.learning_rate,
- betas=hps.train.betas,
- eps=hps.train.eps)
- optim_d = torch.optim.AdamW(
- net_d.parameters(),
- hps.train.learning_rate,
- betas=hps.train.betas,
- eps=hps.train.eps)
- if net_dur_disc is not None:
- optim_dur_disc = torch.optim.AdamW(
- net_dur_disc.parameters(),
- hps.train.learning_rate,
- betas=hps.train.betas,
- eps=hps.train.eps)
- else:
- optim_dur_disc = None
- net_g = DDP(net_g, device_ids=[rank], find_unused_parameters=True)
- net_d = DDP(net_d, device_ids=[rank], find_unused_parameters=True)
- if net_dur_disc is not None:
- net_dur_disc = DDP(net_dur_disc, device_ids=[rank], find_unused_parameters=True)
-
- pretrain_dir = None
- if pretrain_dir is None:
- try:
- if net_dur_disc is not None:
- _, optim_dur_disc, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "DUR_*.pth"), net_dur_disc, optim_dur_disc, skip_optimizer=not hps.cont)
- _, optim_g, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g,
- optim_g, skip_optimizer=not hps.cont)
- _, optim_d, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d,
- optim_d, skip_optimizer=not hps.cont)
-
- epoch_str = max(epoch_str, 1)
- global_step = (epoch_str - 1) * len(train_loader)
- except Exception as e:
- print(e)
- epoch_str = 1
- global_step = 0
- else:
- _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(pretrain_dir, "G_*.pth"), net_g,
- optim_g, True)
- _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(pretrain_dir, "D_*.pth"), net_d,
- optim_d, True)
-
-
-
- scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2)
- scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2)
- if net_dur_disc is not None:
- scheduler_dur_disc = torch.optim.lr_scheduler.ExponentialLR(optim_dur_disc, gamma=hps.train.lr_decay, last_epoch=epoch_str-2)
- else:
- scheduler_dur_disc = None
- scaler = GradScaler(enabled=hps.train.fp16_run)
-
- for epoch in range(epoch_str, hps.train.epochs + 1):
- if rank == 0:
- train_and_evaluate(rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, eval_loader], logger, [writer, writer_eval])
- else:
- train_and_evaluate(rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, None], None, None)
- scheduler_g.step()
- scheduler_d.step()
- if net_dur_disc is not None:
- scheduler_dur_disc.step()
-
-
-def train_and_evaluate(rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers):
- net_g, net_d, net_dur_disc = nets
- optim_g, optim_d, optim_dur_disc = optims
- scheduler_g, scheduler_d, scheduler_dur_disc = schedulers
- train_loader, eval_loader = loaders
- if writers is not None:
- writer, writer_eval = writers
-
- train_loader.batch_sampler.set_epoch(epoch)
- global global_step
-
- net_g.train()
- net_d.train()
- if net_dur_disc is not None:
- net_dur_disc.train()
- for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths, speakers, tone, language, bert) in tqdm(enumerate(train_loader)):
- if net_g.module.use_noise_scaled_mas:
- current_mas_noise_scale = net_g.module.mas_noise_scale_initial - net_g.module.noise_scale_delta * global_step
- net_g.module.current_mas_noise_scale = max(current_mas_noise_scale, 0.0)
- x, x_lengths = x.cuda(rank, non_blocking=True), x_lengths.cuda(rank, non_blocking=True)
- spec, spec_lengths = spec.cuda(rank, non_blocking=True), spec_lengths.cuda(rank, non_blocking=True)
- y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda(rank, non_blocking=True)
- speakers = speakers.cuda(rank, non_blocking=True)
- tone = tone.cuda(rank, non_blocking=True)
- language = language.cuda(rank, non_blocking=True)
- bert = bert.cuda(rank, non_blocking=True)
-
- with autocast(enabled=hps.train.fp16_run):
- y_hat, l_length, attn, ids_slice, x_mask, z_mask, \
- (z, z_p, m_p, logs_p, m_q, logs_q), (hidden_x, logw, logw_) = net_g(x, x_lengths, spec, spec_lengths, speakers, tone, language, bert)
- mel = spec_to_mel_torch(
- spec,
- hps.data.filter_length,
- hps.data.n_mel_channels,
- hps.data.sampling_rate,
- hps.data.mel_fmin,
- hps.data.mel_fmax)
- y_mel = commons.slice_segments(mel, ids_slice, hps.train.segment_size // hps.data.hop_length)
- y_hat_mel = mel_spectrogram_torch(
- y_hat.squeeze(1),
- hps.data.filter_length,
- hps.data.n_mel_channels,
- hps.data.sampling_rate,
- hps.data.hop_length,
- hps.data.win_length,
- hps.data.mel_fmin,
- hps.data.mel_fmax
- )
-
- y = commons.slice_segments(y, ids_slice * hps.data.hop_length, hps.train.segment_size) # slice
-
- # Discriminator
- y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach())
- with autocast(enabled=False):
- loss_disc, losses_disc_r, losses_disc_g = discriminator_loss(y_d_hat_r, y_d_hat_g)
- loss_disc_all = loss_disc
- if net_dur_disc is not None:
- y_dur_hat_r, y_dur_hat_g = net_dur_disc(hidden_x.detach(), x_mask.detach(), logw.detach(), logw_.detach())
- with autocast(enabled=False):
- # TODO: the duration-discriminator loss should be averaged over the valid (masked) positions; for now it is averaged over all elements
- loss_dur_disc, losses_dur_disc_r, losses_dur_disc_g = discriminator_loss(y_dur_hat_r, y_dur_hat_g)
- loss_dur_disc_all = loss_dur_disc
- optim_dur_disc.zero_grad()
- scaler.scale(loss_dur_disc_all).backward()
- scaler.unscale_(optim_dur_disc)
- grad_norm_dur_disc = commons.clip_grad_value_(net_dur_disc.parameters(), None)
- scaler.step(optim_dur_disc)
-
- optim_d.zero_grad()
- scaler.scale(loss_disc_all).backward()
- scaler.unscale_(optim_d)
- grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None)
- scaler.step(optim_d)
-
- with autocast(enabled=hps.train.fp16_run):
- # Generator
- y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(y, y_hat)
- if net_dur_disc is not None:
- y_dur_hat_r, y_dur_hat_g = net_dur_disc(hidden_x, x_mask, logw, logw_)
- with autocast(enabled=False):
- loss_dur = torch.sum(l_length.float())
- loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel
- loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl
-
- loss_fm = feature_loss(fmap_r, fmap_g)
- loss_gen, losses_gen = generator_loss(y_d_hat_g)
- loss_gen_all = loss_gen + loss_fm + loss_mel + loss_dur + loss_kl
- if net_dur_disc is not None:
- loss_dur_gen, losses_dur_gen = generator_loss(y_dur_hat_g)
- loss_gen_all += loss_dur_gen
- optim_g.zero_grad()
- scaler.scale(loss_gen_all).backward()
- scaler.unscale_(optim_g)
- grad_norm_g = commons.clip_grad_value_(net_g.parameters(), None)
- scaler.step(optim_g)
- scaler.update()
-
- if rank == 0:
- if global_step % hps.train.log_interval == 0:
- lr = optim_g.param_groups[0]['lr']
- losses = [loss_disc, loss_gen, loss_fm, loss_mel, loss_dur, loss_kl]
- logger.info('Train Epoch: {} [{:.0f}%]'.format(
- epoch,
- 100. * batch_idx / len(train_loader)))
- logger.info([x.item() for x in losses] + [global_step, lr])
-
- scalar_dict = {"loss/g/total": loss_gen_all, "loss/d/total": loss_disc_all, "learning_rate": lr,
- "grad_norm_d": grad_norm_d, "grad_norm_g": grad_norm_g}
- scalar_dict.update(
- {"loss/g/fm": loss_fm, "loss/g/mel": loss_mel, "loss/g/dur": loss_dur, "loss/g/kl": loss_kl})
- scalar_dict.update({"loss/g/{}".format(i): v for i, v in enumerate(losses_gen)})
- scalar_dict.update({"loss/d_r/{}".format(i): v for i, v in enumerate(losses_disc_r)})
- scalar_dict.update({"loss/d_g/{}".format(i): v for i, v in enumerate(losses_disc_g)})
-
- image_dict = {
- "slice/mel_org": utils.plot_spectrogram_to_numpy(y_mel[0].data.cpu().numpy()),
- "slice/mel_gen": utils.plot_spectrogram_to_numpy(y_hat_mel[0].data.cpu().numpy()),
- "all/mel": utils.plot_spectrogram_to_numpy(mel[0].data.cpu().numpy()),
- "all/attn": utils.plot_alignment_to_numpy(attn[0, 0].data.cpu().numpy())
- }
- utils.summarize(
- writer=writer,
- global_step=global_step,
- images=image_dict,
- scalars=scalar_dict)
-
- if global_step % hps.train.eval_interval == 0:
- evaluate(hps, net_g, eval_loader, writer_eval)
- utils.save_checkpoint(net_g, optim_g, hps.train.learning_rate, epoch,
- os.path.join(hps.model_dir, "G_{}.pth".format(global_step)))
- utils.save_checkpoint(net_d, optim_d, hps.train.learning_rate, epoch,
- os.path.join(hps.model_dir, "D_{}.pth".format(global_step)))
- if net_dur_disc is not None:
- utils.save_checkpoint(net_dur_disc, optim_dur_disc, hps.train.learning_rate, epoch, os.path.join(hps.model_dir, "DUR_{}.pth".format(global_step)))
- keep_ckpts = getattr(hps.train, 'keep_ckpts', 5)
- if keep_ckpts > 0:
- utils.clean_checkpoints(path_to_models=hps.model_dir, n_ckpts_to_keep=keep_ckpts, sort_by_time=True)
-
-
- global_step += 1
-
- if rank == 0:
- logger.info('====> Epoch: {}'.format(epoch))
-
-
-
-def evaluate(hps, generator, eval_loader, writer_eval):
- generator.eval()
- image_dict = {}
- audio_dict = {}
- print("Evaluating ...")
- with torch.no_grad():
- for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths, speakers, tone, language, bert) in enumerate(eval_loader):
- x, x_lengths = x.cuda(), x_lengths.cuda()
- spec, spec_lengths = spec.cuda(), spec_lengths.cuda()
- y, y_lengths = y.cuda(), y_lengths.cuda()
- speakers = speakers.cuda()
- bert = bert.cuda()
- tone = tone.cuda()
- language = language.cuda()
- for use_sdp in [True, False]:
- y_hat, attn, mask, *_ = generator.module.infer(x, x_lengths, speakers, tone, language, bert, y=spec, max_len=1000, sdp_ratio=0.0 if not use_sdp else 1.0)
- y_hat_lengths = mask.sum([1, 2]).long() * hps.data.hop_length
-
- mel = spec_to_mel_torch(
- spec,
- hps.data.filter_length,
- hps.data.n_mel_channels,
- hps.data.sampling_rate,
- hps.data.mel_fmin,
- hps.data.mel_fmax)
- y_hat_mel = mel_spectrogram_torch(
- y_hat.squeeze(1).float(),
- hps.data.filter_length,
- hps.data.n_mel_channels,
- hps.data.sampling_rate,
- hps.data.hop_length,
- hps.data.win_length,
- hps.data.mel_fmin,
- hps.data.mel_fmax
- )
- image_dict.update({
- f"gen/mel_{batch_idx}": utils.plot_spectrogram_to_numpy(y_hat_mel[0].cpu().numpy())
- })
- audio_dict.update({
- f"gen/audio_{batch_idx}_{use_sdp}": y_hat[0, :, :y_hat_lengths[0]]
- })
- image_dict.update({f"gt/mel_{batch_idx}": utils.plot_spectrogram_to_numpy(mel[0].cpu().numpy())})
- audio_dict.update({f"gt/audio_{batch_idx}": y[0, :, :y_lengths[0]]})
-
- utils.summarize(
- writer=writer_eval,
- global_step=global_step,
- images=image_dict,
- audios=audio_dict,
- audio_sampling_rate=hps.data.sampling_rate
- )
- generator.train()
-
-if __name__ == "__main__":
- main()
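The training step in the deleted script above repeats one mixed-precision pattern for each of its three optimizers: scale the loss, backpropagate, unscale before clipping, then step and update the scaler. Below is a minimal, self-contained sketch of that pattern; the toy `model`, `optimizer`, and learning rate are placeholders, and PyTorch's own `clip_grad_value_` stands in for the repo's `commons` helper.

```python
import torch
from torch.cuda.amp import GradScaler, autocast

use_amp = torch.cuda.is_available()
device = "cuda" if use_amp else "cpu"

model = torch.nn.Linear(10, 1).to(device)               # stand-in for net_g / net_d
optimizer = torch.optim.AdamW(model.parameters(), lr=2e-4)
scaler = GradScaler(enabled=use_amp)

def train_step(x, y):
    with autocast(enabled=use_amp):
        loss = torch.nn.functional.l1_loss(model(x), y)
    optimizer.zero_grad()
    scaler.scale(loss).backward()       # backward on the scaled loss
    scaler.unscale_(optimizer)          # unscale so clipping sees real gradient values
    torch.nn.utils.clip_grad_value_(model.parameters(), clip_value=1.0)
    scaler.step(optimizer)              # skipped automatically if gradients overflowed
    scaler.update()                     # in the real loop, called once after all optimizers
    return loss.item()

x, y = torch.randn(8, 10, device=device), torch.randn(8, 1, device=device)
print(train_step(x, y))
```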
diff --git a/spaces/djgoettel/02-Gradio-Art-From-Text-And-Images/app.py b/spaces/djgoettel/02-Gradio-Art-From-Text-And-Images/app.py
deleted file mode 100644
index b0424ed426f78ea91b8307a7c74e4fbc35b7c749..0000000000000000000000000000000000000000
--- a/spaces/djgoettel/02-Gradio-Art-From-Text-And-Images/app.py
+++ /dev/null
@@ -1,224 +0,0 @@
-import os
-
-os.system("git clone --recursive https://github.com/JD-P/cloob-latent-diffusion")
-os.system("cd cloob-latent-diffusion;pip install omegaconf pillow pytorch-lightning einops wandb ftfy regex ./CLIP")
-
-import argparse
-from functools import partial
-from pathlib import Path
-import sys
-sys.path.append('./cloob-latent-diffusion')
-sys.path.append('./cloob-latent-diffusion/cloob-training')
-sys.path.append('./cloob-latent-diffusion/latent-diffusion')
-sys.path.append('./cloob-latent-diffusion/taming-transformers')
-sys.path.append('./cloob-latent-diffusion/v-diffusion-pytorch')
-from omegaconf import OmegaConf
-from PIL import Image
-import torch
-from torch import nn
-from torch.nn import functional as F
-from torchvision import transforms
-from torchvision.transforms import functional as TF
-from tqdm import trange
-from CLIP import clip
-from cloob_training import model_pt, pretrained
-import ldm.models.autoencoder
-from diffusion import sampling, utils
-import train_latent_diffusion as train
-from huggingface_hub import hf_hub_url, cached_download
-import random
-
-# Download the model files
-checkpoint = cached_download(hf_hub_url("huggan/distill-ccld-wa", filename="model_student.ckpt"))
-ae_model_path = cached_download(hf_hub_url("huggan/ccld_wa", filename="ae_model.ckpt"))
-ae_config_path = cached_download(hf_hub_url("huggan/ccld_wa", filename="ae_model.yaml"))
-
-# Define a few utility functions
-
-
-def parse_prompt(prompt, default_weight=3.):
- if prompt.startswith('http://') or prompt.startswith('https://'):
- vals = prompt.rsplit(':', 2)
- vals = [vals[0] + ':' + vals[1], *vals[2:]]
- else:
- vals = prompt.rsplit(':', 1)
- vals = vals + ['', default_weight][len(vals):]
- return vals[0], float(vals[1])
-
-
-def resize_and_center_crop(image, size):
- fac = max(size[0] / image.size[0], size[1] / image.size[1])
- image = image.resize((int(fac * image.size[0]), int(fac * image.size[1])), Image.LANCZOS)
- return TF.center_crop(image, size[::-1])
-
-
-# Load the models
-device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
-print('Using device:', device)
-print('loading models')
-
-# autoencoder
-ae_config = OmegaConf.load(ae_config_path)
-ae_model = ldm.models.autoencoder.AutoencoderKL(**ae_config.model.params)
-ae_model.eval().requires_grad_(False).to(device)
-ae_model.load_state_dict(torch.load(ae_model_path))
-n_ch, side_y, side_x = 4, 32, 32
-
-# diffusion model
-model = train.DiffusionModel(192, [1,1,2,2], autoencoder_scale=torch.tensor(4.3084))
-model.load_state_dict(torch.load(checkpoint, map_location='cpu'))
-model = model.to(device).eval().requires_grad_(False)
-
-# CLOOB
-cloob_config = pretrained.get_config('cloob_laion_400m_vit_b_16_16_epochs')
-cloob = model_pt.get_pt_model(cloob_config)
-checkpoint = pretrained.download_checkpoint(cloob_config)
-cloob.load_state_dict(model_pt.get_pt_params(cloob_config, checkpoint))
-cloob.eval().requires_grad_(False).to(device)
-
-
-# The key function: returns a list of n PIL images
-def generate(n=1, prompts=['a red circle'], images=[], seed=42, steps=15,
- method='plms', eta=None):
- zero_embed = torch.zeros([1, cloob.config['d_embed']], device=device)
- target_embeds, weights = [zero_embed], []
-
- for prompt in prompts:
- txt, weight = parse_prompt(prompt)
- target_embeds.append(cloob.text_encoder(cloob.tokenize(txt).to(device)).float())
- weights.append(weight)
-
- for prompt in images:
- path, weight = parse_prompt(prompt)
- img = Image.open(utils.fetch(path)).convert('RGB')
- clip_size = cloob.config['image_encoder']['image_size']
- img = resize_and_center_crop(img, (clip_size, clip_size))
- batch = TF.to_tensor(img)[None].to(device)
- embed = F.normalize(cloob.image_encoder(cloob.normalize(batch)).float(), dim=-1)
- target_embeds.append(embed)
- weights.append(weight)
-
- weights = torch.tensor([1 - sum(weights), *weights], device=device)
-
- torch.manual_seed(seed)
-
- def cfg_model_fn(x, t):
- n = x.shape[0]
- n_conds = len(target_embeds)
- x_in = x.repeat([n_conds, 1, 1, 1])
- t_in = t.repeat([n_conds])
- clip_embed_in = torch.cat([*target_embeds]).repeat_interleave(n, 0)
- vs = model(x_in, t_in, clip_embed_in).view([n_conds, n, *x.shape[1:]])
- v = vs.mul(weights[:, None, None, None, None]).sum(0)
- return v
-
- def run(x, steps):
- if method == 'ddpm':
- return sampling.sample(cfg_model_fn, x, steps, 1., {})
- if method == 'ddim':
- return sampling.sample(cfg_model_fn, x, steps, eta, {})
- if method == 'prk':
- return sampling.prk_sample(cfg_model_fn, x, steps, {})
- if method == 'plms':
- return sampling.plms_sample(cfg_model_fn, x, steps, {})
- if method == 'pie':
- return sampling.pie_sample(cfg_model_fn, x, steps, {})
- if method == 'plms2':
- return sampling.plms2_sample(cfg_model_fn, x, steps, {})
- assert False
-
- batch_size = n
- x = torch.randn([n, n_ch, side_y, side_x], device=device)
- t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
- steps = utils.get_spliced_ddpm_cosine_schedule(t)
- pil_ims = []
- for i in trange(0, n, batch_size):
- cur_batch_size = min(n - i, batch_size)
- out_latents = run(x[i:i+cur_batch_size], steps)
- outs = ae_model.decode(out_latents * torch.tensor(2.55).to(device))
- for j, out in enumerate(outs):
- pil_ims.append(utils.to_pil_image(out))
-
- return pil_ims
-
-
-import gradio as gr
-
-def gen_ims(prompt, im_prompt=None, seed=None, n_steps=10, method='plms'):
- if seed is None:
- seed = random.randint(0, 10000)
- print(prompt, im_prompt, seed, n_steps)
- prompts = [prompt]
- im_prompts = []
- if im_prompt is not None:
- im_prompts = [im_prompt]
- pil_ims = generate(n=1, prompts=prompts, images=im_prompts, seed=seed, steps=n_steps, method=method)
- return pil_ims[0]
-
-iface = gr.Interface(fn=gen_ims,
- inputs=[#gr.inputs.Slider(minimum=1, maximum=1, step=1, default=1,label="Number of images"),
- #gr.inputs.Slider(minimum=0, maximum=200, step=1, label='Random seed', default=0),
- gr.inputs.Textbox(label="Text prompt"),
- gr.inputs.Image(optional=True, label="Image prompt", type='filepath'),
- #gr.inputs.Slider(minimum=10, maximum=35, step=1, default=15,label="Number of steps")
- ],
- outputs=[gr.outputs.Image(type="pil", label="Generated Image")],
- examples=[
- ["Futurism, in the style of Wassily Kandinsky"],
- ["Art Nouveau, in the style of John Singer Sargent"],
- ["Surrealism, in the style of Edgar Degas"],
- ["Expressionism, in the style of Wassily Kandinsky"],
- ["Futurism, in the style of Egon Schiele"],
- ["Neoclassicism, in the style of Gustav Klimt"],
- ["Cubism, in the style of Gustav Klimt"],
- ["Op Art, in the style of Marc Chagall"],
- ["Romanticism, in the style of M.C. Escher"],
- ["Futurism, in the style of M.C. Escher"],
- ["Abstract Art, in the style of M.C. Escher"],
- ["Mannerism, in the style of Paul Klee"],
- ["Romanesque Art, in the style of Leonardo da Vinci"],
- ["High Renaissance, in the style of Rembrandt"],
- ["Magic Realism, in the style of Gustave Dore"],
- ["Realism, in the style of Jean-Michel Basquiat"],
- ["Art Nouveau, in the style of Paul Gauguin"],
- ["Avant-garde, in the style of Pierre-Auguste Renoir"],
- ["Baroque, in the style of Edward Hopper"],
- ["Post-Impressionism, in the style of Wassily Kandinsky"],
- ["Naturalism, in the style of Rene Magritte"],
- ["Constructivism, in the style of Paul Cezanne"],
- ["Abstract Expressionism, in the style of Henri Matisse"],
- ["Pop Art, in the style of Vincent van Gogh"],
- ["Futurism, in the style of Wassily Kandinsky"],
- ["Futurism, in the style of Zdzislaw Beksinski"],
- ['Surrealism, in the style of Salvador Dali'],
- ["Aaron Wacker, oil on canvas"],
- ["abstract"],
- ["landscape"],
- ["portrait"],
- ["sculpture"],
- ["genre painting"],
- ["installation"],
- ["photo"],
- ["figurative"],
- ["illustration"],
- ["still life"],
- ["history painting"],
- ["cityscape"],
- ["marina"],
- ["animal painting"],
- ["design"],
- ["calligraphy"],
- ["symbolic painting"],
- ["graffiti"],
- ["performance"],
- ["mythological painting"],
- ["battle painting"],
- ["self-portrait"],
- ["Impressionism, oil on canvas"]
- ],
- title='Art Generator and Style Mixer from 🧠 Cloob and 🎨 WikiArt - Visual Art Encyclopedia:',
- description="Trained on images from the [WikiArt](https://www.wikiart.org/) dataset, comprised of visual arts",
- article = 'Model used: [model card](https://huggingface.co/huggan/distill-ccld-wa).'
-
-)
-iface.launch(enable_queue=True) # , debug=True for colab debugging
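One detail of `generate()` above worth spelling out: the zero (unconditional) CLOOB embedding receives the weight `1 - sum(weights)`, so with the default prompt weight of 3 the blended prediction is extrapolated away from the unconditional output, classifier-free-guidance style. A toy sketch of that blend, with invented shapes and numbers and the batch dimension dropped:

```python
import torch

prompt_weights = [3.0]                                    # default_weight for a single text prompt
weights = torch.tensor([1.0 - sum(prompt_weights), *prompt_weights])
print(weights)                                            # tensor([-2., 3.]) -> sums to 1

# Fake per-condition outputs [uncond, prompt] with shape (n_conds, C, H, W),
# combined the same way cfg_model_fn does with its weighted sum.
vs = torch.randn(len(weights), 4, 32, 32)
v = vs.mul(weights[:, None, None, None]).sum(0)
print(v.shape)                                            # torch.Size([4, 32, 32])
```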
diff --git a/spaces/doluvor/faster-whisper-webui/src/hooks/subTaskProgressListener.py b/spaces/doluvor/faster-whisper-webui/src/hooks/subTaskProgressListener.py
deleted file mode 100644
index 9a8eaa876fcd18032875d67535e0558494842c60..0000000000000000000000000000000000000000
--- a/spaces/doluvor/faster-whisper-webui/src/hooks/subTaskProgressListener.py
+++ /dev/null
@@ -1,37 +0,0 @@
-from src.hooks.progressListener import ProgressListener
-
-from typing import Union
-
-class SubTaskProgressListener(ProgressListener):
- """
- A sub task listener that reports the progress of a sub task to a base task listener
- Parameters
- ----------
- base_task_listener : ProgressListener
- The base progress listener to accumulate overall progress in.
- base_task_total : float
- The maximum total progress that will be reported to the base progress listener.
- sub_task_start : float
- The starting progress of a sub task, in respect to the base progress listener.
- sub_task_total : float
- The total amount of progress a sub task will report to the base progress listener.
- """
- def __init__(
- self,
- base_task_listener: ProgressListener,
- base_task_total: float,
- sub_task_start: float,
- sub_task_total: float,
- ):
- self.base_task_listener = base_task_listener
- self.base_task_total = base_task_total
- self.sub_task_start = sub_task_start
- self.sub_task_total = sub_task_total
-
- def on_progress(self, current: Union[int, float], total: Union[int, float]):
- sub_task_progress_frac = current / total
- sub_task_progress = self.sub_task_start + self.sub_task_total * sub_task_progress_frac
- self.base_task_listener.on_progress(sub_task_progress, self.base_task_total)
-
- def on_finished(self):
- self.base_task_listener.on_progress(self.sub_task_start + self.sub_task_total, self.base_task_total)
\ No newline at end of file
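For context, a small usage sketch of the mapping this listener performs: sub-task progress in `[0, total]` is rescaled into the `[sub_task_start, sub_task_start + sub_task_total]` slice of the base task, exactly as in `on_progress` above. The `PrintingListener` base class here is a made-up stand-in for `ProgressListener`.

```python
class PrintingListener:
    """Hypothetical minimal base listener that just prints overall progress."""
    def on_progress(self, current, total):
        print(f"overall: {current:5.1f} / {total:.1f}")
    def on_finished(self):
        print("overall: finished")

base = PrintingListener()
base_total = 100.0                   # the whole job is 100 units
sub_start, sub_total = 40.0, 30.0    # this sub task owns the 40..70 slice

for step in range(5):
    frac = step / 4                  # sub-task fraction complete
    base.on_progress(sub_start + sub_total * frac, base_total)   # 40, 47.5, 55, 62.5, 70
```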
diff --git a/spaces/dorkai/text-generation-webui-main/text-generation-webui-main/extensions/character_bias/script.py b/spaces/dorkai/text-generation-webui-main/text-generation-webui-main/extensions/character_bias/script.py
deleted file mode 100644
index 614d9ce3c5283fdaf2d52eb2cc3e12c07d000457..0000000000000000000000000000000000000000
--- a/spaces/dorkai/text-generation-webui-main/text-generation-webui-main/extensions/character_bias/script.py
+++ /dev/null
@@ -1,82 +0,0 @@
-import gradio as gr
-import os
-
-# get the current directory of the script
-current_dir = os.path.dirname(os.path.abspath(__file__))
-
-# check if the bias_options.txt file exists, if not, create it
-bias_file = os.path.join(current_dir, "bias_options.txt")
-if not os.path.isfile(bias_file):
- with open(bias_file, "w") as f:
- f.write("*I am so happy*\n*I am so sad*\n*I am so excited*\n*I am so bored*\n*I am so angry*")
-
-# read bias options from the text file
-with open(bias_file, "r") as f:
- bias_options = [line.strip() for line in f.readlines()]
-
-params = {
- "activate": True,
- "bias string": " *I am so happy*",
- "use custom string": False,
-}
-
-
-def input_modifier(string):
- """
- This function is applied to your text inputs before
- they are fed into the model.
- """
- return string
-
-
-def output_modifier(string):
- """
- This function is applied to the model outputs.
- """
- return string
-
-
-def bot_prefix_modifier(string):
- """
- This function is only applied in chat mode. It modifies
- the prefix text for the Bot and can be used to bias its
- behavior.
- """
- if params['activate']:
- if params['use custom string']:
- return f'{string} {params["custom string"].strip()} '
- else:
- return f'{string} {params["bias string"].strip()} '
- else:
- return string
-
-
-def ui():
- # Gradio elements
- activate = gr.Checkbox(value=params['activate'], label='Activate character bias')
- dropdown_string = gr.Dropdown(choices=bias_options, value=params["bias string"], label='Character bias', info='To edit the options in this dropdown edit the "bias_options.txt" file')
- use_custom_string = gr.Checkbox(value=False, label='Use custom bias textbox instead of dropdown')
- custom_string = gr.Textbox(value="", placeholder="Enter custom bias string", label="Custom Character Bias", info='To use this textbox activate the checkbox above')
-
- # Event functions to update the parameters in the backend
- def update_bias_string(x):
- if x:
- params.update({"bias string": x})
- else:
- params.update({"bias string": dropdown_string.get()})
- return x
-
- def update_custom_string(x):
- params.update({"custom string": x})
-
- dropdown_string.change(update_bias_string, dropdown_string, None)
- custom_string.change(update_custom_string, custom_string, None)
- activate.change(lambda x: params.update({"activate": x}), activate, None)
- use_custom_string.change(lambda x: params.update({"use custom string": x}), use_custom_string, None)
-
- # Group elements together depending on the selected option
- def bias_string_group():
- if use_custom_string.value:
- return gr.Group([use_custom_string, custom_string])
- else:
- return dropdown_string
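To make the effect of the extension concrete, here is a tiny standalone rendition of what `bot_prefix_modifier` does to the bot's prefix when the extension is active; the `params` values mirror the defaults above and the printed output is only illustrative.

```python
params = {
    "activate": True,
    "bias string": " *I am so happy*",
    "use custom string": False,
    "custom string": "",
}

def bot_prefix_modifier(string):
    # Same idea as the extension: append the selected bias after the bot prefix.
    if params["activate"]:
        chosen = params["custom string"] if params["use custom string"] else params["bias string"]
        return f"{string} {chosen.strip()} "
    return string

print(repr(bot_prefix_modifier("Assistant:")))   # 'Assistant: *I am so happy* '
```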
diff --git a/spaces/echarlaix/openvino-export/app.py b/spaces/echarlaix/openvino-export/app.py
deleted file mode 100644
index 883a686fa5c2997078430799ede040fa8e8b6947..0000000000000000000000000000000000000000
--- a/spaces/echarlaix/openvino-export/app.py
+++ /dev/null
@@ -1,132 +0,0 @@
-import csv
-import os
-from datetime import datetime
-from typing import Optional, Union
-import gradio as gr
-from huggingface_hub import HfApi, Repository
-from export import convert
-
-
-DATASET_REPO_URL = "https://huggingface.co/datasets/optimum/exporters"
-DATA_FILENAME = "data.csv"
-DATA_FILE = os.path.join("openvino", DATA_FILENAME)
-HF_TOKEN = os.environ.get("HF_WRITE_TOKEN")
-DATA_DIR = "exporters_data"
-
-repo = None
-if HF_TOKEN:
- repo = Repository(local_dir=DATA_DIR, clone_from=DATASET_REPO_URL, token=HF_TOKEN)
-
-
-def export(token: str, model_id: str, task: str) -> str:
- if token == "" or model_id == "":
- return """
- ### Invalid input 🐞
- Please fill in a token and a model name.
- """
- try:
- api = HfApi(token=token)
-
- error, commit_info = convert(api=api, model_id=model_id, task=task, force=False)
- if error != "0":
- return error
-
- print("[commit_info]", commit_info)
-
- # save in a private dataset
- if repo is not None:
- repo.git_pull(rebase=True)
- with open(os.path.join(DATA_DIR, DATA_FILE), "a") as csvfile:
- writer = csv.DictWriter(csvfile, fieldnames=["model_id", "pr_url", "time"])
- writer.writerow(
- {
- "model_id": model_id,
- "pr_url": commit_info.pr_url,
- "time": str(datetime.now()),
- }
- )
- commit_url = repo.push_to_hub()
- print("[dataset]", commit_url)
-
- return f"#### Success 🔥 Yay! This model was successfully exported and a PR was open using your token, here: [{commit_info.pr_url}]({commit_info.pr_url})"
- except Exception as e:
- return f"#### Error: {e}"
-
-
-TTILE_IMAGE = """
-
-
-
-"""
-
-TITLE = """
-
-
- Export your Transformers and Diffusers model to OpenVINO with 🤗 Optimum Intel (experimental)
-
-
-"""
-
-DESCRIPTION = """
-This Space automatically exports 🤗 Transformers and Diffusers PyTorch models hosted on the Hugging Face Hub to the OpenVINO format.
-
-Once exported, you will be able to load the resulting model with the [🤗 Optimum Intel](https://huggingface.co/docs/optimum/intel/inference) library.
-
-To export your model, the steps are as follows:
-- Paste a read-access token from [https://huggingface.co/settings/tokens](https://huggingface.co/settings/tokens). Read access is enough given that we will open a PR against the source repo.
-- Input a model id from the Hub (for example: [distilbert-base-uncased-finetuned-sst-2-english](https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english))
-- Click "Export"
-- That’s it! You’ll get feedback if it works or not, and if it worked, you’ll get the URL of the opened PR 🔥
-"""
-
-with gr.Blocks() as demo:
- gr.HTML(TITLE_IMAGE)
- gr.HTML(TITLE)
-
- with gr.Row():
- with gr.Column(scale=50):
- gr.Markdown(DESCRIPTION)
-
- with gr.Column(scale=50):
- input_token = gr.Textbox(
- max_lines=1,
- label="Hugging Face token",
- )
- input_model = gr.Textbox(
- max_lines=1,
- label="Model name",
- placeholder="distilbert-base-uncased-finetuned-sst-2-english",
- )
- input_task = gr.Textbox(
- value="auto",
- max_lines=1,
- label='Task (can be left to "auto", will be automatically inferred)',
- )
-
- btn = gr.Button("Export")
- output = gr.Markdown(label="Output")
-
- btn.click(
- fn=export,
- inputs=[input_token, input_model, input_task],
- outputs=output,
- )
-
-
-demo.launch()
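As a follow-up to the DESCRIPTION above, a hedged sketch of the "load the resulting model" step with 🤗 Optimum Intel. It assumes a recent `optimum[openvino]` install; `export=True` converts on the fly, whereas a repo that already contains the OpenVINO IR (for example after the PR opened by this Space is merged) can be loaded without it. The model id is just the example shown in the UI.

```python
from transformers import AutoTokenizer, pipeline
from optimum.intel import OVModelForSequenceClassification

model_id = "distilbert-base-uncased-finetuned-sst-2-english"

# export=True runs the PyTorch -> OpenVINO conversion locally if no IR is present.
model = OVModelForSequenceClassification.from_pretrained(model_id, export=True)
tokenizer = AutoTokenizer.from_pretrained(model_id)

classifier = pipeline("text-classification", model=model, tokenizer=tokenizer)
print(classifier("This Space makes exporting painless."))
```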
diff --git a/spaces/ennet/ChatDev/chatdev/codes.py b/spaces/ennet/ChatDev/chatdev/codes.py
deleted file mode 100644
index 56cad543dc21a247ac7a7a3b8dedd160a91deb73..0000000000000000000000000000000000000000
--- a/spaces/ennet/ChatDev/chatdev/codes.py
+++ /dev/null
@@ -1,112 +0,0 @@
-import os
-import re
-
-from chatdev.utils import log_and_print_online
-import difflib
-
-class Codes:
- def __init__(self, generated_content=""):
- self.directory: str = None
- self.version: float = 1.0
- self.generated_content: str = generated_content
- self.codebooks = {}
-
- def extract_filename_from_line(lines):
- file_name = ""
- for candidate in re.finditer(r"(\w+\.\w+)", lines, re.DOTALL):
- file_name = candidate.group()
- file_name = file_name.lower()
- return file_name
-
- def extract_filename_from_code(code):
- file_name = ""
- regex_extract = r"class (\S+?):\n"
- matches_extract = re.finditer(regex_extract, code, re.DOTALL)
- for match_extract in matches_extract:
- file_name = match_extract.group(1)
- file_name = file_name.lower().split("(")[0] + ".py"
- return file_name
-
- if generated_content != "":
- regex = r"(.+?)\n```.*?\n(.*?)```"
- matches = re.finditer(regex, self.generated_content, re.DOTALL)
- for match in matches:
- code = match.group(2)
- if "CODE" in code:
- continue
- group1 = match.group(1)
- filename = extract_filename_from_line(group1)
- if "__main__" in code:
- filename = "main.py"
- if filename == "": # post-processing
- filename = extract_filename_from_code(code)
- assert filename != ""
- if filename is not None and code is not None and len(filename) > 0 and len(code) > 0:
- self.codebooks[filename] = self._format_code(code)
-
- def _format_code(self, code):
- code = "\n".join([line for line in code.split("\n") if len(line.strip()) > 0])
- return code
-
- def _update_codes(self, generated_content):
- new_codes = Codes(generated_content)
- differ = difflib.Differ()
- for key in new_codes.codebooks.keys():
- if key not in self.codebooks.keys() or self.codebooks[key] != new_codes.codebooks[key]:
- update_codes_content = "**[Update Codes]**\n\n"
- update_codes_content += "{} updated.\n".format(key)
- old_codes_content = self.codebooks[key] if key in self.codebooks.keys() else "# None"
- new_codes_content = new_codes.codebooks[key]
-
- lines_old = old_codes_content.splitlines()
- lines_new = new_codes_content.splitlines()
-
- unified_diff = difflib.unified_diff(lines_old, lines_new, lineterm='', fromfile='Old', tofile='New')
- unified_diff = '\n'.join(unified_diff)
- update_codes_content = update_codes_content + "\n\n" + """```
-'''
-
-'''\n""" + unified_diff + "\n```"
-
- log_and_print_online(update_codes_content)
- self.codebooks[key] = new_codes.codebooks[key]
-
- def _rewrite_codes(self, git_management) -> None:
- directory = self.directory
- rewrite_codes_content = "**[Rewrite Codes]**\n\n"
- if os.path.exists(directory) and len(os.listdir(directory)) > 0:
- self.version += 1.0
- if not os.path.exists(directory):
- os.mkdir(self.directory)
- rewrite_codes_content += "{} Created\n".format(directory)
-
- for filename in self.codebooks.keys():
- filepath = os.path.join(directory, filename)
- with open(filepath, "w", encoding="utf-8") as writer:
- writer.write(self.codebooks[filename])
- rewrite_codes_content += os.path.join(directory, filename) + " Wrote\n"
-
- if git_management:
- if self.version == 1.0:
- os.system("cd {}; git init".format(self.directory))
- os.system("cd {}; git add .".format(self.directory))
- os.system("cd {}; git commit -m \"{}\"".format(self.directory, self.version))
-
- log_and_print_online(rewrite_codes_content)
-
- def _get_codes(self) -> str:
- content = ""
- for filename in self.codebooks.keys():
- content += "{}\n```{}\n{}\n```\n\n".format(filename,
- "python" if filename.endswith(".py") else filename.split(".")[
- -1], self.codebooks[filename])
- return content
-
- def _load_from_hardware(self, directory) -> None:
- assert len([filename for filename in os.listdir(directory) if filename.endswith(".py")]) > 0
- for root, directories, filenames in os.walk(directory):
- for filename in filenames:
- if filename.endswith(".py"):
- code = open(os.path.join(root, filename), "r", encoding="utf-8").read()  # join with the walk root so files in subdirectories open correctly
- self.codebooks[filename] = self._format_code(code)
- log_and_print_online("{} files read from {}".format(len(self.codebooks.keys()), directory))
diff --git a/spaces/epexVfeibi/Imagedeblurr/ALi Universal-Fixer V1.4b.rarl _TOP_.md b/spaces/epexVfeibi/Imagedeblurr/ALi Universal-Fixer V1.4b.rarl _TOP_.md
deleted file mode 100644
index 4c38968e82e55a0c9ebe3b83a094caf33105bcb7..0000000000000000000000000000000000000000
--- a/spaces/epexVfeibi/Imagedeblurr/ALi Universal-Fixer V1.4b.rarl _TOP_.md
+++ /dev/null
@@ -1,6 +0,0 @@
-ALi Universal-Fixer V1.4b.rarl
-Download Zip ––– https://jinyurl.com/2uErvT
-
-I'd like to now move to the events of -- in Knin on 4 ... that the real cleansing ... villages of Kistanje and Djevrske, which were virtually empty. ... I can speak to that, but the burned-out vehicle made it impossible for the column to take the road ... need for, but here, a guy got killed while fixing the power-transmission line just. 4d29de3e1b
-
-
-
diff --git a/spaces/ericjuliantooo/paraphrase/README.md b/spaces/ericjuliantooo/paraphrase/README.md
deleted file mode 100644
index 8da9fc4706c8a1e7204e89bcffb23df1c74b61ff..0000000000000000000000000000000000000000
--- a/spaces/ericjuliantooo/paraphrase/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-title: Paraphrase
-emoji: 🦀
-colorFrom: red
-colorTo: indigo
-sdk: streamlit
-sdk_version: 1.2.0
-app_file: app.py
-pinned: false
-python_version: 3.9.5
-duplicated_from: aryadytm/paraphrase
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
diff --git a/spaces/ewave/Image-Animation-using-Thin-Plate-Spline-Motion-Model/README.md b/spaces/ewave/Image-Animation-using-Thin-Plate-Spline-Motion-Model/README.md
deleted file mode 100644
index 9af54dca9f1956d33877bf7df09b34c2d6ddeeaf..0000000000000000000000000000000000000000
--- a/spaces/ewave/Image-Animation-using-Thin-Plate-Spline-Motion-Model/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Image Animation Using Thin Plate Spline Motion Model
-emoji: 👁
-colorFrom: indigo
-colorTo: indigo
-sdk: gradio
-sdk_version: 3.0.19
-app_file: app.py
-pinned: false
-duplicated_from: CVPR/Image-Animation-using-Thin-Plate-Spline-Motion-Model
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/facebook/Hokkien_Demo_on_GPU/README.md b/spaces/facebook/Hokkien_Demo_on_GPU/README.md
deleted file mode 100644
index b8c0c64468d05efefb50c466276f0c1455e58f74..0000000000000000000000000000000000000000
--- a/spaces/facebook/Hokkien_Demo_on_GPU/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Hokkien Demo With Spaces Gpu
-emoji: 🎙️
-colorFrom: purple
-colorTo: pink
-sdk: gradio
-sdk_version: 3.14.0
-app_file: app.py
-pinned: false
-license: cc-by-nc-4.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/facebook/ov-seg/open_vocab_seg/data/datasets/register_voc_seg.py b/spaces/facebook/ov-seg/open_vocab_seg/data/datasets/register_voc_seg.py
deleted file mode 100644
index b8c2be16f4bb5348de8f1051f3579e02e362488f..0000000000000000000000000000000000000000
--- a/spaces/facebook/ov-seg/open_vocab_seg/data/datasets/register_voc_seg.py
+++ /dev/null
@@ -1,62 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-import os
-
-from detectron2.data import DatasetCatalog, MetadataCatalog
-from detectron2.data.datasets import load_sem_seg
-
-PASCALVOC20_NAMES = (
- "aeroplane",
- "bicycle",
- "bird",
- "boat",
- "bottle",
- "bus",
- "car",
- "cat",
- "chair",
- "cow",
- "diningtable",
- "dog",
- "horse",
- "motorbike",
- "person",
- "pottedplant",
- "sheep",
- "sofa",
- "train",
- "tvmonitor",
-)
-
-def _get_voc_meta(cat_list):
- ret = {
- "stuff_classes": cat_list,
- }
- return ret
-
-
-def register_pascalvoc(root):
- root = os.path.join(root, "VOCdevkit/VOC2012")
- meta = _get_voc_meta(PASCALVOC20_NAMES)
-
- for name, image_dirname, sem_seg_dirname in [
- ("val", "JPEGImages", "annotations_detectron2/val"),
- ]:
- image_dir = os.path.join(root, image_dirname)
- gt_dir = os.path.join(root, sem_seg_dirname)
- all_name = f"pascalvoc20_sem_seg_{name}"
- DatasetCatalog.register(
- all_name,
- lambda x=image_dir, y=gt_dir: load_sem_seg(
- y, x, gt_ext="png", image_ext="jpg"
- ),
- )
- MetadataCatalog.get(all_name).set(
- image_root=image_dir,
- sem_seg_root=gt_dir,
- evaluator_type="sem_seg",
- ignore_label=255,
- **meta,
- )
-
-_root = os.getenv("DETECTRON2_DATASETS", "datasets")
-register_pascalvoc(_root)
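A hedged usage sketch for the registration above: importing this module triggers `register_pascalvoc` via the module-level call, after which the split can be fetched by name through detectron2's catalogs. It assumes `DETECTRON2_DATASETS` really points at a directory containing `VOCdevkit/VOC2012` with the `annotations_detectron2/val` ground truth laid out as expected.

```python
import open_vocab_seg.data.datasets.register_voc_seg  # noqa: F401 -- runs register_pascalvoc on import
from detectron2.data import DatasetCatalog, MetadataCatalog

name = "pascalvoc20_sem_seg_val"
dataset_dicts = DatasetCatalog.get(name)   # list of {"file_name": ..., "sem_seg_file_name": ...}
meta = MetadataCatalog.get(name)

print(len(dataset_dicts), "validation images")
print(meta.stuff_classes[:3], "... ignore_label =", meta.ignore_label)
```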
diff --git a/spaces/fastx/customer-support-chatbot/README.md b/spaces/fastx/customer-support-chatbot/README.md
deleted file mode 100644
index 7440803d0ef37dae082f64f0cb6ffd389c4eaf00..0000000000000000000000000000000000000000
--- a/spaces/fastx/customer-support-chatbot/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Customer Support Chatbot
-emoji: 🐢
-colorFrom: pink
-colorTo: purple
-sdk: gradio
-sdk_version: 3.27.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/fatiXbelha/sd/Download and Install GTA 5 - Grand Theft Auto APK on Your PC or Mac.md b/spaces/fatiXbelha/sd/Download and Install GTA 5 - Grand Theft Auto APK on Your PC or Mac.md
deleted file mode 100644
index a2b706ced99db7b22826f90f4c72c4dd935b1d63..0000000000000000000000000000000000000000
--- a/spaces/fatiXbelha/sd/Download and Install GTA 5 - Grand Theft Auto APK on Your PC or Mac.md
+++ /dev/null
@@ -1,148 +0,0 @@
-
-GTA 5 Download APK PC Windows 10: How to Play Grand Theft Auto V on Your Computer
- Introduction
- Grand Theft Auto V, or GTA 5, is one of the most popular and critically acclaimed video games of all time. Released in 2013 by Rockstar Games, GTA 5 is an open-world action-adventure game that lets you explore the fictional city of Los Santos and its surrounding areas. You can play as one of three protagonists, each with their own story and personality, and switch between them at any time. You can also engage in various activities, such as missions, heists, races, minigames, and more.
- While GTA 5 was originally designed for consoles, such as PlayStation and Xbox, it is also possible to play it on your PC. In fact, playing GTA 5 on PC has many advantages, such as better graphics, smoother performance, more customization options, and access to mods. However, you may be wondering how to download GTA 5 APK for PC Windows 10. In this article, we will show you two methods to do so: using BlueStacks App Player or using Rockstar Games Launcher. Let's get started!
-gta 5 download apk pc windows 10
-Download File > https://urllie.com/2uNFDf
- Method 1: Using BlueStacks App Player
- What is BlueStacks?
- BlueStacks is an app player that allows you to run Android games and apps on your PC or Mac. It has over 500 million users and supports over 2 million Android games and apps. With BlueStacks, you can enjoy your favorite Android titles on a larger screen and with better controls. You can also take advantage of features such as macros, multi-instance, script, eco mode, and more.
- How to install BlueStacks on your PC?
- To install BlueStacks on your PC, follow these steps:
-
-- Go to the official website of BlueStacks () and click on the "Download" button.
-- Once the download is complete, run the installer and follow the instructions.
-- After the installation is done, launch BlueStacks and sign in with your Google account.
-- You are now ready to use BlueStacks on your PC.
-
- How to download and play GTA 5 APK on BlueStacks?
- To download and play GTA 5 APK on BlueStacks, follow these steps:
-
-- In the search bar at the top right corner of BlueStacks, type "GTA 5" and hit enter.
-- You will see a list of results from various sources. Choose the one that says "GTA 5 - Grand Theft Auto" by Rockstar Games ().
-- Click on the "Install" button and wait for the download to finish.
-- Once the installation is complete, click on the "GTA 5 - Grand Theft Auto" icon on the home screen of BlueStacks.
-- You can now start playing GTA 5 APK on your PC.
-
- What are the benefits of playing GTA 5 APK on BlueStacks?
-Playing GTA 5 APK on BlueStacks has many benefits, such as:
-
-- You can enjoy the stunning graphics and realistic physics of GTA 5 on a larger screen and with higher resolution.
-- You can use your keyboard and mouse to control your character and vehicles, which gives you more accuracy and comfort.
-- You can customize your settings and preferences, such as graphics, sound, language, and more.
-- You can access the Google Play Store and download other Android games and apps on BlueStacks.
-- You can use the BlueStacks features to enhance your gaming experience, such as macros, multi-instance, script, eco mode, and more.
-
- Method 2: Using Rockstar Games Launcher
- What is Rockstar Games Launcher?
- Rockstar Games Launcher is an official app from Rockstar Games that allows you to download, install, and play their games on your PC. It also gives you access to the latest news, updates, offers, and rewards from Rockstar Games. You can also use it to manage your Rockstar Games Social Club account and connect with other players.
- How to install Rockstar Games Launcher on your PC?
- To install Rockstar Games Launcher on your PC, follow these steps:
-gta 5 apk for pc windows 10 free download
-gta 5 android apk download for pc windows 10
-gta 5 pc windows 10 apk full version download
-gta 5 mobile apk download for pc windows 10
-gta 5 grand theft auto apk for pc windows 10 download
-gta 5 apk + obb download for pc windows 10
-gta 5 online apk download for pc windows 10
-gta 5 mod apk download for pc windows 10
-gta 5 apk file download for pc windows 10
-gta 5 beta apk download for pc windows 10
-gta 5 compressed apk download for pc windows 10
-gta 5 zip apk download for pc windows 10
-gta 5 lite apk download for pc windows 10
-gta 5 real apk download for pc windows 10
-gta 5 offline apk download for pc windows 10
-gta 5 rockstar games apk download for pc windows 10
-gta 5 launcher apk download for pc windows 10
-gta 5 emulator apk download for pc windows 10
-gta 5 premium edition apk download for pc windows 10
-gta 5 ultimate mod apk download for pc windows 10
-gta 5 cheats apk download for pc windows 10
-gta 5 hack apk download for pc windows 10
-gta 5 bluestacks apk download for pc windows 10
-gta 5 san andreas apk download for pc windows 10
-gta 5 vice city apk download for pc windows 10
-gta v (gta sa) mod apk + data obb free full android game download for pc windows
-how to install and play gta v (gta sa) mod on your android device or pc with bluestacks emulator
-how to get and run gta v on your android phone or tablet using cloud gaming service
-how to play gta v online multiplayer mode on your android device or pc with bluestacks emulator
-how to fix common issues and errors while playing gta v on your android device or pc with bluestacks emulator
-best settings and tips to optimize the performance and graphics of gta v on your android device or pc with bluestacks emulator
-best mods and cheats to enhance the gameplay and fun of gta v on your android device or pc with bluestacks emulator
-best websites and sources to download safe and reliable gta v apk files and obb data for your android device or pc with bluestacks emulator
-best alternatives and similar games to gta v that you can play on your android device or pc with bluestacks emulator
-best reviews and ratings of gta v by critics and users who played it on their android device or pc with bluestacks emulator
-
-- Go to the official website of Rockstar Games Launcher () and click on the "Download for Windows" button.
-- Once the download is complete, run the installer and follow the instructions.
-- After the installation is done, launch Rockstar Games Launcher and sign in with your Rockstar Games Social Club account. If you don't have one, you can create one for free.
-- You are now ready to use Rockstar Games Launcher on your PC.
-
- How to download and play GTA 5 on Rockstar Games Launcher?
- To download and play GTA 5 on Rockstar Games Launcher, follow these steps:
-
-- In the main menu of Rockstar Games Launcher, click on the "Store" tab.
-- You will see a list of games available from Rockstar Games. Find GTA 5 and click on it.
-- You will see the details and price of GTA 5. If you want to buy it, click on the "Buy Now" button and complete the payment process. If you already own it, click on the "Redeem Code" button and enter your code.
-- Once you have GTA 5 in your library, click on it and then click on the "Install" button. Choose the location where you want to install GTA 5 and wait for the download to finish.
-- Once the installation is complete, click on the "Play" button and start playing GTA 5 on your PC.
-
- What are the benefits of playing GTA 5 on Rockstar Games Launcher?
- Playing GTA 5 on Rockstar Games Launcher has many benefits, such as:
-
-- You can play GTA 5 with the best possible graphics and performance on your PC.
-- You can access all the features and content of GTA 5, such as online mode, story mode, DLCs, mods, and more.
-- You can get exclusive rewards and offers from Rockstar Games, such as free games, discounts, bonuses, and more.
-- You can update GTA 5 automatically and get the latest patches and fixes from Rockstar Games.
-- You can interact with other players and join the Rockstar Games community.
-
- Conclusion
- GTA 5 is an amazing game that you can enjoy on your PC. In this article, we showed you two methods to download GTA 5 APK for PC Windows 10: using BlueStacks App Player or using Rockstar Games Launcher. Both methods have their own advantages and disadvantages, so you can choose the one that suits you best. We hope this article was helpful and informative. Happy gaming!
- FAQs
- Here are some frequently asked questions about GTA 5 APK for PC Windows 10:
-
-- Is GTA 5 APK for PC Windows 10 free?
-No, GTA 5 APK for PC Windows 10 is not free. You have to buy it from either BlueStacks or Rockstar Games Launcher. However, sometimes there are promotions or offers that may give you GTA 5 for free or at a discounted price.
- - Is GTA 5 APK for PC Windows 10 safe?
-Yes, GTA 5 APK for PC Windows 10 is safe if you download it from a trusted source, such as BlueStacks or Rockstar Games Launcher. However, you should be careful of downloading GTA 5 APK from unknown or suspicious websites, as they may contain malware or viruses that can harm your PC.
- - Can I play GTA 5 APK for PC Windows 10 offline?
-Yes, you can play GTA 5 APK for PC Windows 10 offline if you have already downloaded and installed it on your PC. You can play the story mode or the single-player mode without an internet connection. However, you will need an internet connection to play the online mode or the multiplayer mode, as well as to access some features and content of GTA 5.
- - Can I play GTA 5 APK for PC Windows 10 with my friends?
-Yes, you can play GTA 5 APK for PC Windows 10 with your friends if you have an internet connection and a Rockstar Games Social Club account. You can join or create a session with your friends and play together in the online mode or the multiplayer mode. You can also chat, voice call, and share screenshots and videos with your friends through the Rockstar Games Launcher.
- - Can I use mods in GTA 5 APK for PC Windows 10?
-Yes, you can use mods in GTA 5 APK for PC Windows 10 if you download them from a reliable source and follow the instructions carefully. Mods are modifications or additions that change or enhance the gameplay, graphics, features, or content of GTA 5. However, you should be aware that using mods may affect the performance, stability, or compatibility of GTA 5. Also, you should not use mods in the online mode or the multiplayer mode, as they may violate the terms of service of Rockstar Games and result in a ban or suspension.
- - What are the system requirements for GTA 5 APK for PC Windows 10?
-The system requirements for GTA 5 APK for PC Windows 10 are as follows:
-
-
-Minimum
-Recommended
-
-
-OS: Windows 10 (64-bit)
-OS: Windows 10 (64-bit)
-
-
-CPU: Intel Core 2 Quad Q6600 @ 2.40GHz / AMD Phenom 9850 Quad-Core Processor @ 2.5GHz
-CPU: Intel Core i5-3470 @ 3.2GHz / AMD X8 FX-8350 @ 4GHz
-
-
-RAM: 4 GB
-RAM: 8 GB
-
-
-GPU: NVIDIA GeForce 9800 GT / AMD Radeon HD 4870 (1 GB)
-GPU: NVIDIA GeForce GTX 660 / AMD Radeon HD 7870 (2 GB)
-
-
-HDD: 72 GB
-HDD: 72 GB
-
-
-Sound Card: DirectX Compatible
-Sound Card: DirectX Compatible
-
-
197e85843d
-
-
\ No newline at end of file
diff --git a/spaces/fatiXbelha/sd/Download and Install MX Player APK on Android - Easy Guide.md b/spaces/fatiXbelha/sd/Download and Install MX Player APK on Android - Easy Guide.md
deleted file mode 100644
index 857feecea53deea036582392730499865e520611..0000000000000000000000000000000000000000
--- a/spaces/fatiXbelha/sd/Download and Install MX Player APK on Android - Easy Guide.md
+++ /dev/null
@@ -1,120 +0,0 @@
-
-MX Player 2016 APK Download: How to Install and Use the Best Video Player for Android
-If you are looking for a powerful, versatile, and user-friendly video player for your Android device, you should definitely try MX Player. MX Player is one of the most popular and widely used video players on Android, with over 1 billion downloads on Google Play Store. It can play almost any video file you throw at it, including H.265 encoded videos, with advanced features such as hardware acceleration, multi-core decoding, subtitles support, gestures control, picture-in-picture mode, background playback mode, and more. In this article, we will show you how to download and install MX Player 2016 APK on your Android device, and how to use it to enjoy your videos in the best possible way.
- What is MX Player and why you should download it
-MX Player is a free media player app for Android that specializes in videos but also capable of playing audio. It was developed by J2 Interactive, a South Korean company that was acquired by Times Internet in 2018. MX Player has been praised for its performance, compatibility, functionality, and design by many users and critics alike. Some of the features that make MX Player stand out from other video players are:
-mx player 2016 apk download
-Download Zip ☆☆☆ https://urllie.com/2uNBK8
-
-- It supports a wide range of video formats, codecs, containers, resolutions, and quality levels. You can play AVI, FLV, MKV, MOV, MP4, WEBM, WMV, 3GP, MPEG-2/4/TS/PS/TP/TRP/M2TS/MTS/M2T/MOD/TOD/VOB/ISO/IFO/DAT/RM/RMVB/OGM/OGV/VP8/VP9/H.264/H.265/HEVC/XVID/DIVX/MJPEG/THEORA/DV/DIF/MJPGM/MJPG/SVQ3/SVQ5/H263/H261/MPV/MPEG1/MPEG4/MSVIDEO1/CINEPAK/MSZH/ZLIB/SNOW/QTRLE/QDRAW /FLIC/SMC/NUV/CDXL/ROQ/IV32/IV41/IV50/RV10/RV20/RV30/RV40/LOCO/ZMBV/VCR1/VCR2/VCR3/VCR4/VCR5/VCR6/VCR7/VCR8/VCR9/ASV1/ASV2/FFV1/FFVHUFF/CYUV/HUFFYUV/MSMPEG4V1/MSMPEG4V2/MSMPEG4V3/WMV1/WMV2/WMV3/VC1/WVC1/WMVA/WVP2 and more.
-- It supports a variety of subtitle formats, such as SRT, SUB, SSA, ASS, IDX, MPL, TXT, PSB, SMI, SMIL, RT, AQT, JSS, JS, UTF8, UTF-8, ASCII, UNICODE and more. You can also load subtitles from online sources or sync them manually or automatically with the video.
-- It supports hardware acceleration and multi-core decoding for faster and smoother playback. You can choose between SW (software), HW (hardware), and HW+ (hardware plus) modes depending on your device capabilities and video characteristics. HW+ mode uses a custom codec that can play videos that are not supported by the default Android codec.
-- It supports gestures control for easy and intuitive operation. You can swipe up or down on the left or right side of the screen to adjust the volume or brightness. You can swipe left or right to seek forward or backward. You can pinch to zoom in or out. You can double tap to play or pause. You can also customize the gestures according to your preference.
-- It supports picture-in-picture mode and background playback mode for multitasking. You can watch videos in a small window while using other apps or lock the screen. You can also listen to the audio of the video while doing other things on your device.
-
-MX Player has won many awards and recognition for its excellence and innovation. Some of them are:
-
-- Editor's Choice app on Google Play Store
-- Best Android Video Player app by Android Authority
-- Best Video Player app by TechRadar
-- Best App of 2016 by Google Play India
-
- How to download and install MX Player 2016 APK on your Android device
-If you want to enjoy all the features and benefits of MX Player, you need to download and install the latest version of the app on your Android device. Here are the steps you need to follow:
-
-- Go to the official website of MX Player at https://www.mxplayer.in/download and choose the version that suits your device. You can also scan the QR code on the website with your device camera to download the app directly.
-- Once you have downloaded the APK file, you need to enable unknown sources on your device settings to install it. To do this, go to Settings > Security > Unknown Sources and toggle it on.
-- Now, locate the APK file on your device storage and tap on it to start the installation process. Follow the instructions on the screen and grant the necessary permissions to the app.
-- After the installation is complete, you will see the app icon on your home screen or app drawer. Tap on it to launch MX Player and enjoy your videos.
-
-The following screenshot shows how the app icon looks like:
-mx player 2016 apk free download
-mx player 2016 apk latest version
-mx player 2016 apk for android
-mx player 2016 apk old version
-mx player 2016 apk mod
-mx player 2016 apk pro
-mx player 2016 apk full
-mx player 2016 apk cracked
-mx player 2016 apk premium
-mx player 2016 apk no ads
-mx player 2016 apk update
-mx player 2016 apk offline installer
-mx player 2016 apk mirror
-mx player 2016 apk file
-mx player 2016 apk direct download
-mx player 2016 apk download for pc
-mx player 2016 apk download uptodown
-mx player 2016 apk download apkpure
-mx player 2016 apk download androidapksfree
-mx player 2016 apk download apkmirror
-mx player 2016 apk download softpedia[^2^]
-mx player 2016 apk download apkmart[^1^]
-mx player 2016 apk download apkmody[^1^]
-mx player 2016 apk download apksfull[^1^]
-mx player 2016 apk download apktada[^1^]
-mx player 2016 app download
-mx player 2016 app free download
-mx player 2016 app latest version
-mx player 2016 app for android
-mx player 2016 app old version
-mx player 2016 app mod
-mx player 2016 app pro
-mx player 2016 app full
-mx player 2016 app cracked
-mx player 2016 app premium
-mx player 2016 app no ads
-mx player 2016 app update
-mx player 2016 app offline installer
-mx player 2016 app mirror
-mx player 2016 app file
-mx player 2016 app direct download
-mx player 2016 app download for pc
-mx player 2016 app download uptodown
-mx player 2016 app download apkpure
-mx player 2016 app download androidapksfree
-mx player 2016 app download apkmirror
-mx player 2016 app download softpedia[^2^]
-mx player 2016 app download apkmart[^1^]
-mx player 2016 app download apkmody[^1^]
-
- How to use MX Player to play videos in different formats, with subtitles, gestures, and more
-Now that you have installed MX Player on your device, you can start using it to play your videos in the best possible way. Here are some tips and tricks on how to use MX Player to enhance your video watching experience:
-
-- To browse and select videos from your device storage or external sources, you can use the built-in file explorer of MX Player. You can also use the folder view or the list view to organize your videos by name, date, size, or format. You can also create playlists and favorites for easy access.
-- To adjust the playback speed, volume, brightness, aspect ratio, and orientation with gestures, you can simply swipe on the screen while playing a video. You can swipe up or down on the left side to change the brightness, and on the right side to change the volume. You can swipe left or right to seek forward or backward. You can pinch to zoom in or out. You can double tap to play or pause. You can also rotate your device to change the orientation.
-- To load and sync subtitles from different sources and formats, you can tap on the subtitle icon on the top right corner of the screen while playing a video. You can choose from the available subtitles on your device or online, or browse for more options. You can also adjust the subtitle size, color, position, delay, encoding, and language.
-- To use the hardware acceleration and multi-core decoding features for better performance, you can tap on the decoder icon on the top left corner of the screen while playing a video. You can choose between SW (software), HW (hardware), and HW+ (hardware plus) modes depending on your device capabilities and video characteristics. HW+ mode uses a custom codec that can play videos that are not supported by the default Android codec.
-- To use the picture-in-picture mode and background playback mode for multitasking, you can tap on the menu icon on the top right corner of the screen while playing a video. You can choose between PIP (picture-in-picture) mode and BG (background) mode from the options. PIP mode will show a small window of the video on your screen while you use other apps. BG mode will play only the audio of the video while you do other things on your device.
-
- How to customize MX Player settings and preferences
-If you want to make MX Player more suitable for your needs and preferences, you can customize its settings and options. Here are some of the things you can do:
-
-- To access the settings menu, you can tap on the menu icon on the top right corner of the screen while playing a video, and then tap on Settings. You will see different categories of settings, such as General, Player, Audio, Subtitle, Network, and About.
-- To change the theme, language, font size, and color of the app interface, you can go to General > Look and feel. You can choose from different themes, such as Light, Dark, or Black. You can also change the app language from the default system language to any of the supported languages. You can also adjust the font size and color of the app text.
-- To enable or disable notifications, ads, analytics, and permissions, you can go to General > Other. You can choose whether to receive notifications from MX Player or not. You can also choose whether to see ads on the app or not. You can also choose whether to share your usage data with MX Player or not. You can also manage the permissions that MX Player has on your device, such as storage, camera, microphone, location, etc.
-- To backup and restore your app data, you can go to General > Backup & restore. You can choose to backup your app data to your device storage or Google Drive. You can also choose to restore your app data from your device storage or Google Drive. This way, you can keep your app settings and preferences across different devices or after reinstalling the app.
-
- How to stream videos from online sources or network devices with MX Player
-If you want to watch videos that are not stored on your device but on online sources or network devices, you can use MX Player to stream them easily. Here are some of the ways you can do that:
-
-- To use the network stream feature to play videos from URLs or online platforms, you can tap on the menu icon on the top right corner of the screen while playing a video, and then tap on Network stream. You can enter the URL of the video you want to play, or choose from some of the popular online platforms that MX Player supports, such as YouTube, Facebook, Instagram, Dailymotion, Vimeo, etc.
-- To use the LAN feature to play videos from local network devices or servers, you can tap on the menu icon on the top right corner of the screen while playing a video, and then tap on LAN. You can browse and select videos from any device or server that is connected to the same Wi-Fi network as your device. You can also enter the IP address or hostname of the device or server manually.
-- To use the Chromecast feature to cast videos to your TV or other devices, you can tap on the menu icon on the top right corner of the screen while playing a video, and then tap on Chromecast. You can choose from any of the available Chromecast devices that are connected to the same Wi-Fi network as your device. You can also control the playback from your device or from the Chromecast remote.
-
- Conclusion: Summarize the main points and benefits of MX Player
-In conclusion, MX Player is a great video player app for Android that offers many features and benefits that make it stand out from other video players. It supports a wide range of video formats, codecs, containers, resolutions, and quality levels. It supports a variety of subtitle formats and sources. It supports hardware acceleration and multi-core decoding for faster and smoother playback. It supports gestures control for easy and intuitive operation. It supports picture-in-picture mode and background playback mode for multitasking. It supports streaming videos from online sources or network devices with ease. It also allows you to customize its settings and preferences according to your needs.
-If you want to enjoy all these features and benefits of MX Player, you should download and install MX Player 2016 APK on your Android device today. You will not regret it!
- FAQs
-Here are some of the frequently asked questions about MX Player:
-
-- Is MX Player free?
-Yes, MX Player is free to download and use. However, it does show ads on some parts of the app. If you want to remove ads completely, you can purchase MX Player Pro for a small fee.
-- Is MX Player safe?
-Yes, MX Player is safe to use. It does not contain any malware or viruses that could harm your device or data. However, you should always download it from its official website or Google Play Store to avoid any fake or modified versions.
-- How can I update MX Player?
-You can update MX Player by visiting its official website or Google Play Store and downloading the latest version of the app. You can also enable auto-update on your device settings to get the latest updates automatically.
-- What are the minimum requirements for MX Player?
-The minimum requirements for MX Player are Android 4.1 or higher, 1 GB of RAM, and 100 MB of free storage space. However, some features may not work on some devices or videos depending on their specifications and characteristics.
-- How can I contact MX Player support?
-You can contact MX Player support by visiting its official website and filling out the contact form. You can also send an email to mxvideoplayer@gmail.com or join the MX Player community on Facebook, Twitter, Instagram, or YouTube.
-
401be4b1e0
-
-
\ No newline at end of file
diff --git a/spaces/fatiXbelha/sd/Download and Play Growtopia on 32 bit Devices - A Multiplayer Online Adventure.md b/spaces/fatiXbelha/sd/Download and Play Growtopia on 32 bit Devices - A Multiplayer Online Adventure.md
deleted file mode 100644
index 923eef8e43c0b5f122570b0c59a6bb8a51947873..0000000000000000000000000000000000000000
--- a/spaces/fatiXbelha/sd/Download and Play Growtopia on 32 bit Devices - A Multiplayer Online Adventure.md
+++ /dev/null
@@ -1,152 +0,0 @@
-
-Growtopia 32 Bit Download: How to Play the Sandbox MMO Game on Your PC
-Do you love sandbox games where you can create your own character, craft and trade items, and build your own worlds? If so, you might want to check out Growtopia, a free-to-play MMO game with almost endless possibilities. In this article, we will show you how to download and play Growtopia 32 bit on your PC, and give you some tips and tricks to enjoy the game.
-What is Growtopia?
-Growtopia is a 2D sandbox game developed by Robinson Technologies and published by Ubisoft. It was released on November 5, 2013, for Microsoft Windows, iOS, and Android. The game features a procedurally generated world where players can grow crops, build structures, and interact with other players. Players can also create and trade unique items through crafting, and participate in thousands of mini-games, such as parkour, races, PVP battles, and ghost hunting. The game is updated monthly with new items and events.
-growtopia 32 bit download
Download Zip 🔗 https://urllie.com/2uNz3k
-Why download Growtopia 32 bit?
-The benefits of playing Growtopia on PC
-While Growtopia is available on mobile devices, playing it on PC has some advantages. For instance, you can enjoy a bigger screen, better graphics, faster performance, and more comfortable controls. You can also use keyboard shortcuts, chat easily with other players, and record or stream your gameplay. Playing Growtopia on PC can also save your battery life and data usage on your mobile device.
-The system requirements for Growtopia 32 bit
-To play Growtopia 32 bit on your PC, you need to meet the following minimum system requirements:
-growtopia 32 bit installer
-growtopia 32 bit windows 10
-growtopia 32 bit pc download
-growtopia 32 bit free download
-growtopia 32 bit version download
-growtopia 32 bit exe download
-growtopia 32 bit setup download
-growtopia 32 bit offline installer
-growtopia 32 bit full download
-growtopia 32 bit crack download
-growtopia 32 bit game download
-growtopia 32 bit steam download
-growtopia 32 bit apk download
-growtopia 32 bit android download
-growtopia 32 bit emulator download
-growtopia 32 bit mac download
-growtopia 32 bit linux download
-growtopia 32 bit ubuntu download
-growtopia 32 bit rar download
-growtopia 32 bit zip download
-growtopia 32 bit mod download
-growtopia 32 bit hack download
-growtopia 32 bit cheat download
-growtopia 32 bit trainer download
-growtopia 32 bit patch download
-growtopia 32 bit update download
-growtopia 32 bit latest version download
-growtopia 32 bit old version download
-growtopia 32 bit beta version download
-growtopia 32 bit original version download
-growtopia 32 bit official website download
-growtopia 32 bit ubisoft website download
-growtopia 32 bit softonic website download
-growtopia 32 bit filehippo website download
-growtopia 32 bit cnet website download
-growtopia 32 bit malavida website download
-growtopia 32 bit uptodown website download
-growtopia 32 bit apkpure website download
-growtopia 32 bit bluestacks website download
-growtopia 32 bit nox website download
-growtopia 32 bit direct link download
-growtopia 32 bit torrent link download
-growtopia 32 bit magnet link download
-growtopia 32 bit mediafire link download
-growtopia 32 bit google drive link download
-growtopia 32 bit mega link download
-growtopia 32 bit dropbox link download
-growtopia 32 bit onedrive link download
-growtopia 32 bit zippyshare link download
-
-| Operating System | Processor | RAM | Graphics Card | Hard Drive Space |
-| --- | --- | --- | --- | --- |
-| Windows 7, Windows 8.1 or Windows 10 | Any 1 GHz processor | 2 GB | Shared or dedicated graphics with at least 100 MB VRAM | 500 MB |
-
-If your PC meets these requirements, you are ready to download and install Growtopia 32 bit.
-How to download and install Growtopia 32 bit?
-The steps to download Growtopia from the official website
-To download Growtopia 32 bit from the official website, follow these steps:
-
-- Go to [the official website](^1^) of Growtopia.
-- Click on the "Download" button at the top right corner of the page.
-- Select "Windows" as your platform.
-- Click on the "Download Now" button.
-- Save the file "GrowSetup.exe" to your preferred location.
-
-The steps to install Growtopia on your PC
-To install Growtopia 32 bit on your PC, follow these steps:
-
-- Locate the file "GrowSetup.exe" and double-click on it.
-- Follow the instructions on the screen to complete the installation process.
-- Launch Growtopia from your desktop or start menu.
-
-How to play Growtopia on PC?
-The basic controls and gameplay tips for Growtopia on PC
-To play Growtopia on PC, you need to know the basic controls and gameplay tips. Here are some of them:
-
-- To move your character, use the arrow keys or the WASD keys.
-- To jump, press the spacebar or the up arrow key.
-- To punch, click the left mouse button or press the CTRL key.
-- To chat, press the ENTER key and type your message.
-- To access your inventory, press the I key or click the backpack icon at the bottom of the screen.
-- To use an item, drag it from your inventory to your hand slot or press the number keys 1-9.
-- To drop an item, drag it from your hand slot to the ground or press the Q key.
-- To interact with an object, punch it or click on it.
-- To grow a seed, plant it on a dirt block and water it with a watering can.
-- To craft an item, combine two seeds in a chemical combiner or splice them in a world.
-- To trade with another player, punch them or click on their name and select "Trade".
-- To visit another world, enter its name in the world lock or use a portal.
-- To create your own world, buy a world lock from the store or find one in a random world.
-- To protect your world, place a world lock and lock it with a wrench.
-- To customize your character, buy clothes from the store or other players and equip them in your inventory.
-
-The best worlds and mini-games to explore in Growtopia
-Growtopia has thousands of worlds and mini-games to explore and enjoy. Here are some of the best ones:
-
-- START: The official tutorial world where you can learn the basics of Growtopia.
-- GROWTOPIA: The official news world where you can read the latest updates and announcements.
-- WIKI: The official wiki world where you can find information and guides about Growtopia.
-- TRADE: The official trading world where you can buy and sell items with other players.
-- FASHION: The official fashion world where you can show off your outfits and win prizes.
-- PARKOUR: A popular mini-game world where you can test your skills and agility in various obstacles and challenges.
-- RACE: A popular mini-game world where you can compete with other players in different tracks and modes.
-- PVP: A popular mini-game world where you can fight with other players in different arenas and modes.
-- GHOSTHUNT: A popular mini-game world where you can hunt ghosts and earn rewards.
-- CASINO: A popular mini-game world where you can gamble with other players using gems or items.
-
- Conclusion
- Growtopia is a fun and creative sandbox MMO game that you can play on your PC. By downloading Growtopia 32 bit, you can enjoy a better gaming experience with a bigger screen, better graphics, faster performance, and more comfortable controls. You can also create and trade unique items, build your own worlds, and interact with other players. To download and play Growtopia 32 bit on your PC, just follow the steps we have provided in this article. Have fun growing your own adventure!
- FAQs
- What is the difference between Growtopia 32 bit and 64 bit?
- Growtopia 32 bit is compatible with both 32-bit and 64-bit operating systems, while Growtopia 64 bit is only compatible with 64-bit operating systems. Growtopia 64 bit may have better performance and stability than Growtopia 32 bit, but it may also have more bugs and errors. You can choose whichever version suits your PC better.
- Is Growtopia safe to download and play?
- Yes, Growtopia is safe to download and play as long as you download it from the official website or a trusted source. However, you should be careful of scammers, hackers, and phishing websites that may try to steal your account or personal information. You should also follow the rules and guidelines of the game and respect other players.
- How can I get free gems in Growtopia?
- Gems are the main currency in Growtopia that you can use to buy items, worlds, and upgrades. You can get free gems in Growtopia by doing the following:
-
-- Completing offers and surveys in the store.
-- Watching ads in the store or in the world GEMS.
-- Breaking blocks and harvesting trees in your world or other worlds.
-- Participating in events and contests.
-- Winning mini-games and prizes.
-- Receiving gifts from other players or developers.
-
- How can I contact the Growtopia support team?
- If you have any issues, questions, or feedback about Growtopia, you can contact the Growtopia support team by doing the following:
-
-- Sending an email to support@growtopiagame.com with your username, world name, and a detailed description of your problem.
-- Filling out a form on [the official website] of Growtopia under the "Support" section.
-- Posting on [the official forum] of Growtopia under the "Bugs & Glitches" or "Suggestions & Ideas" sections.
-- Messaging a moderator or developer in-game or on social media.
-
- How can I join the Growtopia community?
- If you want to join the Growtopia community and meet other players, you can do the following:
-
-- Chatting with other players in-game or on [the official Discord server] of Growtopia.
-- Following [the official social media accounts] of Growtopia on Facebook, Twitter, Instagram, YouTube, and TikTok.
-- Subscribing to [the official newsletter] of Growtopia to get exclusive news and offers.
-- Creating and sharing your own content, such as fan art, videos, blogs, or podcasts.
-- Joining or creating your own guild, clan, or team in Growtopia.
-
197e85843d
-
-
\ No newline at end of file
diff --git "a/spaces/fb700/chatglm-fitness-RLHF/crazy_functions/\344\273\243\347\240\201\351\207\215\345\206\231\344\270\272\345\205\250\350\213\261\346\226\207_\345\244\232\347\272\277\347\250\213.py" "b/spaces/fb700/chatglm-fitness-RLHF/crazy_functions/\344\273\243\347\240\201\351\207\215\345\206\231\344\270\272\345\205\250\350\213\261\346\226\207_\345\244\232\347\272\277\347\250\213.py"
deleted file mode 100644
index e57f80f1d45bd3ec23837253848f7b32a5ccd751..0000000000000000000000000000000000000000
--- "a/spaces/fb700/chatglm-fitness-RLHF/crazy_functions/\344\273\243\347\240\201\351\207\215\345\206\231\344\270\272\345\205\250\350\213\261\346\226\207_\345\244\232\347\272\277\347\250\213.py"
+++ /dev/null
@@ -1,138 +0,0 @@
-import threading
-from request_llm.bridge_all import predict_no_ui_long_connection
-from toolbox import update_ui
-from toolbox import CatchException, write_results_to_file, report_execption
-from .crazy_utils import breakdown_txt_to_satisfy_token_limit
-
-def extract_code_block_carefully(txt):
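- # Return only the text between the first and last ``` fence; if txt has no complete fenced block, return it unchanged.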
- splitted = txt.split('```')
- n_code_block_seg = len(splitted) - 1
- if n_code_block_seg <= 1: return txt
- # Otherwise strip the opening ``` and the final closing ``` and keep what is in between
- txt_out = '```'.join(splitted[1:-1])
- return txt_out
-
-
-
-def break_txt_into_half_at_some_linebreak(txt):
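- # Split txt into two halves at the middle line break and return (first_half, second_half).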
- lines = txt.split('\n')
- n_lines = len(lines)
- pre = lines[:(n_lines//2)]
- post = lines[(n_lines//2):]
- return "\n".join(pre), "\n".join(post)
-
-
-@CatchException
-def 全项目切换英文(txt, llm_kwargs, plugin_kwargs, chatbot, history, sys_prompt, web_port):
- # Step 1: clear the history to avoid input overflow
- history = []
-
- # Step 2: try to import dependencies; if any are missing, suggest how to install them
- try:
- import tiktoken
- except:
- report_execption(chatbot, history,
- a = f"解析项目: {txt}",
- b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。")
- yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
- return
-
- # Step 3: collect the files to process
- import time, glob, os, shutil, re
- os.makedirs('gpt_log/generated_english_version', exist_ok=True)
- os.makedirs('gpt_log/generated_english_version/crazy_functions', exist_ok=True)
- file_manifest = [f for f in glob.glob('./*.py') if ('test_project' not in f) and ('gpt_log' not in f)] + \
- [f for f in glob.glob('./crazy_functions/*.py') if ('test_project' not in f) and ('gpt_log' not in f)]
- # file_manifest = ['./toolbox.py']
- i_say_show_user_buffer = []
-
- # Step 4: display something so the UI does not feel stuck
- for index, fp in enumerate(file_manifest):
- # if 'test_project' in fp: continue
- with open(fp, 'r', encoding='utf-8', errors='replace') as f:
- file_content = f.read()
- i_say_show_user =f'[{index}/{len(file_manifest)}] 接下来请将以下代码中包含的所有中文转化为英文,只输出转化后的英文代码,请用代码块输出代码: {os.path.abspath(fp)}'
- i_say_show_user_buffer.append(i_say_show_user)
- chatbot.append((i_say_show_user, "[Local Message] 等待多线程操作,中间过程不予显示."))
- yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
-
-
- # Step 5: truncation and chunking under the token limit
- MAX_TOKEN = 3000
- from request_llm.bridge_all import model_info
- enc = model_info["gpt-3.5-turbo"]['tokenizer']
- def get_token_fn(txt): return len(enc.encode(txt, disallowed_special=()))
-
-
- # Step 6: the worker function
- mutable_return = [None for _ in file_manifest]
- observe_window = [[""] for _ in file_manifest]
- def thread_worker(fp,index):
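- # Worker for one source file: throttle later threads, split the file to fit the token limit, ask the LLM to rewrite its Chinese content in English chunk by chunk, and store the merged result in mutable_return[index].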
- if index > 10:
- time.sleep(60)
- print('Openai 限制免费用户每分钟20次请求,降低请求频率中。')
- with open(fp, 'r', encoding='utf-8', errors='replace') as f:
- file_content = f.read()
- i_say_template = lambda fp, file_content: f'接下来请将以下代码中包含的所有中文转化为英文,只输出代码,文件名是{fp},文件代码是 ```{file_content}```'
- try:
- gpt_say = ""
- # split the code file into chunks
- file_content_breakdown = breakdown_txt_to_satisfy_token_limit(file_content, get_token_fn, MAX_TOKEN)
- for file_content_partial in file_content_breakdown:
- i_say = i_say_template(fp, file_content_partial)
- # # ** gpt request **
- gpt_say_partial = predict_no_ui_long_connection(inputs=i_say, llm_kwargs=llm_kwargs, history=[], sys_prompt=sys_prompt, observe_window=observe_window[index])
- gpt_say_partial = extract_code_block_carefully(gpt_say_partial)
- gpt_say += gpt_say_partial
- mutable_return[index] = gpt_say
- except ConnectionAbortedError as token_exceed_err:
- print('至少一个线程任务Token溢出而失败', token_exceed_err)
- except Exception as e:
- print('至少一个线程任务意外失败', e)
-
- # Step 7: start all worker threads at the same time
- handles = [threading.Thread(target=thread_worker, args=(fp,index)) for index, fp in enumerate(file_manifest)]
- for h in handles:
- h.daemon = True
- h.start()
- chatbot.append(('开始了吗?', f'多线程操作已经开始'))
- yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
-
- # Step 8: poll the threads in a loop until they have all finished
- cnt = 0
- while True:
- cnt += 1
- time.sleep(0.2)
- th_alive = [h.is_alive() for h in handles]
- if not any(th_alive): break
- # nicer visual feedback in the UI
- observe_win = []
- for thread_index, alive in enumerate(th_alive):
- observe_win.append("[ ..."+observe_window[thread_index][0][-60:].replace('\n','').replace('```','...').replace(' ','.').replace('<br/>','.....').replace('$','.')+"... ]")
- stat = [f'执行中: {obs}\n\n' if alive else '已完成\n\n' for alive, obs in zip(th_alive, observe_win)]
- stat_str = ''.join(stat)
- chatbot[-1] = (chatbot[-1][0], f'多线程操作已经开始,完成情况: \n\n{stat_str}' + ''.join(['.']*(cnt%10+1)))
- yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
-
- # Step 9: write the results to files
- for index, h in enumerate(handles):
- h.join() # join is not strictly needed here; all threads have already finished
- fp = file_manifest[index]
- gpt_say = mutable_return[index]
- i_say_show_user = i_say_show_user_buffer[index]
-
- where_to_relocate = f'gpt_log/generated_english_version/{fp}'
- if gpt_say is not None:
- with open(where_to_relocate, 'w+', encoding='utf-8') as f:
- f.write(gpt_say)
- else: # failed; fall back to copying the original file
- shutil.copyfile(file_manifest[index], where_to_relocate)
- chatbot.append((i_say_show_user, f'[Local Message] 已完成{os.path.abspath(fp)}的转化,\n\n存入{os.path.abspath(where_to_relocate)}'))
- history.append(i_say_show_user); history.append(gpt_say)
- yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
- time.sleep(1)
-
- # Step 10: save a task report file
- res = write_results_to_file(history)
- chatbot.append(("生成一份任务执行报告", res))
- yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
diff --git a/spaces/fclong/summary/fengshen/pipelines/tcbert.py b/spaces/fclong/summary/fengshen/pipelines/tcbert.py
deleted file mode 100644
index 1fecdd7cd77c64c82e9b168aaae82d85ec438801..0000000000000000000000000000000000000000
--- a/spaces/fclong/summary/fengshen/pipelines/tcbert.py
+++ /dev/null
@@ -1,136 +0,0 @@
-# coding=utf-8
-# Copyright 2021 The IDEA Authors. All rights reserved.
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from logging import basicConfig
-import torch
-from torch import nn
-import json
-from tqdm import tqdm
-import os
-import numpy as np
-from transformers import BertTokenizer
-import pytorch_lightning as pl
-
-from pytorch_lightning import trainer, loggers
-from transformers import AutoConfig
-from transformers.pipelines.base import Pipeline
-import argparse
-import copy
-from fengshen.utils.universal_checkpoint import UniversalCheckpoint
-import warnings
-from fengshen.models.tcbert.modeling_tcbert import (
- TCBertDataModel,
- TCBertLitModel,
- TCBertPredict,
-)
-
-
-class TCBertPipelines(Pipeline):
- @staticmethod
- def piplines_args(parent_args):
- total_parser = parent_args.add_argument_group("piplines args")
- total_parser.add_argument(
- '--pretrained_model_path', default='', type=str)
- total_parser.add_argument('--load_checkpoints_path',
- default='', type=str)
- total_parser.add_argument('--train', action='store_true')
- total_parser.add_argument('--language',
- default='chinese', type=str)
-
- total_parser = TCBertDataModel.add_data_specific_args(total_parser)
- total_parser = UniversalCheckpoint.add_argparse_args(total_parser)
- total_parser = TCBertLitModel.add_model_specific_args(total_parser)
- total_parser = pl.Trainer.add_argparse_args(parent_args)
- return parent_args
-
- def __init__(self, args, model_path, nlabels):
- self.args = args
- self.checkpoint_callback = UniversalCheckpoint(args)
- self.logger = loggers.TensorBoardLogger(save_dir=args.default_root_dir)
- self.trainer = pl.Trainer.from_argparse_args(args,
- logger=self.logger,
- callbacks=[self.checkpoint_callback])
- self.config = AutoConfig.from_pretrained(model_path)
- self.tokenizer = BertTokenizer.from_pretrained(
- model_path)
-
- if args.load_checkpoints_path != '':
- self.model = TCBertLitModel.load_from_checkpoint(
- args.load_checkpoints_path, args=args, model_path=model_path, nlabels=nlabels)
- print('load model from: ', args.load_checkpoints_path)
- else:
- self.model = TCBertLitModel(
- args, model_path=model_path, nlabels=nlabels)
-
- def train(self, train_data, dev_data, prompt, prompt_label):
-
- data_model = TCBertDataModel(
- train_data, dev_data, self.tokenizer, self.args, prompt, prompt_label)
- self.model.num_data = len(train_data)
- self.trainer.fit(self.model, data_model)
-
- def predict(self, test_data, prompt, prompt_label, cuda=True):
-
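- # Run inference in batches of args.batchsize with TCBertPredict and collect the predictions.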
- result = []
- start = 0
- if cuda:
- self.model = self.model.cuda()
- self.model.model.eval()
- predict_model = TCBertPredict(self.model, self.tokenizer, self.args, prompt, prompt_label)
- while start < len(test_data):
- batch_data = test_data[start:start+self.args.batchsize]
- start += self.args.batchsize
- batch_result = predict_model.predict(batch_data)
- result.extend(batch_result)
- # result = self.postprocess(result)
- return result
-
-
- def preprocess(self, data):
- return data
-
- def postprocess(self, data):
- return data
-
-
- def _forward(self, model_inputs):
- return self.model(**model_inputs)
-
- def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
- # Using "" as default argument because we're going to use `top_k=None` in user code to declare
- # "No top_k"
- preprocess_params = tokenizer_kwargs
-
- postprocess_params = {}
- if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
- return_all_scores = self.model.config.return_all_scores
-
- if isinstance(top_k, int) or top_k is None:
- postprocess_params["top_k"] = top_k
- postprocess_params["_legacy"] = False
- elif return_all_scores is not None:
- warnings.warn(
- "`return_all_scores` is now deprecated, if want a similar funcionality use `top_k=None` instead of"
- " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.",
- UserWarning,
- )
- if return_all_scores:
- postprocess_params["top_k"] = None
- else:
- postprocess_params["top_k"] = 1
-
- if function_to_apply is not None:
- postprocess_params["function_to_apply"] = function_to_apply
- return preprocess_params, {}, postprocess_params
diff --git a/spaces/fffiloni/Image-Caption-2-Shap-E/utils.py b/spaces/fffiloni/Image-Caption-2-Shap-E/utils.py
deleted file mode 100644
index 36e072134588bf5252bf0f018aa7912d9c45567c..0000000000000000000000000000000000000000
--- a/spaces/fffiloni/Image-Caption-2-Shap-E/utils.py
+++ /dev/null
@@ -1,9 +0,0 @@
-import random
-
-from settings import MAX_SEED
-
-
-def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
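- # When randomize_seed is True, draw a fresh seed uniformly from [0, MAX_SEED]; otherwise return the given seed unchanged.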
- if randomize_seed:
- seed = random.randint(0, MAX_SEED)
- return seed
diff --git a/spaces/fffiloni/controlnet-animation-doodle/node_modules/mime/src/test.js b/spaces/fffiloni/controlnet-animation-doodle/node_modules/mime/src/test.js
deleted file mode 100644
index 42958a20d5bdbae6a4724c6a8cc99f292b00b265..0000000000000000000000000000000000000000
--- a/spaces/fffiloni/controlnet-animation-doodle/node_modules/mime/src/test.js
+++ /dev/null
@@ -1,60 +0,0 @@
-/**
- * Usage: node test.js
- */
-
-var mime = require('../mime');
-var assert = require('assert');
-var path = require('path');
-
-//
-// Test mime lookups
-//
-
-assert.equal('text/plain', mime.lookup('text.txt')); // normal file
-assert.equal('text/plain', mime.lookup('TEXT.TXT')); // uppercase
-assert.equal('text/plain', mime.lookup('dir/text.txt')); // dir + file
-assert.equal('text/plain', mime.lookup('.text.txt')); // hidden file
-assert.equal('text/plain', mime.lookup('.txt')); // nameless
-assert.equal('text/plain', mime.lookup('txt')); // extension-only
-assert.equal('text/plain', mime.lookup('/txt')); // extension-less ()
-assert.equal('text/plain', mime.lookup('\\txt')); // Windows, extension-less
-assert.equal('application/octet-stream', mime.lookup('text.nope')); // unrecognized
-assert.equal('fallback', mime.lookup('text.fallback', 'fallback')); // alternate default
-
-//
-// Test extensions
-//
-
-assert.equal('txt', mime.extension(mime.types.text));
-assert.equal('html', mime.extension(mime.types.htm));
-assert.equal('bin', mime.extension('application/octet-stream'));
-assert.equal('bin', mime.extension('application/octet-stream '));
-assert.equal('html', mime.extension(' text/html; charset=UTF-8'));
-assert.equal('html', mime.extension('text/html; charset=UTF-8 '));
-assert.equal('html', mime.extension('text/html; charset=UTF-8'));
-assert.equal('html', mime.extension('text/html ; charset=UTF-8'));
-assert.equal('html', mime.extension('text/html;charset=UTF-8'));
-assert.equal('html', mime.extension('text/Html;charset=UTF-8'));
-assert.equal(undefined, mime.extension('unrecognized'));
-
-//
-// Test node.types lookups
-//
-
-assert.equal('font/woff', mime.lookup('file.woff'));
-assert.equal('application/octet-stream', mime.lookup('file.buffer'));
-// TODO: Uncomment once #157 is resolved
-// assert.equal('audio/mp4', mime.lookup('file.m4a'));
-assert.equal('font/otf', mime.lookup('file.otf'));
-
-//
-// Test charsets
-//
-
-assert.equal('UTF-8', mime.charsets.lookup('text/plain'));
-assert.equal('UTF-8', mime.charsets.lookup(mime.types.js));
-assert.equal('UTF-8', mime.charsets.lookup(mime.types.json));
-assert.equal(undefined, mime.charsets.lookup(mime.types.bin));
-assert.equal('fallback', mime.charsets.lookup('application/octet-stream', 'fallback'));
-
-console.log('\nAll tests passed');
diff --git a/spaces/fffiloni/lama-video-watermark-remover/app.py b/spaces/fffiloni/lama-video-watermark-remover/app.py
deleted file mode 100644
index e80fee0deb4a52e855b8988b893771da31fd728b..0000000000000000000000000000000000000000
--- a/spaces/fffiloni/lama-video-watermark-remover/app.py
+++ /dev/null
@@ -1,115 +0,0 @@
-import os
-os.system("wget https://huggingface.co/akhaliq/lama/resolve/main/best.ckpt")
-os.system("pip install imageio")
-os.system("pip install albumentations==0.5.2")
-os.system("pip install opencv-python")
-os.system("pip install ffmpeg-python")
-os.system("pip install moviepy")
-import cv2
-import paddlehub as hub
-import gradio as gr
-import torch
-from PIL import Image, ImageOps
-import numpy as np
-import imageio
-from moviepy.editor import *
-os.mkdir("data")
-os.rename("best.ckpt", "models/best.ckpt")
-os.mkdir("dataout")
-
-def get_frames(video_in):
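- # Resize the input video (height 256, fps capped at 30), dump every frame to kang<i>.jpg, and return (frame_paths, fps).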
- frames = []
- #resize the video
- clip = VideoFileClip(video_in)
-
- #check fps
- if clip.fps > 30:
- print("vide rate is over 30, resetting to 30")
- clip_resized = clip.resize(height=256)
- clip_resized.write_videofile("video_resized.mp4", fps=30)
- else:
- print("video rate is OK")
- clip_resized = clip.resize(height=256)
- clip_resized.write_videofile("video_resized.mp4", fps=clip.fps)
-
- print("video resized to 512 height")
-
- # Opens the Video file with CV2
- cap= cv2.VideoCapture("video_resized.mp4")
-
- fps = cap.get(cv2.CAP_PROP_FPS)
- print("video fps: " + str(fps))
- i=0
- while(cap.isOpened()):
- ret, frame = cap.read()
- if ret == False:
- break
- cv2.imwrite('kang'+str(i)+'.jpg',frame)
- frames.append('kang'+str(i)+'.jpg')
- i+=1
-
- cap.release()
- cv2.destroyAllWindows()
- print("broke the video into frames")
-
- return frames, fps
-
-def create_video(frames, fps, type):
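- # Reassemble the processed frames into an mp4 with moviepy and return its file name.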
- print("building video result")
- clip = ImageSequenceClip(frames, fps=fps)
- clip.write_videofile(type + "_result.mp4", fps=fps)
-
- return type + "_result.mp4"
-
-
-def magic_lama(img):
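- # Inpaint a single frame with LaMa using the fixed Modelscope watermark mask and return the path of the cleaned frame.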
-
- i = img
- img = Image.open(img)
- mask = Image.open("./masks/modelscope-mask.png")
- inverted_mask = ImageOps.invert(mask)
-
-
- imageio.imwrite(f"./data/data.png", img)
- imageio.imwrite(f"./data/data_mask.png", inverted_mask)
- os.system('python predict.py model.path=/home/user/app/ indir=/home/user/app/data/ outdir=/home/user/app/dataout/ device=cpu')
- return f"./dataout/data_mask.png"
-
-def infer(video_in):
- # 1. break video into frames and get FPS
- break_vid = get_frames(video_in)
- frames_list= break_vid[0]
- fps = break_vid[1]
- #n_frame = int(trim_value*fps)
- n_frame = len(frames_list)
-
- if n_frame >= len(frames_list):
- print("video is shorter than the cut value")
- n_frame = len(frames_list)
-
- # 2. prepare frames result arrays
- result_frames = []
- print("set stop frames to: " + str(n_frame))
-
- for i in frames_list[0:int(n_frame)]:
- lama_frame = magic_lama(i)
- lama_frame = Image.open(lama_frame)
- imageio.imwrite(f"cleaned_frame_{i}", lama_frame)
- result_frames.append(f"cleaned_frame_{i}")
- print("frame " + i + "/" + str(n_frame) + ": done;")
-
-
- final_vid = create_video(result_frames, fps, "cleaned")
-
- files = [final_vid]
-
- return final_vid
-
-inputs = [gr.Video(label="Input", source="upload", type="filepath")]
-outputs = [gr.Video(label="output")]
-title = "LaMa Video Watermark Remover"
-description = "LaMa: Resolution-robust Large Mask Inpainting with Fourier Convolutions.
This demo in meant to be used as a watermark remover on Modelscope generated videos.
Simply upload your modelscope video and hit Submit
"
-article = "Resolution-robust Large Mask Inpainting with Fourier Convolutions | Github Repo
"
-examples = ["./examples/modelscope-astronaut-horse.mp4", "./examples/modelscope-panda.mp4", "./examples/modelscope-spiderman-surfing.mp4"]
-gr.Interface(infer, inputs, outputs, title=title,
- description=description, article=article, examples=examples).launch()
diff --git a/spaces/firsk/ai_otto/bert_gen.py b/spaces/firsk/ai_otto/bert_gen.py
deleted file mode 100644
index 25cd7d97bafa02c514d0e1a34621546eac10da53..0000000000000000000000000000000000000000
--- a/spaces/firsk/ai_otto/bert_gen.py
+++ /dev/null
@@ -1,59 +0,0 @@
-import torch
-from multiprocessing import Pool
-import commons
-import utils
-from tqdm import tqdm
-from text import cleaned_text_to_sequence, get_bert
-import argparse
-import torch.multiprocessing as mp
-
-
-def process_line(line):
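- # For one line of the filelist, build (or load a cached) BERT feature tensor for its text and save it next to the wav as <name>.bert.pt.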
- rank = mp.current_process()._identity
- rank = rank[0] if len(rank) > 0 else 0
- if torch.cuda.is_available():
- gpu_id = rank % torch.cuda.device_count()
- device = torch.device(f"cuda:{gpu_id}")
- wav_path, _, language_str, text, phones, tone, word2ph = line.strip().split("|")
- phone = phones.split(" ")
- tone = [int(i) for i in tone.split(" ")]
- word2ph = [int(i) for i in word2ph.split(" ")]
- word2ph = [i for i in word2ph]
- phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str)
-
- phone = commons.intersperse(phone, 0)
- tone = commons.intersperse(tone, 0)
- language = commons.intersperse(language, 0)
- for i in range(len(word2ph)):
- word2ph[i] = word2ph[i] * 2
- word2ph[0] += 1
-
- bert_path = wav_path.replace(".wav", ".bert.pt")
-
- try:
- bert = torch.load(bert_path)
- assert bert.shape[-1] == len(phone)
- except Exception:
- bert = get_bert(text, word2ph, language_str, device)
- assert bert.shape[-1] == len(phone)
- torch.save(bert, bert_path)
-
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser()
- parser.add_argument("-c", "--config", type=str, default="configs/config.json")
- parser.add_argument("--num_processes", type=int, default=2)
- args = parser.parse_args()
- config_path = args.config
- hps = utils.get_hparams_from_file(config_path)
- lines = []
- with open(hps.data.training_files, encoding="utf-8") as f:
- lines.extend(f.readlines())
-
- with open(hps.data.validation_files, encoding="utf-8") as f:
- lines.extend(f.readlines())
-
- num_processes = args.num_processes
- with Pool(processes=num_processes) as pool:
- for _ in tqdm(pool.imap_unordered(process_line, lines), total=len(lines)):
- pass
diff --git a/spaces/furrutiav/beto_coherence/app.py b/spaces/furrutiav/beto_coherence/app.py
deleted file mode 100644
index cc3b9a688b88f3d7a00c63928f878721b9bd0b30..0000000000000000000000000000000000000000
--- a/spaces/furrutiav/beto_coherence/app.py
+++ /dev/null
@@ -1,35 +0,0 @@
-from util import *
-
-st.markdown('# Prediction of coherence in answers to open-ended questions')
-
-st.markdown('## Task')
-
-st.markdown('The task is to predict whether an answer is incoherent to the question or not.')
-
-st.markdown('## Examples')
-
-st.markdown('*Examples originally in Spanish*')
-st.markdown("""|Question|Coherent answer|Incoherent answer|
-|--------------|-----------|------------|
-|Maria and her husband cooked a tortilla yesterday, they divided it into 6 equal parts. Maria ate 2/6 and her husband ate 3/6. What fraction of the tortilla was left?| 1/6 | no |
-|Catalina bought 12 onions. Of the 12 onions, she used 1/4 of them to make some delicious empanadas. How many onions did she use for the empanadas? Explain how you knew the result.|I need 3 and I know this because I divided 12:4=3x1=3|it is ok teacher|
-|Camilo has to collect 60 balls. So far he has collected 23. To find out how many balls he has left to collect, subtract 23 from 60. Is Camilo’s exercise correct? Justify your answer|it’s ok because I added 37+23 and a half 60|43|
-|Pablo takes 5 hours to travel from Santiago to La Serena. His friend Pedro traveled from La Serena to Santiago and took 300 minutes. Which of the two children took less time? Explain your answer|both took the same time because I multiplied 5x60=300 and 300 minutes is 5 hours|60x5 gives 300|
-|What is a line of symmetry? Explain in your own words and give me an example|a line of symmetry is a line that separates two equal images|f|
-|Pamela has 25 flowers and her friend gives her 17 flowers. Write in words the total number of flowers Pamela has|forty-two|areflowers|
-""")
-
-st.markdown('## Model')
-
-st.markdown('Fine tuning of Spanish version of BERT ([BETO](https://huggingface.co/dccuchile/bert-base-spanish-wwm-cased)). This new model is trained using Spanish pairs of questions/answers to predict incoherence. Available on [furrutiav/beto_coherence](https://huggingface.co/furrutiav/beto_coherence).')
-
-st.markdown('## Inference')
-
-Q = st.text_area('Question:', 'Julieta tiene 20 láminas y le regaló 12 a Daniela ¿Cuántas láminas tiene ahora Julieta?')
-
-A = st.text_area('Answer:', 'nose :C')
-if Q:
- if A:
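- # C1Classifier (from util) is called with is_probs=True; the code treats probs[1] as the probability that the answer is incoherent.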
- probs = C1Classifier(Q, A, is_probs=True)
- value = "coherent" if probs[1]<=0.5 else "incoherent"
- st.write('Prediction:', f"The answer is {100*probs[1]: .0f}% likely to be incoherent. So, it is {value}!")
\ No newline at end of file
diff --git a/spaces/geninhu/whisper-vietnamese/README.md b/spaces/geninhu/whisper-vietnamese/README.md
deleted file mode 100644
index bb9753b784a4fd15bc9dc3e050ed31e56e9b3194..0000000000000000000000000000000000000000
--- a/spaces/geninhu/whisper-vietnamese/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-title: Whisper Demo
-emoji: 🇻🇳
-colorFrom: indigo
-colorTo: red
-sdk: gradio
-sdk_version: 3.9.1
-app_file: app.py
-pinned: false
-tags:
-- whisper-event
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmseg/models/segmentors/cascade_encoder_decoder.py b/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmseg/models/segmentors/cascade_encoder_decoder.py
deleted file mode 100644
index 873957d8d6468147c994493d92ff5c1b15bfb703..0000000000000000000000000000000000000000
--- a/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmseg/models/segmentors/cascade_encoder_decoder.py
+++ /dev/null
@@ -1,98 +0,0 @@
-from torch import nn
-
-from annotator.uniformer.mmseg.core import add_prefix
-from annotator.uniformer.mmseg.ops import resize
-from .. import builder
-from ..builder import SEGMENTORS
-from .encoder_decoder import EncoderDecoder
-
-
-@SEGMENTORS.register_module()
-class CascadeEncoderDecoder(EncoderDecoder):
- """Cascade Encoder Decoder segmentors.
-
- CascadeEncoderDecoder is almost the same as EncoderDecoder, except that its
- decode heads are cascaded: the output of the previous decode_head is the
- input of the next decode_head.
- """
-
- def __init__(self,
- num_stages,
- backbone,
- decode_head,
- neck=None,
- auxiliary_head=None,
- train_cfg=None,
- test_cfg=None,
- pretrained=None):
- self.num_stages = num_stages
- super(CascadeEncoderDecoder, self).__init__(
- backbone=backbone,
- decode_head=decode_head,
- neck=neck,
- auxiliary_head=auxiliary_head,
- train_cfg=train_cfg,
- test_cfg=test_cfg,
- pretrained=pretrained)
-
- def _init_decode_head(self, decode_head):
- """Initialize ``decode_head``"""
- assert isinstance(decode_head, list)
- assert len(decode_head) == self.num_stages
- self.decode_head = nn.ModuleList()
- for i in range(self.num_stages):
- self.decode_head.append(builder.build_head(decode_head[i]))
- self.align_corners = self.decode_head[-1].align_corners
- self.num_classes = self.decode_head[-1].num_classes
-
- def init_weights(self, pretrained=None):
- """Initialize the weights in backbone and heads.
-
- Args:
- pretrained (str, optional): Path to pre-trained weights.
- Defaults to None.
- """
- self.backbone.init_weights(pretrained=pretrained)
- for i in range(self.num_stages):
- self.decode_head[i].init_weights()
- if self.with_auxiliary_head:
- if isinstance(self.auxiliary_head, nn.ModuleList):
- for aux_head in self.auxiliary_head:
- aux_head.init_weights()
- else:
- self.auxiliary_head.init_weights()
-
- def encode_decode(self, img, img_metas):
- """Encode images with backbone and decode into a semantic segmentation
- map of the same size as input."""
- x = self.extract_feat(img)
- out = self.decode_head[0].forward_test(x, img_metas, self.test_cfg)
- for i in range(1, self.num_stages):
- out = self.decode_head[i].forward_test(x, out, img_metas,
- self.test_cfg)
- out = resize(
- input=out,
- size=img.shape[2:],
- mode='bilinear',
- align_corners=self.align_corners)
- return out
-
- def _decode_head_forward_train(self, x, img_metas, gt_semantic_seg):
- """Run forward function and calculate loss for decode head in
- training."""
- losses = dict()
-
- loss_decode = self.decode_head[0].forward_train(
- x, img_metas, gt_semantic_seg, self.train_cfg)
-
- losses.update(add_prefix(loss_decode, 'decode_0'))
-
- for i in range(1, self.num_stages):
- # forward test again, maybe unnecessary for most methods.
- prev_outputs = self.decode_head[i - 1].forward_test(
- x, img_metas, self.test_cfg)
- loss_decode = self.decode_head[i].forward_train(
- x, prev_outputs, img_metas, gt_semantic_seg, self.train_cfg)
- losses.update(add_prefix(loss_decode, f'decode_{i}'))
-
- return losses
diff --git a/spaces/gotiQspiryo/whisper-ui/examples/Bluetooth Stack For Windows By Toshiba V8.00.03 Crack [CRACKED].md b/spaces/gotiQspiryo/whisper-ui/examples/Bluetooth Stack For Windows By Toshiba V8.00.03 Crack [CRACKED].md
deleted file mode 100644
index 8175173cc7366c73a388f4c85a49db1032c1cd06..0000000000000000000000000000000000000000
--- a/spaces/gotiQspiryo/whisper-ui/examples/Bluetooth Stack For Windows By Toshiba V8.00.03 Crack [CRACKED].md
+++ /dev/null
@@ -1,6 +0,0 @@
-bluetooth stack for windows by toshiba v8.00.03 crack
Download File ✔✔✔ https://urlgoal.com/2uyMaM
-
-Come and download Toshiba bluetooth rfcomm driver windows 72j absolutely for free, Fast and Direct ... Toshiba.Bluetooth.Stack_9.00.03_Ml.RUS_incl.Cracked-dll. ... TOSHIBA Bluetooth Stack v8.00.12 x86-x64.a1985.NNTT. 4d29de3e1b
-
-
-
diff --git a/spaces/gotiQspiryo/whisper-ui/examples/Corel Draw X12 Full Version Free Downloadzip Learn to Design Like a Pro with This Software.md b/spaces/gotiQspiryo/whisper-ui/examples/Corel Draw X12 Full Version Free Downloadzip Learn to Design Like a Pro with This Software.md
deleted file mode 100644
index f4a320f56382f8a5c0769f405edc063c97639e73..0000000000000000000000000000000000000000
--- a/spaces/gotiQspiryo/whisper-ui/examples/Corel Draw X12 Full Version Free Downloadzip Learn to Design Like a Pro with This Software.md
+++ /dev/null
@@ -1,16 +0,0 @@
-
-CorelDraw is one of the most powerful graphic design applications, with a huge range of effective tools. CorelDraw 12 is a classic version that lets digital art lovers and professionals take on large projects. It contains a big collection of useful tools and options for designing attractive, professional drawings, and it lets users edit digital images with a wide range of custom settings so that complex designs can be processed exactly as desired. You can easily manage different design tasks such as logo design, web design and ad design. You may also like CorelDRAW Graphics Suite 2019 v21.1.
-Nothing was more useful for this than the smart drawing tool. It would intelligently detect whether the user was trying to draw a rectangle, ellipse, or triangle, and transform the shapes into perfect versions of those shapes. No more time spent refining every line and every curve - CorelDRAW 12 knew exactly what users wanted to create and helped them speed down that path.
-Corel Draw X12 Full Version Free Downloadzip
Download --->>> https://urlgoal.com/2uyMCq
-Subscribe and save! A CorelDRAW Graphics Suite subscription provides a flexible, affordable way to enjoy the latest software without having to pay the hefty upfront cost of ownership. Instead, you'll get a full-featured, downloadable version of this professional suite with every new release, as long as your subscription is active.
-The seamless integration of the suites allowed users to seamlessly switch between applications and work within one interface. Designers were able to attach 3 utilities to a single suite. This was a great idea. This version had the smartest drawing tool. It was able to accurately detect the shape designers wanted and create perfect shapes such as rectangles, ellipses, or triangles.
-The smart drawing tool was the best example of this: it would detect whether the user wanted to draw a rectangle or an ellipse and transform the shape into a perfect version. CorelDRAW 12 saved users time and made it easy to create the designs they wanted quickly. It was not designed to replace pen and paper; it was intended to be a time-saver. CorelDRAW Graphics Suite 12 offered three applications for different purposes: CorelDRAW for vector graphics, Corel PHOTO-PAINT for painting and editing bitmap images, and Corel R.A.V.E. for creating animations.
-Note: as with the fountain transparency fills, if you are using CorelDRAW 2018 and higher you can find more vector and bitmap patterns free to download from Welcome Screen > Get More/Store > Content. If you are using a previous version of CorelDRAW, these extra patterns will already be installed.
-Free Download CorelDRAW Graphics Suite 2022 v24 + Extras Content full version offline installer for Windows PC. This program helps you craft and personalize projects by delivering expert results in record time.
-In its first versions, the CDR file format was a completely proprietary file format primarily used for vector graphic drawings, recognizable by the first two bytes of the file being "WL". Starting with CorelDraw 3, the file format changed to a Resource Interchange File Format (RIFF) envelope, recognizable by the first four bytes of the file being "RIFF", and a "CDR*vrsn" in bytes 9 to 15, with the asterisk "*" being just a blank in early versions.[54] Beginning with CorelDraw 4 it included the version number of the writing program in hexadecimal ("4" meaning version 4, "D" meaning version 13). The actual data chunk of the RIFF remains a Corel proprietary format.
-In 2012 the joint LibreOffice/re-lab team implemented libcdr, a library for reading CDR files from version 7 to X3 and CMX files.[68] The library has extensive support for shapes and their properties, including support for color management and spot colors, and has a basic support for text.[69] The library provides a built-in converter to SVG, and a converter to OpenDocument is provided by writerperfect package. The libcdr library is used in LibreOffice starting from version 3.6,[70] and thanks to public API it can be freely used by other applications.
-
-CorelDRAW Graphics Suite 2022 is a universal graphic design suite which provides everything you need to create high-quality vector illustration, layout, photo editing, and typography projects with complete efficiency. The latest version offers cutting-edge design capabilities that can meet all types of creativity needs. It allows you to edit images and apply attractive effects on graphics to make them more award-winner. It also enables you to capture and save screenshots of the entire page, menu, or individual windows, with a single click. You can also make changes to the font to fit into the desired layout. It is fully compatible with a wide range of different formats, including AI, PSD, PDF, JPG, PNG, SVG, DWG, DXF, EPS, TIFF, DOCX and PPT. All in all, CorelDRAW Graphics Suite 2022 is a complete graphics design suite which allows you to design impressive graphics and layouts, edit photos, and create websites. You can also download Xara Photo & Graphic Designer 2022 Free Download.
-The VideoStudio Trial is designed to give first-time users the access to explore the editing workspace and test out a few of the key features. The trial is not a full-featured version and is limited by formats and support. Full purchase is required to experience the complete power of VideoStudio.
aaccfb2cb3
-
-
\ No newline at end of file
diff --git a/spaces/gotiQspiryo/whisper-ui/examples/FlashBack Pro 5.30 _HOT_ Crack.md b/spaces/gotiQspiryo/whisper-ui/examples/FlashBack Pro 5.30 _HOT_ Crack.md
deleted file mode 100644
index 52fc530408e879214ddf4334868f8b61bdf9c5a2..0000000000000000000000000000000000000000
--- a/spaces/gotiQspiryo/whisper-ui/examples/FlashBack Pro 5.30 _HOT_ Crack.md
+++ /dev/null
@@ -1,82 +0,0 @@
-
-How to Download and Use FlashBack Pro 5.30 Crack
-FlashBack Pro 5.30 is a powerful screen recorder and video editor that allows you to capture and edit your screen activities with ease. You can use it to create video tutorials, presentations, demos, animations, and more. However, FlashBack Pro 5.30 is not a free software, and you need to purchase a license to use it without limitations. If you don't want to spend money on it, you might be tempted to download and use FlashBack Pro 5.30 crack, which is a modified version of the software that bypasses the activation process. But is it safe and legal to do so? In this article, we will explain what FlashBack Pro 5.30 crack is, what are the risks and disadvantages of using it, and what are the alternatives to it.
-What is FlashBack Pro 5.30 Crack?
-FlashBack Pro 5.30 crack is a file that modifies the original installation of FlashBack Pro 5.30 and makes it think that it has been activated with a valid license key. This way, you can use all the features of FlashBack Pro 5.30 without paying for it. Usually, FlashBack Pro 5.30 crack is distributed through torrent sites or other shady websites that claim to offer free software downloads.
-FlashBack Pro 5.30 Crack
Download >>> https://urlgoal.com/2uyMt4
-What are the Risks and Disadvantages of Using FlashBack Pro 5.30 Crack?
-While using FlashBack Pro 5.30 crack might seem like a good idea at first, it actually comes with many risks and disadvantages that you should be aware of before downloading and installing it on your PC. Some of the risks and disadvantages of using FlashBack Pro 5.30 crack are:
-
-- It is illegal: Using FlashBack Pro 5.30 crack is a violation of the software's terms of service and copyright laws. You are essentially stealing the software from its developers and depriving them of their rightful income. This could result in legal consequences such as fines or lawsuits.
-- It is unsafe: Downloading FlashBack Pro 5.30 crack from untrusted sources could expose your PC to malware, viruses, spyware, or other harmful programs that could damage your system or compromise your data and privacy. You never know what kind of malicious code could be hidden inside the crack file or the installer.
-- It is unreliable: Using FlashBack Pro 5.30 crack could cause various problems with the software's functionality and performance. You might experience errors, crashes, glitches, or compatibility issues that could ruin your recordings or edits. You also won't be able to receive any updates or technical support from the official developers.
-- It is unethical: Using FlashBack Pro 5.30 crack is unfair to the developers who have spent time and effort to create a quality product that provides value to its users. By using the crack, you are disrespecting their work and discouraging them from creating more innovative software in the future.
-
-What are the Alternatives to FlashBack Pro 5.30 Crack?
-If you want to use FlashBack Pro 5.30 without breaking the law or risking your PC's security, there are some alternatives that you can consider instead of using the crack. Some of the alternatives to FlashBack Pro 5.30 crack are:
-
-- Use the free trial: FlashBack Pro 5.30 offers a free trial version that you can download and use for 10 days without any limitations or watermarks. This way, you can test the software's features and see if it meets your needs before deciding whether to buy it or not.
-- Use the free version: FlashBack Pro 5.30 also has a free version called FlashBack Express that you can use for personal or non-commercial purposes without any time limit or watermarks. However, the free version has some limitations such as no video editing tools, no webcam recording, no audio editing tools, etc.
-- Use an alternative software: There are many other screen recorder and video editor software that you can use instead of FlashBack Pro 5.30, some of which are free or cheaper than FlashBack Pro 5.30. For example, you can use OBS Studio, Camtasia Studio, Bandicam Screen Recorder, etc.
-
-Conclusion
-FlashBack Pro 5.30 is a professional screen recorder and video editor that allows you to capture and edit your screen activities with ease. However, if you don't want to pay for it, you might be tempted to download and use FlashBack Pro 5.30 crack, which is a modified version of the software that bypasses the activation process.
-However, using FlashBack Pro 5.30 crack is not a good idea as it comes with many risks and disadvantages such as being illegal, unsafe, unreliable, and unethical. Instead of using the crack, you should consider some alternatives such as using the free trial or free version of FlashBack Pro 5.30 or using an alternative software.
-We hope this article has helped you understand what FlashBack Pro 5.30 crack is, what are the risks and disadvantages of using it, and what are the alternatives to it.
-How to Download and Install FlashBack Pro 5.30 Crack?
-If you want to download and install FlashBack Pro 5.30 crack on your PC, you need to be careful and follow some steps to avoid any problems. Here are the steps that you need to follow:
-
-- Go to a torrent site or a website that offers free software downloads and search for FlashBack Pro 5.30 crack. Make sure you choose a reliable and trusted source that has positive reviews and ratings from other users.
-- Download the crack file and save it on your PC. You might need a torrent client or a download manager to do this.
-- Extract the crack file using a tool like WinRAR or 7-Zip. You should see two files: FlashBack Player.exe and FlashBack Recorder.exe.
-- Go to the folder where you have installed FlashBack Pro 5.30. You can find it in C:\\Program Files\\Blueberry Software\\BB FlashBack Pro 5 or C:\\Program Files (x86)\\Blueberry Software\\BB FlashBack Pro 5 depending on your operating system.
-- Copy and paste the crack files into the installation folder and replace the original files.
-- Run FlashBack Player.exe or FlashBack Recorder.exe as an administrator and enjoy using FlashBack Pro 5.30 without any limitations or watermarks.
-
-How to Use FlashBack Pro 5.30 Crack?
-Once you have installed FlashBack Pro 5.30 crack on your PC, you can use it to record and edit your screen videos with ease. Here are some tips on how to use FlashBack Pro 5.30 crack:
-
-
-- To start recording, click on the red button on the toolbar or press Ctrl+R. You can choose to record the full screen, a region, or a window. You can also record sound from your microphone or speakers and footage from your webcam.
-- To stop recording, click on the blue button on the toolbar or press Ctrl+S. You can then preview your recording in the built-in media player and save it as an FBR file.
-- To edit your recording, click on the green button on the toolbar or press Ctrl+E. You can then use various tools such as cut, crop, zoom, pan, add text, add images, add sounds, add transitions, etc. to enhance your video.
-- To export your recording, click on the yellow button on the toolbar or press Ctrl+X. You can then choose from different formats such as MP4, AVI, WMV, GIF, etc. to save your video. You can also upload your video directly to YouTube or other platforms.
-
-What are the Features of FlashBack Pro 5.30?
-FlashBack Pro 5.30 is a feature-rich screen recorder and video editor that offers many options and tools to help you create professional-looking videos. Some of the features of FlashBack Pro 5.30 are:
-
-- It allows you to record your screen in high quality and with smooth frame rates
-- It lets you edit your recordings with a full-featured timeline that supports multiple tracks, transitions, effects, annotations, etc.
-- It enables you to add interactive elements to your videos such as buttons, text boxes, images, sounds, etc.
-- It supports various formats and codecs for exporting your videos such as MP4, AVI, WMV, GIF, etc.
-- It integrates with popular platforms and services such as YouTube, Dropbox, Google Drive, etc.
-- It has a built-in media player that lets you preview your recordings and edits
-- It has a user-friendly interface that is easy to navigate and customize
-- It has a comprehensive help system that provides tutorials, tips, and FAQs
-
-How to Get FlashBack Pro 5.30 Legally?
-If you want to use FlashBack Pro 5.30 legally and without any risks or disadvantages, you need to purchase a license from the official website of the software. The license will give you access to all the features and updates of FlashBack Pro 5.30 as well as technical support from the developers. The license will also protect you from any legal or ethical issues that might arise from using the crack version.
-To get FlashBack Pro 5.30 legally, you can follow these steps:
-
-- Go to the official website of FlashBack Pro 5.30 at https://www.flashbackrecorder.com/
-- Click on the "Buy Now" button and choose the edition that suits your needs (Standard or Pro)
-- Fill in your personal and payment details and complete the order
-- You will receive an email with your license key and download link
-- Download and install FlashBack Pro 5.30 on your PC using the download link
-- Activate FlashBack Pro 5.30 with your license key
-- Enjoy using FlashBack Pro 5.30 legally and without any limitations or watermarks
-
-Conclusion
-FlashBack Pro 5.30 is a powerful screen recorder and video editor that allows you to capture and edit your screen activities with ease. However, if you don't want to pay for it, you might be tempted to download and use FlashBack Pro 5.30 crack, which is a modified version of the software that bypasses the activation process.
-However, using FlashBack Pro 5.30 crack is not a good idea, as it is illegal, unsafe, unreliable, and unethical. Instead of using the crack, consider the free trial or free version of FlashBack Pro 5.30, or switch to alternative software.
-We hope this article has helped you understand what FlashBack Pro 5.30 crack is, what the risks and disadvantages of using it are, how to download and install it, how to use it, what its features are, and how to get it legally.
-3cee63e6c2
-
-
\ No newline at end of file
diff --git a/spaces/gwang-kim/DATID-3D/pose_estimation/nvdiffrast/nvdiffrast/torch/torch_types.h b/spaces/gwang-kim/DATID-3D/pose_estimation/nvdiffrast/nvdiffrast/torch/torch_types.h
deleted file mode 100644
index d047cc67d4c901f26ab59bb8eb93c7a209368fc4..0000000000000000000000000000000000000000
--- a/spaces/gwang-kim/DATID-3D/pose_estimation/nvdiffrast/nvdiffrast/torch/torch_types.h
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
-//
-// NVIDIA CORPORATION and its licensors retain all intellectual property
-// and proprietary rights in and to this software, related documentation
-// and any modifications thereto. Any use, reproduction, disclosure or
-// distribution of this software and related documentation without an express
-// license agreement from NVIDIA CORPORATION is strictly prohibited.
-
-#include "torch_common.inl"
-
-//------------------------------------------------------------------------
-// Python GL state wrapper.
-
-class RasterizeGLState;
-class RasterizeGLStateWrapper
-{
-public:
- RasterizeGLStateWrapper (bool enableDB, bool automatic, int cudaDeviceIdx);
- ~RasterizeGLStateWrapper (void);
-
- void setContext (void);
- void releaseContext (void);
-
- RasterizeGLState* pState;
- bool automatic;
- int cudaDeviceIdx;
-};
-
-//------------------------------------------------------------------------
-// Mipmap wrapper to prevent intrusion from Python side.
-
-class TextureMipWrapper
-{
-public:
- torch::Tensor mip;
- int max_mip_level;
- std::vector<int> texture_size; // For error checking.
- bool cube_mode; // For error checking.
-};
-
-
-//------------------------------------------------------------------------
-// Antialias topology hash wrapper to prevent intrusion from Python side.
-
-class TopologyHashWrapper
-{
-public:
- torch::Tensor ev_hash;
-};
-
-//------------------------------------------------------------------------
diff --git a/spaces/h2oai/wave-tour/examples/background_executor.py b/spaces/h2oai/wave-tour/examples/background_executor.py
deleted file mode 100644
index c831f2cde29052efca06659f462b08d449170fba..0000000000000000000000000000000000000000
--- a/spaces/h2oai/wave-tour/examples/background_executor.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# Background Tasks / Executor
-# Use q.exec() to execute background functions using a thread-pool or process-pool.
-# #background_tasks #executor
-# ---
-import time
-import random
-import concurrent.futures
-from h2o_wave import main, app, Q, ui
-
-
-def blocking_function(secs) -> str:
- time.sleep(secs) # Blocks!
- return f'Done waiting for {secs} seconds!'
-
-
-@app('/demo')
-async def serve(q: Q):
- if q.args.start:
- q.page['form'] = ui.form_card(box='1 1 6 2', items=[ui.progress('Running...')])
- await q.page.save()
-
- seconds = random.randint(1, 6)
-
- # DON'T DO THIS!
- # This will make your app unresponsive for some time:
- # message = blocking_function(seconds)
-
- # Do this instead:
- with concurrent.futures.ThreadPoolExecutor() as pool:
- message = await q.exec(pool, blocking_function, seconds)
-
- # You can also pass a ProcessPoolExecutor, like this:
- # with concurrent.futures.ProcessPoolExecutor() as pool:
- # message = await q.exec(pool, blocking_function, seconds)
-
- q.page['form'] = ui.form_card(box='1 1 6 1', items=[ui.message_bar('info', message)])
- await q.page.save()
- else:
- q.page['form'] = ui.form_card(box='1 1 2 1', items=[ui.button(name='start', label='Start')])
- await q.page.save()
diff --git a/spaces/hank1996/yolopv2/lib/models/YOLOP.py b/spaces/hank1996/yolopv2/lib/models/YOLOP.py
deleted file mode 100644
index 2b896be137f9b514ac1c90c296c46161886241df..0000000000000000000000000000000000000000
--- a/spaces/hank1996/yolopv2/lib/models/YOLOP.py
+++ /dev/null
@@ -1,547 +0,0 @@
-import torch
-from torch import tensor
-import torch.nn as nn
-import sys, os
-import math
-sys.path.append(os.getcwd())
-#sys.path.append("lib/models")
-#sys.path.append("lib/utils")
-#sys.path.append("/workspace/wh/projects/DaChuang")
-#from lib.utils import initialize_weights
-# from lib.models.common2 import DepthSeperabelConv2d as Conv
-# from lib.models.common2 import SPP, Bottleneck, BottleneckCSP, Focus, Concat, Detect
-from lib.models.common import Conv, SPP, Bottleneck, BottleneckCSP, Focus, Concat, Detect, SharpenConv
-from torch.nn import Upsample
-#from lib.utils import check_anchor_order
-#from lib.core.evaluate import SegmentationMetric
-#from lib.utils.utils import time_synchronized
-
-def check_anchor_order(m):
- # Check anchor order against stride order for YOLOv5 Detect() module m, and correct if necessary
- a = m.anchor_grid.prod(-1).view(-1) # anchor area
- da = a[-1] - a[0] # delta a
- ds = m.stride[-1] - m.stride[0] # delta s
- if da.sign() != ds.sign(): # anchor order and stride order differ
- print('Reversing anchor order')
- m.anchors[:] = m.anchors.flip(0)
- m.anchor_grid[:] = m.anchor_grid.flip(0)
-
-"""
-MCnet_SPP = [
-[ -1, Focus, [3, 32, 3]],
-[ -1, Conv, [32, 64, 3, 2]],
-[ -1, BottleneckCSP, [64, 64, 1]],
-[ -1, Conv, [64, 128, 3, 2]],
-[ -1, BottleneckCSP, [128, 128, 3]],
-[ -1, Conv, [128, 256, 3, 2]],
-[ -1, BottleneckCSP, [256, 256, 3]],
-[ -1, Conv, [256, 512, 3, 2]],
-[ -1, SPP, [512, 512, [5, 9, 13]]],
-[ -1, BottleneckCSP, [512, 512, 1, False]],
-[ -1, Conv,[512, 256, 1, 1]],
-[ -1, Upsample, [None, 2, 'nearest']],
-[ [-1, 6], Concat, [1]],
-[ -1, BottleneckCSP, [512, 256, 1, False]],
-[ -1, Conv, [256, 128, 1, 1]],
-[ -1, Upsample, [None, 2, 'nearest']],
-[ [-1,4], Concat, [1]],
-[ -1, BottleneckCSP, [256, 128, 1, False]],
-[ -1, Conv, [128, 128, 3, 2]],
-[ [-1, 14], Concat, [1]],
-[ -1, BottleneckCSP, [256, 256, 1, False]],
-[ -1, Conv, [256, 256, 3, 2]],
-[ [-1, 10], Concat, [1]],
-[ -1, BottleneckCSP, [512, 512, 1, False]],
-# [ [17, 20, 23], Detect, [1, [[3,9,5,11,4,20], [7,18,6,39,12,31], [19,50,38,81,68,157]], [128, 256, 512]]],
-[ [17, 20, 23], Detect, [13, [[3,9,5,11,4,20], [7,18,6,39,12,31], [19,50,38,81,68,157]], [128, 256, 512]]],
-[ 17, Conv, [128, 64, 3, 1]],
-[ -1, Upsample, [None, 2, 'nearest']],
-[ [-1,2], Concat, [1]],
-[ -1, BottleneckCSP, [128, 64, 1, False]],
-[ -1, Conv, [64, 32, 3, 1]],
-[ -1, Upsample, [None, 2, 'nearest']],
-[ -1, Conv, [32, 16, 3, 1]],
-[ -1, BottleneckCSP, [16, 8, 1, False]],
-[ -1, Upsample, [None, 2, 'nearest']],
-[ -1, SPP, [8, 2, [5, 9, 13]]] #segmentation output
-]
-# [2,6,3,9,5,13], [7,19,11,26,17,39], [28,64,44,103,61,183]
-MCnet_0 = [
-[ -1, Focus, [3, 32, 3]],
-[ -1, Conv, [32, 64, 3, 2]],
-[ -1, BottleneckCSP, [64, 64, 1]],
-[ -1, Conv, [64, 128, 3, 2]],
-[ -1, BottleneckCSP, [128, 128, 3]],
-[ -1, Conv, [128, 256, 3, 2]],
-[ -1, BottleneckCSP, [256, 256, 3]],
-[ -1, Conv, [256, 512, 3, 2]],
-[ -1, SPP, [512, 512, [5, 9, 13]]],
-[ -1, BottleneckCSP, [512, 512, 1, False]],
-[ -1, Conv,[512, 256, 1, 1]],
-[ -1, Upsample, [None, 2, 'nearest']],
-[ [-1, 6], Concat, [1]],
-[ -1, BottleneckCSP, [512, 256, 1, False]],
-[ -1, Conv, [256, 128, 1, 1]],
-[ -1, Upsample, [None, 2, 'nearest']],
-[ [-1,4], Concat, [1]],
-[ -1, BottleneckCSP, [256, 128, 1, False]],
-[ -1, Conv, [128, 128, 3, 2]],
-[ [-1, 14], Concat, [1]],
-[ -1, BottleneckCSP, [256, 256, 1, False]],
-[ -1, Conv, [256, 256, 3, 2]],
-[ [-1, 10], Concat, [1]],
-[ -1, BottleneckCSP, [512, 512, 1, False]],
-[ [17, 20, 23], Detect, [1, [[3,9,5,11,4,20], [7,18,6,39,12,31], [19,50,38,81,68,157]], [128, 256, 512]]], #Detect output 24
-[ 16, Conv, [128, 64, 3, 1]],
-[ -1, Upsample, [None, 2, 'nearest']],
-[ [-1,2], Concat, [1]],
-[ -1, BottleneckCSP, [128, 64, 1, False]],
-[ -1, Conv, [64, 32, 3, 1]],
-[ -1, Upsample, [None, 2, 'nearest']],
-[ -1, Conv, [32, 16, 3, 1]],
-[ -1, BottleneckCSP, [16, 8, 1, False]],
-[ -1, Upsample, [None, 2, 'nearest']],
-[ -1, Conv, [8, 2, 3, 1]], #Driving area segmentation output
-[ 16, Conv, [128, 64, 3, 1]],
-[ -1, Upsample, [None, 2, 'nearest']],
-[ [-1,2], Concat, [1]],
-[ -1, BottleneckCSP, [128, 64, 1, False]],
-[ -1, Conv, [64, 32, 3, 1]],
-[ -1, Upsample, [None, 2, 'nearest']],
-[ -1, Conv, [32, 16, 3, 1]],
-[ -1, BottleneckCSP, [16, 8, 1, False]],
-[ -1, Upsample, [None, 2, 'nearest']],
-[ -1, Conv, [8, 2, 3, 1]], #Lane line segmentation output
-]
-# The lane line and the driving area segment branches share information with each other
-MCnet_share = [
-[ -1, Focus, [3, 32, 3]], #0
-[ -1, Conv, [32, 64, 3, 2]], #1
-[ -1, BottleneckCSP, [64, 64, 1]], #2
-[ -1, Conv, [64, 128, 3, 2]], #3
-[ -1, BottleneckCSP, [128, 128, 3]], #4
-[ -1, Conv, [128, 256, 3, 2]], #5
-[ -1, BottleneckCSP, [256, 256, 3]], #6
-[ -1, Conv, [256, 512, 3, 2]], #7
-[ -1, SPP, [512, 512, [5, 9, 13]]], #8
-[ -1, BottleneckCSP, [512, 512, 1, False]], #9
-[ -1, Conv,[512, 256, 1, 1]], #10
-[ -1, Upsample, [None, 2, 'nearest']], #11
-[ [-1, 6], Concat, [1]], #12
-[ -1, BottleneckCSP, [512, 256, 1, False]], #13
-[ -1, Conv, [256, 128, 1, 1]], #14
-[ -1, Upsample, [None, 2, 'nearest']], #15
-[ [-1,4], Concat, [1]], #16
-[ -1, BottleneckCSP, [256, 128, 1, False]], #17
-[ -1, Conv, [128, 128, 3, 2]], #18
-[ [-1, 14], Concat, [1]], #19
-[ -1, BottleneckCSP, [256, 256, 1, False]], #20
-[ -1, Conv, [256, 256, 3, 2]], #21
-[ [-1, 10], Concat, [1]], #22
-[ -1, BottleneckCSP, [512, 512, 1, False]], #23
-[ [17, 20, 23], Detect, [1, [[3,9,5,11,4,20], [7,18,6,39,12,31], [19,50,38,81,68,157]], [128, 256, 512]]], #Detect output 24
-[ 16, Conv, [256, 64, 3, 1]], #25
-[ -1, Upsample, [None, 2, 'nearest']], #26
-[ [-1,2], Concat, [1]], #27
-[ -1, BottleneckCSP, [128, 64, 1, False]], #28
-[ -1, Conv, [64, 32, 3, 1]], #29
-[ -1, Upsample, [None, 2, 'nearest']], #30
-[ -1, Conv, [32, 16, 3, 1]], #31
-[ -1, BottleneckCSP, [16, 8, 1, False]], #32 driving area segment neck
-[ 16, Conv, [256, 64, 3, 1]], #33
-[ -1, Upsample, [None, 2, 'nearest']], #34
-[ [-1,2], Concat, [1]], #35
-[ -1, BottleneckCSP, [128, 64, 1, False]], #36
-[ -1, Conv, [64, 32, 3, 1]], #37
-[ -1, Upsample, [None, 2, 'nearest']], #38
-[ -1, Conv, [32, 16, 3, 1]], #39
-[ -1, BottleneckCSP, [16, 8, 1, False]], #40 lane line segment neck
-[ [31,39], Concat, [1]], #41
-[ -1, Conv, [32, 8, 3, 1]], #42 Share_Block
-[ [32,42], Concat, [1]], #43
-[ -1, Upsample, [None, 2, 'nearest']], #44
-[ -1, Conv, [16, 2, 3, 1]], #45 Driving area segmentation output
-[ [40,42], Concat, [1]], #46
-[ -1, Upsample, [None, 2, 'nearest']], #47
-[ -1, Conv, [16, 2, 3, 1]] #48Lane line segmentation output
-]
-# The lane line and the driving area segment branches without share information with each other
-MCnet_no_share = [
-[ -1, Focus, [3, 32, 3]], #0
-[ -1, Conv, [32, 64, 3, 2]], #1
-[ -1, BottleneckCSP, [64, 64, 1]], #2
-[ -1, Conv, [64, 128, 3, 2]], #3
-[ -1, BottleneckCSP, [128, 128, 3]], #4
-[ -1, Conv, [128, 256, 3, 2]], #5
-[ -1, BottleneckCSP, [256, 256, 3]], #6
-[ -1, Conv, [256, 512, 3, 2]], #7
-[ -1, SPP, [512, 512, [5, 9, 13]]], #8
-[ -1, BottleneckCSP, [512, 512, 1, False]], #9
-[ -1, Conv,[512, 256, 1, 1]], #10
-[ -1, Upsample, [None, 2, 'nearest']], #11
-[ [-1, 6], Concat, [1]], #12
-[ -1, BottleneckCSP, [512, 256, 1, False]], #13
-[ -1, Conv, [256, 128, 1, 1]], #14
-[ -1, Upsample, [None, 2, 'nearest']], #15
-[ [-1,4], Concat, [1]], #16
-[ -1, BottleneckCSP, [256, 128, 1, False]], #17
-[ -1, Conv, [128, 128, 3, 2]], #18
-[ [-1, 14], Concat, [1]], #19
-[ -1, BottleneckCSP, [256, 256, 1, False]], #20
-[ -1, Conv, [256, 256, 3, 2]], #21
-[ [-1, 10], Concat, [1]], #22
-[ -1, BottleneckCSP, [512, 512, 1, False]], #23
-[ [17, 20, 23], Detect, [1, [[3,9,5,11,4,20], [7,18,6,39,12,31], [19,50,38,81,68,157]], [128, 256, 512]]], #Detect output 24
-[ 16, Conv, [256, 64, 3, 1]], #25
-[ -1, Upsample, [None, 2, 'nearest']], #26
-[ [-1,2], Concat, [1]], #27
-[ -1, BottleneckCSP, [128, 64, 1, False]], #28
-[ -1, Conv, [64, 32, 3, 1]], #29
-[ -1, Upsample, [None, 2, 'nearest']], #30
-[ -1, Conv, [32, 16, 3, 1]], #31
-[ -1, BottleneckCSP, [16, 8, 1, False]], #32 driving area segment neck
-[ -1, Upsample, [None, 2, 'nearest']], #33
-[ -1, Conv, [8, 3, 3, 1]], #34 Driving area segmentation output
-[ 16, Conv, [256, 64, 3, 1]], #35
-[ -1, Upsample, [None, 2, 'nearest']], #36
-[ [-1,2], Concat, [1]], #37
-[ -1, BottleneckCSP, [128, 64, 1, False]], #38
-[ -1, Conv, [64, 32, 3, 1]], #39
-[ -1, Upsample, [None, 2, 'nearest']], #40
-[ -1, Conv, [32, 16, 3, 1]], #41
-[ -1, BottleneckCSP, [16, 8, 1, False]], #42 lane line segment neck
-[ -1, Upsample, [None, 2, 'nearest']], #43
-[ -1, Conv, [8, 2, 3, 1]] #44 Lane line segmentation output
-]
-MCnet_feedback = [
-[ -1, Focus, [3, 32, 3]], #0
-[ -1, Conv, [32, 64, 3, 2]], #1
-[ -1, BottleneckCSP, [64, 64, 1]], #2
-[ -1, Conv, [64, 128, 3, 2]], #3
-[ -1, BottleneckCSP, [128, 128, 3]], #4
-[ -1, Conv, [128, 256, 3, 2]], #5
-[ -1, BottleneckCSP, [256, 256, 3]], #6
-[ -1, Conv, [256, 512, 3, 2]], #7
-[ -1, SPP, [512, 512, [5, 9, 13]]], #8
-[ -1, BottleneckCSP, [512, 512, 1, False]], #9
-[ -1, Conv,[512, 256, 1, 1]], #10
-[ -1, Upsample, [None, 2, 'nearest']], #11
-[ [-1, 6], Concat, [1]], #12
-[ -1, BottleneckCSP, [512, 256, 1, False]], #13
-[ -1, Conv, [256, 128, 1, 1]], #14
-[ -1, Upsample, [None, 2, 'nearest']], #15
-[ [-1,4], Concat, [1]], #16
-[ -1, BottleneckCSP, [256, 128, 1, False]], #17
-[ -1, Conv, [128, 128, 3, 2]], #18
-[ [-1, 14], Concat, [1]], #19
-[ -1, BottleneckCSP, [256, 256, 1, False]], #20
-[ -1, Conv, [256, 256, 3, 2]], #21
-[ [-1, 10], Concat, [1]], #22
-[ -1, BottleneckCSP, [512, 512, 1, False]], #23
-[ [17, 20, 23], Detect, [1, [[3,9,5,11,4,20], [7,18,6,39,12,31], [19,50,38,81,68,157]], [128, 256, 512]]], #Detect output 24
-[ 16, Conv, [256, 128, 3, 1]], #25
-[ -1, Upsample, [None, 2, 'nearest']], #26
-[ -1, BottleneckCSP, [128, 64, 1, False]], #28
-[ -1, Conv, [64, 32, 3, 1]], #29
-[ -1, Upsample, [None, 2, 'nearest']], #30
-[ -1, Conv, [32, 16, 3, 1]], #31
-[ -1, BottleneckCSP, [16, 8, 1, False]], #32 driving area segment neck
-[ -1, Upsample, [None, 2, 'nearest']], #33
-[ -1, Conv, [8, 2, 3, 1]], #34 Driving area segmentation output
-[ 16, Conv, [256, 128, 3, 1]], #35
-[ -1, Upsample, [None, 2, 'nearest']], #36
-[ -1, BottleneckCSP, [128, 64, 1, False]], #38
-[ -1, Conv, [64, 32, 3, 1]], #39
-[ -1, Upsample, [None, 2, 'nearest']], #40
-[ -1, Conv, [32, 16, 3, 1]], #41
-[ -1, BottleneckCSP, [16, 8, 1, False]], #42 lane line segment neck
-[ -1, Upsample, [None, 2, 'nearest']], #43
-[ -1, Conv, [8, 2, 3, 1]] #44 Lane line segmentation output
-]
-MCnet_Da_feedback1 = [
-[46, 26, 35], #Det_out_idx, Da_Segout_idx, LL_Segout_idx
-[ -1, Focus, [3, 32, 3]], #0
-[ -1, Conv, [32, 64, 3, 2]], #1
-[ -1, BottleneckCSP, [64, 64, 1]], #2
-[ -1, Conv, [64, 128, 3, 2]], #3
-[ -1, BottleneckCSP, [128, 128, 3]], #4
-[ -1, Conv, [128, 256, 3, 2]], #5
-[ -1, BottleneckCSP, [256, 256, 3]], #6
-[ -1, Conv, [256, 512, 3, 2]], #7
-[ -1, SPP, [512, 512, [5, 9, 13]]], #8
-[ -1, BottleneckCSP, [512, 512, 1, False]], #9
-[ -1, Conv,[512, 256, 1, 1]], #10
-[ -1, Upsample, [None, 2, 'nearest']], #11
-[ [-1, 6], Concat, [1]], #12
-[ -1, BottleneckCSP, [512, 256, 1, False]], #13
-[ -1, Conv, [256, 128, 1, 1]], #14
-[ -1, Upsample, [None, 2, 'nearest']], #15
-[ [-1,4], Concat, [1]], #16 backbone+fpn
-[ -1,Conv,[256,256,1,1]], #17
-[ 16, Conv, [256, 128, 3, 1]], #18
-[ -1, Upsample, [None, 2, 'nearest']], #19
-[ -1, BottleneckCSP, [128, 64, 1, False]], #20
-[ -1, Conv, [64, 32, 3, 1]], #21
-[ -1, Upsample, [None, 2, 'nearest']], #22
-[ -1, Conv, [32, 16, 3, 1]], #23
-[ -1, BottleneckCSP, [16, 8, 1, False]], #24 driving area segment neck
-[ -1, Upsample, [None, 2, 'nearest']], #25
-[ -1, Conv, [8, 2, 3, 1]], #26 Driving area segmentation output
-[ 16, Conv, [256, 128, 3, 1]], #27
-[ -1, Upsample, [None, 2, 'nearest']], #28
-[ -1, BottleneckCSP, [128, 64, 1, False]], #29
-[ -1, Conv, [64, 32, 3, 1]], #30
-[ -1, Upsample, [None, 2, 'nearest']], #31
-[ -1, Conv, [32, 16, 3, 1]], #32
-[ -1, BottleneckCSP, [16, 8, 1, False]], #33 lane line segment neck
-[ -1, Upsample, [None, 2, 'nearest']], #34
-[ -1, Conv, [8, 2, 3, 1]], #35Lane line segmentation output
-[ 23, Conv, [16, 16, 3, 2]], #36
-[ -1, Conv, [16, 32, 3, 2]], #2 times 2xdownsample 37
-[ [-1,17], Concat, [1]], #38
-[ -1, BottleneckCSP, [288, 128, 1, False]], #39
-[ -1, Conv, [128, 128, 3, 2]], #40
-[ [-1, 14], Concat, [1]], #41
-[ -1, BottleneckCSP, [256, 256, 1, False]], #42
-[ -1, Conv, [256, 256, 3, 2]], #43
-[ [-1, 10], Concat, [1]], #44
-[ -1, BottleneckCSP, [512, 512, 1, False]], #45
-[ [39, 42, 45], Detect, [1, [[3,9,5,11,4,20], [7,18,6,39,12,31], [19,50,38,81,68,157]], [128, 256, 512]]] #Detect output 46
-]
-# The lane line and the driving area segment branches share information with each other and feedback to det_head
-MCnet_Da_feedback2 = [
-[47, 26, 35], #Det_out_idx, Da_Segout_idx, LL_Segout_idx
-[25, 28, 31, 33], #layer in Da_branch to do SAD
-[34, 37, 40, 42], #layer in LL_branch to do SAD
-[ -1, Focus, [3, 32, 3]], #0
-[ -1, Conv, [32, 64, 3, 2]], #1
-[ -1, BottleneckCSP, [64, 64, 1]], #2
-[ -1, Conv, [64, 128, 3, 2]], #3
-[ -1, BottleneckCSP, [128, 128, 3]], #4
-[ -1, Conv, [128, 256, 3, 2]], #5
-[ -1, BottleneckCSP, [256, 256, 3]], #6
-[ -1, Conv, [256, 512, 3, 2]], #7
-[ -1, SPP, [512, 512, [5, 9, 13]]], #8
-[ -1, BottleneckCSP, [512, 512, 1, False]], #9
-[ -1, Conv,[512, 256, 1, 1]], #10
-[ -1, Upsample, [None, 2, 'nearest']], #11
-[ [-1, 6], Concat, [1]], #12
-[ -1, BottleneckCSP, [512, 256, 1, False]], #13
-[ -1, Conv, [256, 128, 1, 1]], #14
-[ -1, Upsample, [None, 2, 'nearest']], #15
-[ [-1,4], Concat, [1]], #16 backbone+fpn
-[ -1,Conv,[256,256,1,1]], #17
-[ 16, Conv, [256, 128, 3, 1]], #18
-[ -1, Upsample, [None, 2, 'nearest']], #19
-[ -1, BottleneckCSP, [128, 64, 1, False]], #20
-[ -1, Conv, [64, 32, 3, 1]], #21
-[ -1, Upsample, [None, 2, 'nearest']], #22
-[ -1, Conv, [32, 16, 3, 1]], #23
-[ -1, BottleneckCSP, [16, 8, 1, False]], #24 driving area segment neck
-[ -1, Upsample, [None, 2, 'nearest']], #25
-[ -1, Conv, [8, 2, 3, 1]], #26 Driving area segmentation output
-[ 16, Conv, [256, 128, 3, 1]], #27
-[ -1, Upsample, [None, 2, 'nearest']], #28
-[ -1, BottleneckCSP, [128, 64, 1, False]], #29
-[ -1, Conv, [64, 32, 3, 1]], #30
-[ -1, Upsample, [None, 2, 'nearest']], #31
-[ -1, Conv, [32, 16, 3, 1]], #32
-[ -1, BottleneckCSP, [16, 8, 1, False]], #33 lane line segment neck
-[ -1, Upsample, [None, 2, 'nearest']], #34
-[ -1, Conv, [8, 2, 3, 1]], #35Lane line segmentation output
-[ 23, Conv, [16, 64, 3, 2]], #36
-[ -1, Conv, [64, 256, 3, 2]], #2 times 2xdownsample 37
-[ [-1,17], Concat, [1]], #38
-[-1, Conv, [512, 256, 3, 1]], #39
-[ -1, BottleneckCSP, [256, 128, 1, False]], #40
-[ -1, Conv, [128, 128, 3, 2]], #41
-[ [-1, 14], Concat, [1]], #42
-[ -1, BottleneckCSP, [256, 256, 1, False]], #43
-[ -1, Conv, [256, 256, 3, 2]], #44
-[ [-1, 10], Concat, [1]], #45
-[ -1, BottleneckCSP, [512, 512, 1, False]], #46
-[ [40, 42, 45], Detect, [1, [[3,9,5,11,4,20], [7,18,6,39,12,31], [19,50,38,81,68,157]], [128, 256, 512]]] #Detect output 47
-]
-MCnet_share1 = [
-[24, 33, 45], #Det_out_idx, Da_Segout_idx, LL_Segout_idx
-[25, 28, 31, 33], #layer in Da_branch to do SAD
-[34, 37, 40, 42], #layer in LL_branch to do SAD
-[ -1, Focus, [3, 32, 3]], #0
-[ -1, Conv, [32, 64, 3, 2]], #1
-[ -1, BottleneckCSP, [64, 64, 1]], #2
-[ -1, Conv, [64, 128, 3, 2]], #3
-[ -1, BottleneckCSP, [128, 128, 3]], #4
-[ -1, Conv, [128, 256, 3, 2]], #5
-[ -1, BottleneckCSP, [256, 256, 3]], #6
-[ -1, Conv, [256, 512, 3, 2]], #7
-[ -1, SPP, [512, 512, [5, 9, 13]]], #8
-[ -1, BottleneckCSP, [512, 512, 1, False]], #9
-[ -1, Conv,[512, 256, 1, 1]], #10
-[ -1, Upsample, [None, 2, 'nearest']], #11
-[ [-1, 6], Concat, [1]], #12
-[ -1, BottleneckCSP, [512, 256, 1, False]], #13
-[ -1, Conv, [256, 128, 1, 1]], #14
-[ -1, Upsample, [None, 2, 'nearest']], #15
-[ [-1,4], Concat, [1]], #16
-[ -1, BottleneckCSP, [256, 128, 1, False]], #17
-[ -1, Conv, [128, 128, 3, 2]], #18
-[ [-1, 14], Concat, [1]], #19
-[ -1, BottleneckCSP, [256, 256, 1, False]], #20
-[ -1, Conv, [256, 256, 3, 2]], #21
-[ [-1, 10], Concat, [1]], #22
-[ -1, BottleneckCSP, [512, 512, 1, False]], #23
-[ [17, 20, 23], Detect, [1, [[3,9,5,11,4,20], [7,18,6,39,12,31], [19,50,38,81,68,157]], [128, 256, 512]]], #Detect output 24
-[ 16, Conv, [256, 128, 3, 1]], #25
-[ -1, Upsample, [None, 2, 'nearest']], #26
-[ -1, BottleneckCSP, [128, 64, 1, False]], #27
-[ -1, Conv, [64, 32, 3, 1]], #28
-[ -1, Upsample, [None, 2, 'nearest']], #29
-[ -1, Conv, [32, 16, 3, 1]], #30
-[ -1, BottleneckCSP, [16, 8, 1, False]], #31 driving area segment neck
-[ -1, Upsample, [None, 2, 'nearest']], #32
-[ -1, Conv, [8, 2, 3, 1]], #33 Driving area segmentation output
-[ 16, Conv, [256, 128, 3, 1]], #34
-[ -1, Upsample, [None, 2, 'nearest']], #35
-[ -1, BottleneckCSP, [128, 64, 1, False]], #36
-[ -1, Conv, [64, 32, 3, 1]], #37
-[ -1, Upsample, [None, 2, 'nearest']], #38
-[ -1, Conv, [32, 16, 3, 1]], #39
-[ 30, SharpenConv, [16,16, 3, 1]], #40
-[ -1, Conv, [16, 16, 3, 1]], #41
-[ [-1, 39], Concat, [1]], #42
-[ -1, BottleneckCSP, [32, 8, 1, False]], #43 lane line segment neck
-[ -1, Upsample, [None, 2, 'nearest']], #44
-[ -1, Conv, [8, 2, 3, 1]] #45 Lane line segmentation output
-]"""
-
-
-# The lane line and driving area segmentation branches do not share information with each other and are not linked
-YOLOP = [
-[24, 33, 42], #Det_out_idx, Da_Segout_idx, LL_Segout_idx
-[ -1, Focus, [3, 32, 3]], #0
-[ -1, Conv, [32, 64, 3, 2]], #1
-[ -1, BottleneckCSP, [64, 64, 1]], #2
-[ -1, Conv, [64, 128, 3, 2]], #3
-[ -1, BottleneckCSP, [128, 128, 3]], #4
-[ -1, Conv, [128, 256, 3, 2]], #5
-[ -1, BottleneckCSP, [256, 256, 3]], #6
-[ -1, Conv, [256, 512, 3, 2]], #7
-[ -1, SPP, [512, 512, [5, 9, 13]]], #8
-[ -1, BottleneckCSP, [512, 512, 1, False]], #9
-[ -1, Conv,[512, 256, 1, 1]], #10
-[ -1, Upsample, [None, 2, 'nearest']], #11
-[ [-1, 6], Concat, [1]], #12
-[ -1, BottleneckCSP, [512, 256, 1, False]], #13
-[ -1, Conv, [256, 128, 1, 1]], #14
-[ -1, Upsample, [None, 2, 'nearest']], #15
-[ [-1,4], Concat, [1]], #16 #Encoder
-
-[ -1, BottleneckCSP, [256, 128, 1, False]], #17
-[ -1, Conv, [128, 128, 3, 2]], #18
-[ [-1, 14], Concat, [1]], #19
-[ -1, BottleneckCSP, [256, 256, 1, False]], #20
-[ -1, Conv, [256, 256, 3, 2]], #21
-[ [-1, 10], Concat, [1]], #22
-[ -1, BottleneckCSP, [512, 512, 1, False]], #23
-[ [17, 20, 23], Detect, [1, [[3,9,5,11,4,20], [7,18,6,39,12,31], [19,50,38,81,68,157]], [128, 256, 512]]], #Detection head 24
-
-[ 16, Conv, [256, 128, 3, 1]], #25
-[ -1, Upsample, [None, 2, 'nearest']], #26
-[ -1, BottleneckCSP, [128, 64, 1, False]], #27
-[ -1, Conv, [64, 32, 3, 1]], #28
-[ -1, Upsample, [None, 2, 'nearest']], #29
-[ -1, Conv, [32, 16, 3, 1]], #30
-[ -1, BottleneckCSP, [16, 8, 1, False]], #31
-[ -1, Upsample, [None, 2, 'nearest']], #32
-[ -1, Conv, [8, 2, 3, 1]], #33 Driving area segmentation head
-
-[ 16, Conv, [256, 128, 3, 1]], #34
-[ -1, Upsample, [None, 2, 'nearest']], #35
-[ -1, BottleneckCSP, [128, 64, 1, False]], #36
-[ -1, Conv, [64, 32, 3, 1]], #37
-[ -1, Upsample, [None, 2, 'nearest']], #38
-[ -1, Conv, [32, 16, 3, 1]], #39
-[ -1, BottleneckCSP, [16, 8, 1, False]], #40
-[ -1, Upsample, [None, 2, 'nearest']], #41
-[ -1, Conv, [8, 2, 3, 1]] #42 Lane line segmentation head
-]
-
-
-class MCnet(nn.Module):
- def __init__(self, block_cfg, **kwargs):
- super(MCnet, self).__init__()
- layers, save= [], []
- self.nc = 1
- self.detector_index = -1
- self.det_out_idx = block_cfg[0][0]
- self.seg_out_idx = block_cfg[0][1:]
-
-
- # Build model
- for i, (from_, block, args) in enumerate(block_cfg[1:]):
- block = eval(block) if isinstance(block, str) else block # eval strings
- if block is Detect:
- self.detector_index = i
- block_ = block(*args)
- block_.index, block_.from_ = i, from_
- layers.append(block_)
- save.extend(x % i for x in ([from_] if isinstance(from_, int) else from_) if x != -1) # append to savelist
- assert self.detector_index == block_cfg[0][0]
-
- self.model, self.save = nn.Sequential(*layers), sorted(save)
- self.names = [str(i) for i in range(self.nc)]
-
- # set stride and anchors for the detector
- Detector = self.model[self.detector_index] # detector
- if isinstance(Detector, Detect):
- s = 128 # 2x min stride
- # for x in self.forward(torch.zeros(1, 3, s, s)):
- # print (x.shape)
- with torch.no_grad():
- model_out = self.forward(torch.zeros(1, 3, s, s))
- detects, _, _= model_out
- Detector.stride = torch.tensor([s / x.shape[-2] for x in detects]) # forward
- # print("stride"+str(Detector.stride ))
- Detector.anchors /= Detector.stride.view(-1, 1, 1) # Set the anchors for the corresponding scale
- check_anchor_order(Detector)
- self.stride = Detector.stride
- self._initialize_biases()
-
- #initialize_weights(self)
-
- def forward(self, x):
- cache = []
- out = []
- det_out = None
- Da_fmap = []
- LL_fmap = []
- for i, block in enumerate(self.model):
- if block.from_ != -1:
- x = cache[block.from_] if isinstance(block.from_, int) else [x if j == -1 else cache[j] for j in block.from_] #calculate concat detect
- x = block(x)
- if i in self.seg_out_idx: #save driving area segment result
- m=nn.Sigmoid()
- out.append(m(x))
- if i == self.detector_index:
- det_out = x
- cache.append(x if block.index in self.save else None)
- out.insert(0,det_out)
- return out
-
-
- def _initialize_biases(self, cf=None): # initialize biases into Detect(), cf is class frequency
- # https://arxiv.org/abs/1708.02002 section 3.3
- # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1.
- # m = self.model[-1] # Detect() module
- m = self.model[self.detector_index] # Detect() module
- for mi, s in zip(m.m, m.stride): # from
- b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85)
- b.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image)
- b.data[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) # cls
- mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
-
-def get_net(cfg, **kwargs):
- m_block_cfg = YOLOP
- model = MCnet(m_block_cfg, **kwargs)
- return model
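For context, a minimal usage sketch (not part of the repository) of how the YOLOP block configuration above is consumed: get_net builds the MCnet, and a forward pass returns the detection output followed by the two segmentation maps whose layer indices are listed in block_cfg[0]. It assumes the repository root is on PYTHONPATH so that lib.models.common (Conv, BottleneckCSP, Detect, ...) resolves, and that the input resolution is a multiple of 32.

import torch
from lib.models.YOLOP import get_net

model = get_net(cfg=None).eval()   # cfg is accepted but unused by get_net above
with torch.no_grad():
    det_out, da_seg, ll_seg = model(torch.zeros(1, 3, 384, 640))
print(da_seg.shape, ll_seg.shape)  # (1, 2, 384, 640) each: drivable-area and lane-line masks (sigmoid outputs)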
diff --git a/spaces/haotiz/glip-zeroshot-demo/maskrcnn_benchmark/layers/__init__.py b/spaces/haotiz/glip-zeroshot-demo/maskrcnn_benchmark/layers/__init__.py
deleted file mode 100644
index bc4922cd22ca66a7d77591560d6842fd8f0504a7..0000000000000000000000000000000000000000
--- a/spaces/haotiz/glip-zeroshot-demo/maskrcnn_benchmark/layers/__init__.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
-import torch
-
-from .batch_norm import FrozenBatchNorm2d, NaiveSyncBatchNorm2d
-from .misc import Conv2d, _NewEmptyTensorOp
-from .misc import ConvTranspose2d
-from .misc import DFConv2d
-from .misc import interpolate
-from .misc import Scale
-from .nms import nms
-from .nms import ml_nms
-from .nms import soft_nms
-from .roi_align import ROIAlign
-from .roi_align import roi_align
-from .roi_align import ROIAlignV2
-from .roi_pool import ROIPool
-from .roi_pool import roi_pool
-from .smooth_l1_loss import smooth_l1_loss
-from .sigmoid_focal_loss import SigmoidFocalLoss, TokenSigmoidFocalLoss
-from .iou_loss import IOULoss, IOUWHLoss
-from .deform_conv import DeformConv, ModulatedDeformConv
-from .dropblock import DropBlock2D, DropBlock3D
-from .evonorm import EvoNorm2d
-from .dyrelu import DYReLU, swish
-from .se import SELayer, SEBlock
-from .dyhead import DyHead
-from .set_loss import HungarianMatcher, SetCriterion
-
-__all__ = ["nms", "ml_nms", "soft_nms", "roi_align", "ROIAlign", "roi_pool", "ROIPool",
- "smooth_l1_loss", "Conv2d", "ConvTranspose2d", "interpolate", "swish",
- "FrozenBatchNorm2d", "NaiveSyncBatchNorm2d", "SigmoidFocalLoss", "TokenSigmoidFocalLoss", "IOULoss",
- "IOUWHLoss", "Scale", "DeformConv", "ModulatedDeformConv", "DyHead",
- "DropBlock2D", "DropBlock3D", "EvoNorm2d", "DYReLU", "SELayer", "SEBlock",
- "HungarianMatcher", "SetCriterion", "ROIAlignV2", "_NewEmptyTensorOp"]
diff --git a/spaces/harish3110/emotion_detection/README.md b/spaces/harish3110/emotion_detection/README.md
deleted file mode 100644
index 13fdf6105b874f6b0ca644763b3cc4ba24013112..0000000000000000000000000000000000000000
--- a/spaces/harish3110/emotion_detection/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Emotion Detection
-emoji: 🌖
-colorFrom: blue
-colorTo: indigo
-sdk: gradio
-sdk_version: 3.0.19
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/haxenbane/20230903/README.md b/spaces/haxenbane/20230903/README.md
deleted file mode 100644
index 5d6936218874c647b5d22e13ad4be7edb8936f92..0000000000000000000000000000000000000000
--- a/spaces/haxenbane/20230903/README.md
+++ /dev/null
@@ -1,28 +0,0 @@
----
-title: bingo
-emoji: 😊
-colorFrom: red
-colorTo: red
-sdk: docker
-license: mit
-duplicated_from: hf4all/bingo
----
-
-
-
-# Bingo
-
-Bingo, a New Bing that lets you breathe freely.
-
-A faithful recreation of the main features of the New Bing web UI; it works from mainland China, supports most Microsoft Bing AI features, and can be self-hosted.
-
-
-
-[Docker Hub](https://hub.docker.com/repository/docker/weaigc/bingo/)
-[License](https://github.com/weaigc/bingo/blob/main/license)
-
-For bug reports and feedback, please visit https://github.com/weaigc/bingo/issues
-
-
-
diff --git a/spaces/hebert2099/MusicGen/audiocraft/modules/activations.py b/spaces/hebert2099/MusicGen/audiocraft/modules/activations.py
deleted file mode 100644
index 8bd6f2917a56d72db56555d0ff54b2311bc21778..0000000000000000000000000000000000000000
--- a/spaces/hebert2099/MusicGen/audiocraft/modules/activations.py
+++ /dev/null
@@ -1,96 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-import torch
-import torch.nn as nn
-from torch import Tensor
-from typing import Union, Callable
-
-
-class CustomGLU(nn.Module):
- """Custom Gated Linear Unit activation.
- Applies a modified gated linear unit :math:`a * f(b)` where :math:`a` is the first half
- of the input matrices, :math:`b` is the second half, and :math:`f` is a provided activation
- function (i.e. sigmoid, swish, etc.).
-
- Args:
- activation (nn.Module): The custom activation to apply in the Gated Linear Unit
- dim (int): the dimension on which to split the input. Default: -1
-
- Shape:
- - Input: :math:`(\ast_1, N, \ast_2)` where `*` means any number of additional
- dimensions
- - Output: :math:`(\ast_1, M, \ast_2)` where :math:`M=N/2`
-
- Examples::
- >>> m = CustomGLU(nn.Sigmoid())
- >>> input = torch.randn(4, 2)
- >>> output = m(input)
- """
- def __init__(self, activation: nn.Module, dim: int = -1):
- super(CustomGLU, self).__init__()
- self.dim = dim
- self.activation = activation
-
- def forward(self, x: Tensor):
- assert x.shape[self.dim] % 2 == 0 # M = N / 2
- a, b = torch.chunk(x, 2, dim=self.dim)
- return a * self.activation(b)
-
-
-class SwiGLU(CustomGLU):
- """SiLU Gated Linear Unit activation.
- Applies SiLU Gated Linear Unit :math:`a * SiLU(b)` where :math:`a` is
- the first half of the input matrices, :math:`b` is the second half.
-
- Args:
- dim (int): the dimension on which to split the input. Default: -1
- """
- def __init__(self, dim: int = -1):
- super(SwiGLU, self).__init__(nn.SiLU(), dim)
-
-
-class GeGLU(CustomGLU):
- """GeLU Gated Linear Unit activation.
- Applies GeLU Gated Linear Unit :math:`a * GELU(b)` where :math:`a` is
- the first half of the input matrices, :math:`b` is the second half.
-
- Args:
- dim (int): the dimension on which to split the input. Default: -1
- """
- def __init__(self, dim: int = -1):
- super(GeGLU, self).__init__(nn.GELU(), dim)
-
-
-class ReGLU(CustomGLU):
- """ReLU Gated Linear Unit activation.
- Applies ReLU Gated Linear Unit :math:`a * ReLU(b)` where :math:`a` is
- the first half of the input matrices, :math:`b` is the second half.
-
- Args:
- dim (int): the dimension on which to split the input. Default: -1
- """
- def __init__(self, dim: int = -1):
- super(ReGLU, self).__init__(nn.ReLU(), dim)
-
-
-def get_activation_fn(
- activation: Union[str, Callable[[Tensor], Tensor]]
-) -> Union[str, Callable[[Tensor], Tensor]]:
- """Helper function to map an activation string to the activation class.
- If the supplied activation is not a string that is recognized, the activation is passed back.
-
- Args:
- activation (Union[str, Callable[[Tensor], Tensor]]): Activation to check
- """
- if isinstance(activation, str):
- if activation == "reglu":
- return ReGLU()
- elif activation == "geglu":
- return GeGLU()
- elif activation == "swiglu":
- return SwiGLU()
- return activation
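A quick usage sketch of the helpers above (not part of the file; the import path assumes the audiocraft package layout shown in the diff header): get_activation_fn maps the string "swiglu" to a SwiGLU instance, and the GLU variants halve the dimension they split, so a last dimension of 8 becomes 4.

import torch
from audiocraft.modules.activations import get_activation_fn, SwiGLU

act = get_activation_fn("swiglu")
assert isinstance(act, SwiGLU)

x = torch.randn(4, 8)   # the split dimension (last, by default) must be even
y = act(x)              # a * SiLU(b), where a, b = torch.chunk(x, 2, dim=-1)
print(y.shape)          # torch.Size([4, 4])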
diff --git a/spaces/ho11laqe/nnUNet_calvingfront_detection/nnunet/training/network_training/nnUNet_variants/cascade/nnUNetTrainerV2CascadeFullRes_lowerLR.py b/spaces/ho11laqe/nnUNet_calvingfront_detection/nnunet/training/network_training/nnUNet_variants/cascade/nnUNetTrainerV2CascadeFullRes_lowerLR.py
deleted file mode 100644
index 9ca7ee03f9dfcc9acdeb69abd764c3ab960ab740..0000000000000000000000000000000000000000
--- a/spaces/ho11laqe/nnUNet_calvingfront_detection/nnunet/training/network_training/nnUNet_variants/cascade/nnUNetTrainerV2CascadeFullRes_lowerLR.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-from nnunet.training.network_training.nnUNetTrainerV2_CascadeFullRes import nnUNetTrainerV2CascadeFullRes
-
-
-class nnUNetTrainerV2CascadeFullRes_lowerLR(nnUNetTrainerV2CascadeFullRes):
- def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
- unpack_data=True, deterministic=True, previous_trainer="nnUNetTrainerV2", fp16=False):
- super().__init__(plans_file, fold, output_folder, dataset_directory,
- batch_dice, stage, unpack_data, deterministic,
- previous_trainer, fp16)
- self.initial_lr = 1e-3
diff --git a/spaces/hoshilumine/combined-GI-RVC-models/config.py b/spaces/hoshilumine/combined-GI-RVC-models/config.py
deleted file mode 100644
index 2fda460b186b86923e757618c2f4f6fc0c45d8cf..0000000000000000000000000000000000000000
--- a/spaces/hoshilumine/combined-GI-RVC-models/config.py
+++ /dev/null
@@ -1,117 +0,0 @@
-import argparse
-import sys
-import torch
-from multiprocessing import cpu_count
-
-class Config:
- def __init__(self):
- self.device = "cuda:0"
- self.is_half = True
- self.n_cpu = 0
- self.gpu_name = None
- self.gpu_mem = None
- (
- self.python_cmd,
- self.listen_port,
- self.colab,
- self.noparallel,
- self.noautoopen,
- self.api
- ) = self.arg_parse()
- self.x_pad, self.x_query, self.x_center, self.x_max = self.device_config()
-
- @staticmethod
- def arg_parse() -> tuple:
- exe = sys.executable or "python"
- parser = argparse.ArgumentParser()
- parser.add_argument("--port", type=int, default=7865, help="Listen port")
- parser.add_argument("--pycmd", type=str, default=exe, help="Python command")
- parser.add_argument("--colab", action="store_true", help="Launch in colab")
- parser.add_argument(
- "--noparallel", action="store_true", help="Disable parallel processing"
- )
- parser.add_argument(
- "--noautoopen",
- action="store_true",
- help="Do not open in browser automatically",
- )
- parser.add_argument("--api", action="store_true", help="Launch with api")
- cmd_opts = parser.parse_args()
-
- cmd_opts.port = cmd_opts.port if 0 <= cmd_opts.port <= 65535 else 7865
-
- return (
- cmd_opts.pycmd,
- cmd_opts.port,
- cmd_opts.colab,
- cmd_opts.noparallel,
- cmd_opts.noautoopen,
- cmd_opts.api
- )
-
- # has_mps is only available in nightly PyTorch (for now) and on macOS 12.3+.
- # check `getattr` and try it for compatibility
- @staticmethod
- def has_mps() -> bool:
- if not torch.backends.mps.is_available():
- return False
- try:
- torch.zeros(1).to(torch.device("mps"))
- return True
- except Exception:
- return False
-
- def device_config(self) -> tuple:
- if torch.cuda.is_available():
- i_device = int(self.device.split(":")[-1])
- self.gpu_name = torch.cuda.get_device_name(i_device)
- if (
- ("16" in self.gpu_name and "V100" not in self.gpu_name.upper())
- or "P40" in self.gpu_name.upper()
- or "1060" in self.gpu_name
- or "1070" in self.gpu_name
- or "1080" in self.gpu_name
- ):
- print("Found GPU", self.gpu_name, ", force to fp32")
- self.is_half = False
- else:
- print("Found GPU", self.gpu_name)
- self.gpu_mem = int(
- torch.cuda.get_device_properties(i_device).total_memory
- / 1024
- / 1024
- / 1024
- + 0.4
- )
- elif self.has_mps():
- print("No supported Nvidia GPU found, use MPS instead")
- self.device = "mps"
- self.is_half = False
- else:
- print("No supported Nvidia GPU found, use CPU instead")
- self.device = "cpu"
- self.is_half = False
-
- if self.n_cpu == 0:
- self.n_cpu = cpu_count()
-
- if self.is_half:
- # settings for about 6 GB of GPU memory
- x_pad = 3
- x_query = 10
- x_center = 60
- x_max = 65
- else:
- # settings for about 5 GB of GPU memory
- x_pad = 1
- x_query = 6
- x_center = 38
- x_max = 41
-
- if self.gpu_mem is not None and self.gpu_mem <= 4:
- x_pad = 1
- x_query = 5
- x_center = 30
- x_max = 32
-
- return x_pad, x_query, x_center, x_max
diff --git a/spaces/hugggof/vampnet/scripts/exp/experiment.py b/spaces/hugggof/vampnet/scripts/exp/experiment.py
deleted file mode 100644
index d4426736949111ad0195c7373cce7d7d71a71a63..0000000000000000000000000000000000000000
--- a/spaces/hugggof/vampnet/scripts/exp/experiment.py
+++ /dev/null
@@ -1,254 +0,0 @@
-from pathlib import Path
-import random
-from typing import List
-import tempfile
-import subprocess
-
-import argbind
-from tqdm import tqdm
-import torch
-
-from vampnet.interface import Interface
-from vampnet import mask as pmask
-import audiotools as at
-
-Interface: Interface = argbind.bind(Interface)
-
-
-
-def calculate_bitrate(
- interface, num_codebooks,
- downsample_factor
- ):
- bit_width = 10
- sr = interface.codec.sample_rate
- hop = interface.codec.hop_size
- rate = (sr / hop) * ((bit_width * num_codebooks) / downsample_factor)
- return rate
-
-def baseline(sig, interface):
- return interface.preprocess(sig)
-
-def reconstructed(sig, interface):
- return interface.to_signal(
- interface.encode(sig)
- )
-
-def coarse2fine(sig, interface):
- z = interface.encode(sig)
- z = z[:, :interface.c2f.n_conditioning_codebooks, :]
-
- z = interface.coarse_to_fine(z)
- return interface.to_signal(z)
-
-class CoarseCond:
-
- def __init__(self, num_conditioning_codebooks, downsample_factor):
- self.num_conditioning_codebooks = num_conditioning_codebooks
- self.downsample_factor = downsample_factor
-
- def __call__(self, sig, interface):
- z = interface.encode(sig)
- mask = pmask.full_mask(z)
- mask = pmask.codebook_unmask(mask, self.num_conditioning_codebooks)
- mask = pmask.periodic_mask(mask, self.downsample_factor)
-
- zv = interface.coarse_vamp(z, mask)
- zv = interface.coarse_to_fine(zv)
- return interface.to_signal(zv)
-
-def opus(sig, interface, bitrate=128):
- sig = interface.preprocess(sig)
-
- with tempfile.NamedTemporaryFile(suffix=".wav") as f:
- sig.write(f.name)
-
- opus_name = Path(f.name).with_suffix(".opus")
- # convert to opus
- cmd = [
- "ffmpeg", "-y", "-i", f.name,
- "-c:a", "libopus",
- "-b:a", f"{bitrate}",
- opus_name
- ]
- subprocess.run(cmd, check=True)
-
- # convert back to wav
- output_name = Path(f"{f.name}-opus").with_suffix(".wav")
- cmd = [
- "ffmpeg", "-y", "-i", opus_name,
- output_name
- ]
-
- subprocess.run(cmd, check=True)
-
- sig = at.AudioSignal(
- output_name,
- sample_rate=sig.sample_rate
- )
- return sig
-
-def mask_ratio_1_step(ratio=1.0):
- def wrapper(sig, interface):
- z = interface.encode(sig)
- mask = pmask.linear_random(z, ratio)
- zv = interface.coarse_vamp(
- z,
- mask,
- sampling_steps=1,
- )
-
- return interface.to_signal(zv)
- return wrapper
-
-def num_sampling_steps(num_steps=1):
- def wrapper(sig, interface: Interface):
- z = interface.encode(sig)
- mask = pmask.periodic_mask(z, 16)
- zv = interface.coarse_vamp(
- z,
- mask,
- sampling_steps=num_steps,
- )
-
- zv = interface.coarse_to_fine(zv)
- return interface.to_signal(zv)
- return wrapper
-
-def beat_mask(ctx_time):
- def wrapper(sig, interface):
- beat_mask = interface.make_beat_mask(
- sig,
- before_beat_s=ctx_time/2,
- after_beat_s=ctx_time/2,
- invert=True
- )
-
- z = interface.encode(sig)
-
- zv = interface.coarse_vamp(
- z, beat_mask
- )
-
- zv = interface.coarse_to_fine(zv)
- return interface.to_signal(zv)
- return wrapper
-
-def inpaint(ctx_time):
- def wrapper(sig, interface: Interface):
- z = interface.encode(sig)
- mask = pmask.inpaint(z, interface.s2t(ctx_time), interface.s2t(ctx_time))
-
- zv = interface.coarse_vamp(z, mask)
- zv = interface.coarse_to_fine(zv)
-
- return interface.to_signal(zv)
- return wrapper
-
-def token_noise(noise_amt):
- def wrapper(sig, interface: Interface):
- z = interface.encode(sig)
- mask = pmask.random(z, noise_amt)
- z = torch.where(
- mask,
- torch.randint_like(z, 0, interface.coarse.vocab_size),
- z
- )
- return interface.to_signal(z)
- return wrapper
-
-EXP_REGISTRY = {}
-
-EXP_REGISTRY["gen-compression"] = {
- "baseline": baseline,
- "reconstructed": reconstructed,
- "coarse2fine": coarse2fine,
- **{
- f"{n}_codebooks_downsampled_{x}x": CoarseCond(num_conditioning_codebooks=n, downsample_factor=x)
- for (n, x) in (
- (1, 1), # 1 codebook, no downsampling
- (4, 4), # 4 codebooks, downsampled 4x
- (4, 16), # 4 codebooks, downsampled 16x
- (4, 32), # 4 codebooks, downsampled 32x
- )
- },
- **{
- f"token_noise_{x}": mask_ratio_1_step(ratio=x)
- for x in [0.25, 0.5, 0.75]
- },
-
-}
-
-
-EXP_REGISTRY["sampling-steps"] = {
- # "codec": reconstructed,
- **{f"steps_{n}": num_sampling_steps(n) for n in [1, 4, 12, 36, 64, 72]},
-}
-
-
-EXP_REGISTRY["musical-sampling"] = {
- **{f"beat_mask_{t}": beat_mask(t) for t in [0.075]},
- **{f"inpaint_{t}": inpaint(t) for t in [0.5, 1.0,]}, # multiply these by 2 (they go left and right)
-}
-
-@argbind.bind(without_prefix=True)
-def main(
- sources=[
- "/media/CHONK/hugo/spotdl/val",
- ],
- output_dir: str = "./samples",
- max_excerpts: int = 2000,
- exp_type: str = "gen-compression",
- seed: int = 0,
- ext: List[str] = [".mp3"],
- ):
- at.util.seed(seed)
- interface = Interface()
-
- output_dir = Path(output_dir)
- output_dir.mkdir(exist_ok=True, parents=True)
-
- from audiotools.data.datasets import AudioLoader, AudioDataset
-
- loader = AudioLoader(sources=sources, shuffle_state=seed, ext=ext)
- dataset = AudioDataset(loader,
- sample_rate=interface.codec.sample_rate,
- duration=interface.coarse.chunk_size_s,
- n_examples=max_excerpts,
- without_replacement=True,
- )
-
- if exp_type in EXP_REGISTRY:
- SAMPLE_CONDS = EXP_REGISTRY[exp_type]
- else:
- raise ValueError(f"Unknown exp_type {exp_type}")
-
-
- indices = list(range(max_excerpts))
- random.shuffle(indices)
- for i in tqdm(indices):
- # if all our files are already there, skip
- done = []
- for name in SAMPLE_CONDS:
- o_dir = Path(output_dir) / name
- done.append((o_dir / f"{i}.wav").exists())
- if all(done):
- continue
-
- sig = dataset[i]["signal"]
- results = {
- name: cond(sig, interface).cpu()
- for name, cond in SAMPLE_CONDS.items()
- }
-
- for name, sig in results.items():
- o_dir = Path(output_dir) / name
- o_dir.mkdir(exist_ok=True, parents=True)
-
- sig.write(o_dir / f"{i}.wav")
-
-if __name__ == "__main__":
- args = argbind.parse_args()
-
- with argbind.scope(args):
- main()
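A worked example of the calculate_bitrate helper defined near the top of this script. The sample rate and hop size are assumptions (a 44.1 kHz codec with a hop of 512 samples); bit_width = 10 matches the constant hard-coded in the function.

sr, hop, bit_width = 44100, 512, 10              # assumed codec sample rate and hop size
num_codebooks, downsample_factor = 4, 4
rate = (sr / hop) * ((bit_width * num_codebooks) / downsample_factor)
print(f"{rate:.1f} bits per second")             # ~861.3 bits/s when 4 codebooks are kept at 1/4 density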
diff --git a/spaces/iamironman4279/SadTalker/src/facerender/sync_batchnorm/comm.py b/spaces/iamironman4279/SadTalker/src/facerender/sync_batchnorm/comm.py
deleted file mode 100644
index 922f8c4a3adaa9b32fdcaef09583be03b0d7eb2b..0000000000000000000000000000000000000000
--- a/spaces/iamironman4279/SadTalker/src/facerender/sync_batchnorm/comm.py
+++ /dev/null
@@ -1,137 +0,0 @@
-# -*- coding: utf-8 -*-
-# File : comm.py
-# Author : Jiayuan Mao
-# Email : maojiayuan@gmail.com
-# Date : 27/01/2018
-#
-# This file is part of Synchronized-BatchNorm-PyTorch.
-# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
-# Distributed under MIT License.
-
-import queue
-import collections
-import threading
-
-__all__ = ['FutureResult', 'SlavePipe', 'SyncMaster']
-
-
-class FutureResult(object):
- """A thread-safe future implementation. Used only as one-to-one pipe."""
-
- def __init__(self):
- self._result = None
- self._lock = threading.Lock()
- self._cond = threading.Condition(self._lock)
-
- def put(self, result):
- with self._lock:
- assert self._result is None, 'Previous result hasn\'t been fetched.'
- self._result = result
- self._cond.notify()
-
- def get(self):
- with self._lock:
- if self._result is None:
- self._cond.wait()
-
- res = self._result
- self._result = None
- return res
-
-
-_MasterRegistry = collections.namedtuple('MasterRegistry', ['result'])
-_SlavePipeBase = collections.namedtuple('_SlavePipeBase', ['identifier', 'queue', 'result'])
-
-
-class SlavePipe(_SlavePipeBase):
- """Pipe for master-slave communication."""
-
- def run_slave(self, msg):
- self.queue.put((self.identifier, msg))
- ret = self.result.get()
- self.queue.put(True)
- return ret
-
-
-class SyncMaster(object):
- """An abstract `SyncMaster` object.
-
- - During replication, as data parallel triggers a callback for each module, all slave devices should
- call `register_slave(id)` and obtain a `SlavePipe` to communicate with the master.
- - During the forward pass, the master device invokes `run_master`; all messages from the slave devices are collected
- and passed to the registered callback.
- - After receiving the messages, the master device should gather the information and determine the message to be passed
- back to each slave device.
- """
-
- def __init__(self, master_callback):
- """
-
- Args:
- master_callback: a callback to be invoked after having collected messages from slave devices.
- """
- self._master_callback = master_callback
- self._queue = queue.Queue()
- self._registry = collections.OrderedDict()
- self._activated = False
-
- def __getstate__(self):
- return {'master_callback': self._master_callback}
-
- def __setstate__(self, state):
- self.__init__(state['master_callback'])
-
- def register_slave(self, identifier):
- """
- Register a slave device.
-
- Args:
- identifier: an identifier, usually is the device id.
-
- Returns: a `SlavePipe` object which can be used to communicate with the master device.
-
- """
- if self._activated:
- assert self._queue.empty(), 'Queue is not clean before next initialization.'
- self._activated = False
- self._registry.clear()
- future = FutureResult()
- self._registry[identifier] = _MasterRegistry(future)
- return SlavePipe(identifier, self._queue, future)
-
- def run_master(self, master_msg):
- """
- Main entry for the master device in each forward pass.
- The messages were first collected from each devices (including the master device), and then
- an callback will be invoked to compute the message to be sent back to each devices
- (including the master device).
-
- Args:
- master_msg: the message that the master wants to send to itself. This will be placed as the first
- message when calling `master_callback`. For detailed usage, see `_SynchronizedBatchNorm` for an example.
-
- Returns: the message to be sent back to the master device.
-
- """
- self._activated = True
-
- intermediates = [(0, master_msg)]
- for i in range(self.nr_slaves):
- intermediates.append(self._queue.get())
-
- results = self._master_callback(intermediates)
- assert results[0][0] == 0, 'The first result should belong to the master.'
-
- for i, res in results:
- if i == 0:
- continue
- self._registry[i].result.put(res)
-
- for i in range(self.nr_slaves):
- assert self._queue.get() is True
-
- return results[0][1]
-
- @property
- def nr_slaves(self):
- return len(self._registry)
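For reference, a minimal sketch (not from the repository) of the master/slave protocol described in the docstrings above, run with one master and one slave thread in a single process. The callback sums the payloads and returns the total to every participant; the import path is an assumption based on the file location in the diff header.

import threading
from src.facerender.sync_batchnorm.comm import SyncMaster  # assumed import path

def master_callback(intermediates):
    # intermediates is a list of (identifier, message); identifier 0 is the master itself
    total = sum(msg for _, msg in intermediates)
    return [(identifier, total) for identifier, _ in intermediates]

master = SyncMaster(master_callback)
pipe = master.register_slave(identifier=1)   # slaves must register before run_master is called

outputs = {}
worker = threading.Thread(target=lambda: outputs.update(slave=pipe.run_slave(2)))
worker.start()
outputs['master'] = master.run_master(1)     # blocks until the slave has submitted its message
worker.join()
print(outputs)                               # both values are 3 (1 from the master + 2 from the slave)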
diff --git a/spaces/iamironman4279/SadTalker/src/utils/text2speech.py b/spaces/iamironman4279/SadTalker/src/utils/text2speech.py
deleted file mode 100644
index 00d165b6cc7774fd200929aafa0ff3b15916111e..0000000000000000000000000000000000000000
--- a/spaces/iamironman4279/SadTalker/src/utils/text2speech.py
+++ /dev/null
@@ -1,20 +0,0 @@
-import os
-import tempfile
-from TTS.api import TTS
-
-
-class TTSTalker():
- def __init__(self) -> None:
- model_name = TTS.list_models()[0]
- self.tts = TTS(model_name)
-
- def test(self, text, language='en'):
-
- tempf = tempfile.NamedTemporaryFile(
- delete = False,
- suffix = ('.'+'wav'),
- )
-
- self.tts.tts_to_file(text, speaker=self.tts.speakers[0], language=language, file_path=tempf.name)
-
- return tempf.name
\ No newline at end of file
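A short usage sketch of the TTSTalker wrapper above (assuming the coqui TTS package is installed and its first listed model can be downloaded): test() synthesizes the text into a temporary wav file and returns its path.

from src.utils.text2speech import TTSTalker   # assumed import path, as in the diff header

talker = TTSTalker()                           # loads the first model returned by TTS.list_models()
wav_path = talker.test("Hello from SadTalker", language='en')
print(wav_path)                                # e.g. a temporary .wav file path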
diff --git a/spaces/inamXcontru/PoeticTTS/Adobe Premiere Pro CC 2019 13.0.0 (x64) Crack Full Version !NEW!.md b/spaces/inamXcontru/PoeticTTS/Adobe Premiere Pro CC 2019 13.0.0 (x64) Crack Full Version !NEW!.md
deleted file mode 100644
index 748fe6a000500c18b6bb0871ce986c738197b7da..0000000000000000000000000000000000000000
--- a/spaces/inamXcontru/PoeticTTS/Adobe Premiere Pro CC 2019 13.0.0 (x64) Crack Full Version !NEW!.md
+++ /dev/null
@@ -1,9 +0,0 @@
-Adobe Premiere Pro CC 2019 13.0.0 (x64) Crack full version
Download Zip ⚡ https://gohhs.com/2uz5QF
-
-Oct 3, 2019 - 3.44 Full Version Cracked program can do any task very quickly, because with this shortcut you can edit any video very quickly and you ... Download full version of the program for windows and android.
-3.44 full version cracked, download full version, cracked software, cracked full version, cracked key, full cracked, key, crack, full.
-Download the full version of the program for Windows and Android.
-3.44 full version cracked, download full 8a78ff9644
-
-
-
diff --git a/spaces/inamXcontru/PoeticTTS/Chennai Vs China Movie Free Download ((LINK)).md b/spaces/inamXcontru/PoeticTTS/Chennai Vs China Movie Free Download ((LINK)).md
deleted file mode 100644
index 630e3ccdc4815be2f9f9dd441596ccd07a00edc5..0000000000000000000000000000000000000000
--- a/spaces/inamXcontru/PoeticTTS/Chennai Vs China Movie Free Download ((LINK)).md
+++ /dev/null
@@ -1,6 +0,0 @@
-chennai vs china movie free download
DOWNLOAD https://gohhs.com/2uz3uB
-
- d5da3c52bf
-
-
-
diff --git a/spaces/indy256/protogen_v2.2/app.py b/spaces/indy256/protogen_v2.2/app.py
deleted file mode 100644
index 5aad4ca1b3f5e60af394a66e88233ea70a7987d9..0000000000000000000000000000000000000000
--- a/spaces/indy256/protogen_v2.2/app.py
+++ /dev/null
@@ -1,15 +0,0 @@
-import os
-import gradio as gr
-
-API_KEY=os.environ.get('HUGGING_FACE_HUB_TOKEN', None)
-
-article = """---
-This space was created using [SD Space Creator](https://huggingface.co/spaces/anzorq/sd-space-creator)."""
-
-gr.Interface.load(
- name="models/yuvalkirstain/protogen_v2.2",
- title="""Protogen V2.2""",
- description="""Demo for Protogen V2.2 Stable Diffusion model.""",
- article=article,
- api_key=API_KEY,
- ).queue(concurrency_count=20).launch()
diff --git a/spaces/innnky/vits-nyaru/train.py b/spaces/innnky/vits-nyaru/train.py
deleted file mode 100644
index 336698ef8ce260048ed8a6e4f0efa5daffb50eb2..0000000000000000000000000000000000000000
--- a/spaces/innnky/vits-nyaru/train.py
+++ /dev/null
@@ -1,295 +0,0 @@
-import os
-import json
-import argparse
-import itertools
-import math
-import torch
-from torch import nn, optim
-from torch.nn import functional as F
-from torch.utils.data import DataLoader
-from torch.utils.tensorboard import SummaryWriter
-import torch.multiprocessing as mp
-import torch.distributed as dist
-from torch.nn.parallel import DistributedDataParallel as DDP
-from torch.cuda.amp import autocast, GradScaler
-
-import librosa
-import logging
-
-logging.getLogger('numba').setLevel(logging.WARNING)
-
-import commons
-import utils
-from data_utils import (
- TextAudioLoader,
- TextAudioCollate,
- DistributedBucketSampler
-)
-from models import (
- SynthesizerTrn,
- MultiPeriodDiscriminator,
-)
-from losses import (
- generator_loss,
- discriminator_loss,
- feature_loss,
- kl_loss
-)
-from mel_processing import mel_spectrogram_torch, spec_to_mel_torch
-from text.symbols import symbols
-
-
-torch.backends.cudnn.benchmark = True
-global_step = 0
-
-
-def main():
- """Assume Single Node Multi GPUs Training Only"""
- assert torch.cuda.is_available(), "CPU training is not allowed."
-
- n_gpus = torch.cuda.device_count()
- os.environ['MASTER_ADDR'] = 'localhost'
- os.environ['MASTER_PORT'] = '80000'
-
- hps = utils.get_hparams()
- mp.spawn(run, nprocs=n_gpus, args=(n_gpus, hps,))
-
-
-def run(rank, n_gpus, hps):
- global global_step
- if rank == 0:
- logger = utils.get_logger(hps.model_dir)
- logger.info(hps)
- utils.check_git_hash(hps.model_dir)
- writer = SummaryWriter(log_dir=hps.model_dir)
- writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval"))
-
- dist.init_process_group(backend='nccl', init_method='env://', world_size=n_gpus, rank=rank)
- torch.manual_seed(hps.train.seed)
- torch.cuda.set_device(rank)
-
- train_dataset = TextAudioLoader(hps.data.training_files, hps.data)
- train_sampler = DistributedBucketSampler(
- train_dataset,
- hps.train.batch_size,
- [32,300,400,500,600,700,800,900,1000],
- num_replicas=n_gpus,
- rank=rank,
- shuffle=True)
- collate_fn = TextAudioCollate()
- train_loader = DataLoader(train_dataset, num_workers=8, shuffle=False, pin_memory=True,
- collate_fn=collate_fn, batch_sampler=train_sampler)
- if rank == 0:
- eval_dataset = TextAudioLoader(hps.data.validation_files, hps.data)
- eval_loader = DataLoader(eval_dataset, num_workers=8, shuffle=False,
- batch_size=hps.train.batch_size, pin_memory=True,
- drop_last=False, collate_fn=collate_fn)
-
- net_g = SynthesizerTrn(
- len(symbols),
- hps.data.filter_length // 2 + 1,
- hps.train.segment_size // hps.data.hop_length,
- **hps.model).cuda(rank)
- net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(rank)
- optim_g = torch.optim.AdamW(
- net_g.parameters(),
- hps.train.learning_rate,
- betas=hps.train.betas,
- eps=hps.train.eps)
- optim_d = torch.optim.AdamW(
- net_d.parameters(),
- hps.train.learning_rate,
- betas=hps.train.betas,
- eps=hps.train.eps)
- net_g = DDP(net_g, device_ids=[rank])
- net_d = DDP(net_d, device_ids=[rank])
-
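-  # resume from the most recent checkpoint if one exists; otherwise start training from epoch 1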
- try:
- _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g, optim_g)
- _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d, optim_d)
- global_step = (epoch_str - 1) * len(train_loader)
- except:
- epoch_str = 1
- global_step = 0
-
- scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str-2)
- scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str-2)
-
- scaler = GradScaler(enabled=hps.train.fp16_run)
-
- for epoch in range(epoch_str, hps.train.epochs + 1):
- if rank==0:
- train_and_evaluate(rank, epoch, hps, [net_g, net_d], [optim_g, optim_d], [scheduler_g, scheduler_d], scaler, [train_loader, eval_loader], logger, [writer, writer_eval])
- else:
- train_and_evaluate(rank, epoch, hps, [net_g, net_d], [optim_g, optim_d], [scheduler_g, scheduler_d], scaler, [train_loader, None], None, None)
- scheduler_g.step()
- scheduler_d.step()
-
-
-def train_and_evaluate(rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers):
- net_g, net_d = nets
- optim_g, optim_d = optims
- scheduler_g, scheduler_d = schedulers
- train_loader, eval_loader = loaders
- if writers is not None:
- writer, writer_eval = writers
-
- train_loader.batch_sampler.set_epoch(epoch)
- global global_step
-
- net_g.train()
- net_d.train()
- for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths) in enumerate(train_loader):
- x, x_lengths = x.cuda(rank, non_blocking=True), x_lengths.cuda(rank, non_blocking=True)
- spec, spec_lengths = spec.cuda(rank, non_blocking=True), spec_lengths.cuda(rank, non_blocking=True)
- y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda(rank, non_blocking=True)
-
- with autocast(enabled=hps.train.fp16_run):
- y_hat, l_length, attn, ids_slice, x_mask, z_mask,\
- (z, z_p, m_p, logs_p, m_q, logs_q) = net_g(x, x_lengths, spec, spec_lengths)
-
- mel = spec_to_mel_torch(
- spec,
- hps.data.filter_length,
- hps.data.n_mel_channels,
- hps.data.sampling_rate,
- hps.data.mel_fmin,
- hps.data.mel_fmax)
- y_mel = commons.slice_segments(mel, ids_slice, hps.train.segment_size // hps.data.hop_length)
- y_hat_mel = mel_spectrogram_torch(
- y_hat.squeeze(1),
- hps.data.filter_length,
- hps.data.n_mel_channels,
- hps.data.sampling_rate,
- hps.data.hop_length,
- hps.data.win_length,
- hps.data.mel_fmin,
- hps.data.mel_fmax
- )
-
- y = commons.slice_segments(y, ids_slice * hps.data.hop_length, hps.train.segment_size) # slice
-
- # Discriminator
- y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach())
- with autocast(enabled=False):
- loss_disc, losses_disc_r, losses_disc_g = discriminator_loss(y_d_hat_r, y_d_hat_g)
- loss_disc_all = loss_disc
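-    # AMP bookkeeping: scale the loss for backward, unscale before gradient clipping, then step through the scaler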
- optim_d.zero_grad()
- scaler.scale(loss_disc_all).backward()
- scaler.unscale_(optim_d)
- grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None)
- scaler.step(optim_d)
-
- with autocast(enabled=hps.train.fp16_run):
- # Generator
- y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(y, y_hat)
- with autocast(enabled=False):
- loss_dur = torch.sum(l_length.float())
- loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel
- loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl
-
- loss_fm = feature_loss(fmap_r, fmap_g)
- loss_gen, losses_gen = generator_loss(y_d_hat_g)
- loss_gen_all = loss_gen + loss_fm + loss_mel + loss_dur + loss_kl
- optim_g.zero_grad()
- scaler.scale(loss_gen_all).backward()
- scaler.unscale_(optim_g)
- grad_norm_g = commons.clip_grad_value_(net_g.parameters(), None)
- scaler.step(optim_g)
- scaler.update()
-
- if rank==0:
- if global_step % hps.train.log_interval == 0:
- lr = optim_g.param_groups[0]['lr']
- losses = [loss_disc, loss_gen, loss_fm, loss_mel, loss_dur, loss_kl]
- logger.info('Train Epoch: {} [{:.0f}%]'.format(
- epoch,
- 100. * batch_idx / len(train_loader)))
- logger.info([x.item() for x in losses] + [global_step, lr])
-
- scalar_dict = {"loss/g/total": loss_gen_all, "loss/d/total": loss_disc_all, "learning_rate": lr, "grad_norm_d": grad_norm_d, "grad_norm_g": grad_norm_g}
- scalar_dict.update({"loss/g/fm": loss_fm, "loss/g/mel": loss_mel, "loss/g/dur": loss_dur, "loss/g/kl": loss_kl})
-
- scalar_dict.update({"loss/g/{}".format(i): v for i, v in enumerate(losses_gen)})
- scalar_dict.update({"loss/d_r/{}".format(i): v for i, v in enumerate(losses_disc_r)})
- scalar_dict.update({"loss/d_g/{}".format(i): v for i, v in enumerate(losses_disc_g)})
- image_dict = {
- "slice/mel_org": utils.plot_spectrogram_to_numpy(y_mel[0].data.cpu().numpy()),
- "slice/mel_gen": utils.plot_spectrogram_to_numpy(y_hat_mel[0].data.cpu().numpy()),
- "all/mel": utils.plot_spectrogram_to_numpy(mel[0].data.cpu().numpy()),
- "all/attn": utils.plot_alignment_to_numpy(attn[0,0].data.cpu().numpy())
- }
- utils.summarize(
- writer=writer,
- global_step=global_step,
- images=image_dict,
- scalars=scalar_dict)
-
- if global_step % hps.train.eval_interval == 0:
- evaluate(hps, net_g, eval_loader, writer_eval)
- utils.save_checkpoint(net_g, optim_g, hps.train.learning_rate, epoch, os.path.join(hps.model_dir, "G_{}.pth".format(global_step)))
- utils.save_checkpoint(net_d, optim_d, hps.train.learning_rate, epoch, os.path.join(hps.model_dir, "D_{}.pth".format(global_step)))
- global_step += 1
-
- if rank == 0:
- logger.info('====> Epoch: {}'.format(epoch))
-
-
-def evaluate(hps, generator, eval_loader, writer_eval):
- generator.eval()
- with torch.no_grad():
- for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths) in enumerate(eval_loader):
- x, x_lengths = x.cuda(0), x_lengths.cuda(0)
- spec, spec_lengths = spec.cuda(0), spec_lengths.cuda(0)
- y, y_lengths = y.cuda(0), y_lengths.cuda(0)
-
-      # evaluate only the first utterance of the first batch
- x = x[:1]
- x_lengths = x_lengths[:1]
- spec = spec[:1]
- spec_lengths = spec_lengths[:1]
- y = y[:1]
- y_lengths = y_lengths[:1]
- break
- y_hat, attn, mask, *_ = generator.module.infer(x, x_lengths, max_len=1000)
- y_hat_lengths = mask.sum([1,2]).long() * hps.data.hop_length
-
- mel = spec_to_mel_torch(
- spec,
- hps.data.filter_length,
- hps.data.n_mel_channels,
- hps.data.sampling_rate,
- hps.data.mel_fmin,
- hps.data.mel_fmax)
- y_hat_mel = mel_spectrogram_torch(
- y_hat.squeeze(1).float(),
- hps.data.filter_length,
- hps.data.n_mel_channels,
- hps.data.sampling_rate,
- hps.data.hop_length,
- hps.data.win_length,
- hps.data.mel_fmin,
- hps.data.mel_fmax
- )
- image_dict = {
- "gen/mel": utils.plot_spectrogram_to_numpy(y_hat_mel[0].cpu().numpy())
- }
- audio_dict = {
- "gen/audio": y_hat[0,:,:y_hat_lengths[0]]
- }
- if global_step == 0:
- image_dict.update({"gt/mel": utils.plot_spectrogram_to_numpy(mel[0].cpu().numpy())})
- audio_dict.update({"gt/audio": y[0,:,:y_lengths[0]]})
-
- utils.summarize(
- writer=writer_eval,
- global_step=global_step,
- images=image_dict,
- audios=audio_dict,
- audio_sampling_rate=hps.data.sampling_rate
- )
- generator.train()
-
-
-if __name__ == "__main__":
- main()
diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Autocom Cars Cdp V2 10.3 Multilanguage Setup Activation Key.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Autocom Cars Cdp V2 10.3 Multilanguage Setup Activation Key.md
deleted file mode 100644
index 1f7b60362a34178a9a81ccf0654ca2bc0e7f1a48..0000000000000000000000000000000000000000
--- a/spaces/inplisQlawa/anything-midjourney-v4-1/Autocom Cars Cdp V2 10.3 Multilanguage Setup Activation Key.md
+++ /dev/null
@@ -1,66 +0,0 @@
-
-How to Download and Install Autocom Cars CDP V2 10.3 Multilanguage with Activation Key
-Autocom Cars CDP is software that lets you diagnose and repair cars from a PC or laptop. Autocom Cars CDP V2 10.3 Multilanguage is one of the latest versions; it supports multiple languages and offers many features and functions. However, downloading and installing it can be tricky, especially if you don't have a valid activation key.
-In this article, we will show you how to download and install Autocom Cars CDP V2 10.3 Multilanguage with activation key for free. We will also provide you with some tips and tricks to make the process easier and faster. By following this guide, you will be able to use Autocom Cars CDP V2 10.3 Multilanguage on your Windows 10 computer without any problems.
-autocom cars cdp v2 10.3 multilanguage setup activation key
DOWNLOAD ✪ https://urlin.us/2uExOd
-Step 1: Download Autocom Cars CDP V2 10.3 Multilanguage Setup File
-The first step to install Autocom Cars CDP V2 10.3 Multilanguage is to download the setup file from a reliable source. You can use the links below to download the setup file from Google Drive or Fshare. The setup file is about 4 GB in size, so make sure you have enough space on your hard drive and a stable internet connection.
-Google Drive link: https://drive.google.com/open?id=0B4N...
-Fshare link: https://www.fshare.vn/file/6NH789F1LN5J
-After downloading the setup file, you will need to extract it using a tool like WinRAR or 7-Zip. You will get a folder named "Autocom_Cars_CDP_v2_10_3_Multilanguage_Setup" that contains all the files you need to install Autocom Cars CDP V2 10.3 Multilanguage.
-Step 2: Download Autocom Cars CDP V2 10.3 Multilanguage Crack File
-The next step is to download the crack file that will allow you to activate Autocom Cars CDP V2 10.3 Multilanguage without a license or activation key. You can use the links below to download the crack file from Google Drive or Fshare. The crack file is about 1 MB in size, so it won't take long to download.
-Google Drive link: https://drive.google.com/open?id=0B4N...
-Fshare link: https://www.fshare.vn/file/6NH789F1LN5J
-
-After downloading the crack file, you will need to extract it using a tool like WinRAR or 7-Zip. You will get a folder named "crack" that contains two files: "activation_key.txt" and "patch.exe". These are the files that will help you activate Autocom Cars CDP V2 10.3 Multilanguage.
-Step 3: Install Autocom Cars CDP V2 10.3 Multilanguage
-The third step is to install Autocom Cars CDP V2 10.3 Multilanguage using the setup file and the crack file. To do this, follow these steps:
-
-- Open the folder "Autocom_Cars_CDP_v2_10_3_Multilanguage_Setup" and run the file "setup.exe" as administrator.
-- Choose "Install manually without using the internet" and click "Next".
-- Enter the file installation key "38699-60149-36808-21840-05491" and click "Next".
-- Accept the license agreement and click "Next".
-- Select the products you want to install and click "Next".
-- Select the installation folder and click "Next".
-- Select "Create symbolic links to MATLAB scripts in:" and click "Next".
-- Select "Activate manually without using the internet" and click "Next".
-- Open the file "activation_key.txt" from the folder "crack" and copy the activation key.
-- Paste the activation key into the activation window and click "Next".
-- Click "Confirm" to start the installation.
-- Wait for the installation to finish and click "Finish".
-
-Congratulations! You have successfully installed Autocom Cars CDP V2 10.3 Multilanguage on your computer.
-Step 4: Test Autocom Cars CDP V2 10.3 Multilanguage
-The final step is to test Autocom Cars CDP V2 10.3 Multilanguage and make sure it works properly. To do this, follow these steps:
-
-- Open Autocom Cars CDP V2 10.3 Multilanguage from your start menu or desktop shortcut.
-- You should see a window like this:
-
-
-
-- Type "ver" in the command window and press enter. You should see something like this:
-
-
-
-- You can see that your Autocom version is V2 10.3 Multilanguage and that it is activated with an activation key.
-- You can also try some basic commands or functions in Autocom to see if they work correctly.
-- If everything works fine, then you have successfully installed and activated Autocom Cars CDP V2 10.3 Multilanguage with crack.
-
-
-Conclusion
-
-Autocom Cars CDP V2 10.3 Multilanguage is software that lets you diagnose and repair cars from a PC or laptop. It supports multiple languages and has many features and functions. However, downloading and installing it can be tricky, especially if you don't have a valid activation key.
-
-In this article, we showed you how to download and install Autocom Cars CDP V2 10.3 Multilanguage with activation key and crack for free. We also provided you with some tips and tricks to make the process easier and faster.
-
-We hope this article was helpful for you and that you enjoyed using Autocom Cars CDP V2 10.3 Multilanguage on your Windows 10 computer. If you have any questions or comments, feel free to leave them below. Thank you for reading!
3cee63e6c2
-
-
\ No newline at end of file
diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Dual Audio Movies Hindi English 720p Skyfall 1080pl.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Dual Audio Movies Hindi English 720p Skyfall 1080pl.md
deleted file mode 100644
index d4cf5d9fd670674fbd1cc5740c6a2596b1124c88..0000000000000000000000000000000000000000
--- a/spaces/inplisQlawa/anything-midjourney-v4-1/Dual Audio Movies Hindi English 720p Skyfall 1080pl.md
+++ /dev/null
@@ -1,6 +0,0 @@
-Dual Audio Movies Hindi English 720p Skyfall 1080pl
Download Zip ⚡ https://urlin.us/2uEwhX
-
-Skyfall (2012) BRrip 720p x264 Dual Audio [Eng DD 5. ... Top Gun 1986 BRRip 720P Dual Audio English-Hindi - intellect chaturbate tokens generator ... Dual Audio 1080p Movies, Hollywood 720p Movies, South Hindi Dubbed 720p Movies, ... 4d29de3e1b
-
-
-
diff --git a/spaces/inreVtussa/clothingai/Examples/A First Book Of ANSI C Fourth Edition (Introduction To Programming) Book Pdf HOT!.md b/spaces/inreVtussa/clothingai/Examples/A First Book Of ANSI C Fourth Edition (Introduction To Programming) Book Pdf HOT!.md
deleted file mode 100644
index 64acee96939314761809c21da0a473b3e8df7a33..0000000000000000000000000000000000000000
--- a/spaces/inreVtussa/clothingai/Examples/A First Book Of ANSI C Fourth Edition (Introduction To Programming) Book Pdf HOT!.md
+++ /dev/null
@@ -1,6 +0,0 @@
-A First Book of ANSI C, Fourth Edition (Introduction to Programming) book pdf
Download ↔ https://tiurll.com/2uCl1H
-
-A first book of ansi c fourth edition introduction. The book presents the e balaguruswamy java pdf 28 E. Page 2 of 37. E Balagurusamy: Programming in ANSI C, ... 4d29de3e1b
-
-
-
diff --git a/spaces/inreVtussa/clothingai/Examples/Africa X Sauvage Vol 3.md b/spaces/inreVtussa/clothingai/Examples/Africa X Sauvage Vol 3.md
deleted file mode 100644
index b55b0b27f3a4729fef666ea4db9fadaf4b392663..0000000000000000000000000000000000000000
--- a/spaces/inreVtussa/clothingai/Examples/Africa X Sauvage Vol 3.md
+++ /dev/null
@@ -1,14 +0,0 @@
-Africa X Sauvage Vol 3
DOWNLOAD 🗹 https://tiurll.com/2uCkHf
-
-diadescbersfist/africa-x-sauvage-vol-3-15-cotjane · dior sauvage south africa · sauvage perfume price in south africa . Perfume savage South Africa.
-In order to return or exchange Sauvage perfume from Dior, you will need to provide a copy of your passport, the bottle of Sauvage perfume itself.
-South Africa.
-Price: 12500 rubles.
-Quantity: Sauvage Eau de Parfum, Dior.
-A fragrance based on the contrast of freshness and warmth, created for a passionate man.
-Perfume Sauvage by Dior.
-Perfume Sauvage Eau de Parfum, Dior.
-A fragrance based on contrast 8a78ff9644
-
-
-
diff --git a/spaces/inreVtussa/clothingai/Examples/Defense Zone 3 Ultra HD Torrent Download [FULL].md b/spaces/inreVtussa/clothingai/Examples/Defense Zone 3 Ultra HD Torrent Download [FULL].md
deleted file mode 100644
index e1118d393cf56c98195163a801d64a8f0007d98d..0000000000000000000000000000000000000000
--- a/spaces/inreVtussa/clothingai/Examples/Defense Zone 3 Ultra HD Torrent Download [FULL].md
+++ /dev/null
@@ -1,6 +0,0 @@
-Defense Zone 3 Ultra HD Torrent Download [FULL]
Download Zip ☑ https://tiurll.com/2uCkFj
-
- d5da3c52bf
-
-
-
diff --git a/spaces/ioniumX/SDXL-High-quality-art/README.md b/spaces/ioniumX/SDXL-High-quality-art/README.md
deleted file mode 100644
index 90a7af98e376019398deac49fd4058d5de4d1e09..0000000000000000000000000000000000000000
--- a/spaces/ioniumX/SDXL-High-quality-art/README.md
+++ /dev/null
@@ -1,10 +0,0 @@
----
-title: SDXL High Quality Art
-emoji: 🦀
-colorFrom: red
-colorTo: pink
-sdk: static
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git "a/spaces/jarvisbot/ChatImprovement/crazy_functions/\351\253\230\347\272\247\345\212\237\350\203\275\345\207\275\346\225\260\346\250\241\346\235\277.py" "b/spaces/jarvisbot/ChatImprovement/crazy_functions/\351\253\230\347\272\247\345\212\237\350\203\275\345\207\275\346\225\260\346\250\241\346\235\277.py"
deleted file mode 100644
index bbaa545820ce927915578aafbaec77a2dbe56378..0000000000000000000000000000000000000000
--- "a/spaces/jarvisbot/ChatImprovement/crazy_functions/\351\253\230\347\272\247\345\212\237\350\203\275\345\207\275\346\225\260\346\250\241\346\235\277.py"
+++ /dev/null
@@ -1,17 +0,0 @@
-from predict import predict_no_ui
-from toolbox import CatchException, report_execption, write_results_to_file, predict_no_ui_but_counting_down
-fast_debug = False
-
-@CatchException
-def 高阶功能模板函数(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT):
-    history = []    # clear the history so the model input does not overflow
-    for i in range(5):
-        # prompt (in Chinese): "I give you a number, you return its square. The number is: {i}"
-        i_say = f'我给出一个数字,你给出该数字的平方。我给出数字:{i}'
-        chatbot.append((i_say, "[Local Message] waiting gpt response."))
-        yield chatbot, history, '正常'  # the GPT request takes a while, so refresh the displayed status first ('正常' = "normal")
-
-        gpt_say = predict_no_ui(inputs=i_say, top_p=top_p, temperature=temperature)  # query GPT; this takes some time
-
-        chatbot[-1] = (i_say, gpt_say)
-        history.append(i_say); history.append(gpt_say)
-        yield chatbot, history, '正常'  # show the result
\ No newline at end of file
diff --git a/spaces/jasmeet1001/jasmeetmoviebox/README.md b/spaces/jasmeet1001/jasmeetmoviebox/README.md
deleted file mode 100644
index 162da84fa4fdc4c65c55620d039e49d25178db2b..0000000000000000000000000000000000000000
--- a/spaces/jasmeet1001/jasmeetmoviebox/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Jasmeetmoviebox
-emoji: 📊
-colorFrom: red
-colorTo: gray
-sdk: streamlit
-sdk_version: 1.10.0
-app_file: app.py
-pinned: false
-license: unknown
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/jbilcke-hf/VideoQuest/src/components/ui/switch.tsx b/spaces/jbilcke-hf/VideoQuest/src/components/ui/switch.tsx
deleted file mode 100644
index 9d1e79dffe05b79b4208570f487e506513430355..0000000000000000000000000000000000000000
--- a/spaces/jbilcke-hf/VideoQuest/src/components/ui/switch.tsx
+++ /dev/null
@@ -1,29 +0,0 @@
-"use client"
-
-import * as React from "react"
-import * as SwitchPrimitives from "@radix-ui/react-switch"
-
-import { cn } from "@/lib/utils"
-
-const Switch = React.forwardRef<
-  React.ElementRef<typeof SwitchPrimitives.Root>,
-  React.ComponentPropsWithoutRef<typeof SwitchPrimitives.Root>
->(({ className, ...props }, ref) => (
-  <SwitchPrimitives.Root
-    className={cn(
-      "peer inline-flex h-[24px] w-[44px] shrink-0 cursor-pointer items-center rounded-full border-2 border-transparent transition-colors focus-visible:outline-none disabled:cursor-not-allowed disabled:opacity-50 data-[state=checked]:bg-primary data-[state=unchecked]:bg-input",
-      className
-    )}
-    {...props}
-    ref={ref}
-  >
-    <SwitchPrimitives.Thumb className="pointer-events-none block h-5 w-5 rounded-full bg-background shadow-lg ring-0 transition-transform data-[state=checked]:translate-x-5 data-[state=unchecked]:translate-x-0" />
-  </SwitchPrimitives.Root>
-))
-Switch.displayName = SwitchPrimitives.Root.displayName
-
-export { Switch }
diff --git a/spaces/jhwen/bingo/src/lib/bots/bing/index.ts b/spaces/jhwen/bingo/src/lib/bots/bing/index.ts
deleted file mode 100644
index 4596f9de42fe9079f8128a03bf416a485a4ebb15..0000000000000000000000000000000000000000
--- a/spaces/jhwen/bingo/src/lib/bots/bing/index.ts
+++ /dev/null
@@ -1,410 +0,0 @@
-import { fetch, WebSocket, debug } from '@/lib/isomorphic'
-import WebSocketAsPromised from 'websocket-as-promised'
-import {
- SendMessageParams,
- BingConversationStyle,
- ConversationResponse,
- ChatResponseMessage,
- ConversationInfo,
- InvocationEventType,
- ChatError,
- ErrorCode,
- ChatUpdateCompleteResponse,
- ImageInfo,
- KBlobResponse
-} from './types'
-
-import { convertMessageToMarkdown, websocketUtils, streamAsyncIterable } from './utils'
-import { WatchDog, createChunkDecoder } from '@/lib/utils'
-
-type Params = SendMessageParams<{ bingConversationStyle: BingConversationStyle }>
-
-const OPTIONS_SETS = [
- 'nlu_direct_response_filter',
- 'deepleo',
- 'disable_emoji_spoken_text',
- 'responsible_ai_policy_235',
- 'enablemm',
- 'iycapbing',
- 'iyxapbing',
- 'objopinion',
- 'rweasgv2',
- 'dagslnv1',
- 'dv3sugg',
- 'autosave',
- 'iyoloxap',
- 'iyoloneutral',
- 'clgalileo',
- 'gencontentv3',
-]
-
-export class BingWebBot {
- protected conversationContext?: ConversationInfo
- protected cookie: string
- protected ua: string
- protected endpoint = ''
- private lastText = ''
-  private asyncTasks: Array<Promise<any>> = []
-
- constructor(opts: {
- cookie: string
- ua: string
- bingConversationStyle?: BingConversationStyle
- conversationContext?: ConversationInfo
- }) {
- const { cookie, ua, conversationContext } = opts
- this.cookie = cookie?.includes(';') ? cookie : `_EDGE_V=1; _U=${cookie}`
- this.ua = ua
- this.conversationContext = conversationContext
- }
-
- static buildChatRequest(conversation: ConversationInfo) {
- const optionsSets = OPTIONS_SETS
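-    // tone-specific flags are appended to the base option set depending on the selected conversation style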
- if (conversation.conversationStyle === BingConversationStyle.Precise) {
- optionsSets.push('h3precise')
- } else if (conversation.conversationStyle === BingConversationStyle.Creative) {
- optionsSets.push('h3imaginative')
- }
- return {
- arguments: [
- {
- source: 'cib',
- optionsSets,
- allowedMessageTypes: [
- 'ActionRequest',
- 'Chat',
- 'Context',
- 'InternalSearchQuery',
- 'InternalSearchResult',
- 'Disengaged',
- 'InternalLoaderMessage',
- 'Progress',
- 'RenderCardRequest',
- 'SemanticSerp',
- 'GenerateContentQuery',
- 'SearchQuery',
- ],
- sliceIds: [
- 'winmuid1tf',
- 'anssupfor_c',
- 'imgchatgptv2',
- 'tts2cf',
- 'contansperf',
- 'mlchatpc8500w',
- 'mlchatpc2',
- 'ctrlworkpay',
- 'winshortmsgtf',
- 'cibctrl',
- 'sydtransctrl',
- 'sydconfigoptc',
- '0705trt4',
- '517opinion',
- '628ajcopus0',
- '330uaugs0',
- '529rwea',
- '0626snptrcs0',
- '424dagslnv1',
- ],
- isStartOfSession: conversation.invocationId === 0,
- message: {
- author: 'user',
- inputMethod: 'Keyboard',
- text: conversation.prompt,
- imageUrl: conversation.imageUrl,
- messageType: 'Chat',
- },
- conversationId: conversation.conversationId,
- conversationSignature: conversation.conversationSignature,
- participant: { id: conversation.clientId },
- },
- ],
- invocationId: conversation.invocationId.toString(),
- target: 'chat',
- type: InvocationEventType.StreamInvocation,
- }
- }
-
-  async createConversation(): Promise<ConversationResponse> {
- const headers = {
- 'Accept-Encoding': 'gzip, deflate, br, zsdch',
- 'User-Agent': this.ua,
- 'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32',
- cookie: this.cookie,
- }
-
- let resp: ConversationResponse | undefined
- try {
- const response = await fetch(this.endpoint + '/api/create', { method: 'POST', headers, redirect: 'error', mode: 'cors', credentials: 'include' })
- if (response.status === 404) {
- throw new ChatError('Not Found', ErrorCode.NOTFOUND_ERROR)
- }
- resp = await response.json() as ConversationResponse
- } catch (err) {
- console.error('create conversation error', err)
- }
-
- if (!resp?.result) {
-      // message (in Chinese): "Your VPS or proxy may have been blocked; if in doubt, ask at https://github.com/weaigc/bingo"
-      throw new ChatError('你的 VPS 或代理可能被封禁,如有疑问,请前往 https://github.com/weaigc/bingo 咨询', ErrorCode.BING_IP_FORBIDDEN)
- }
-
- const { value, message } = resp.result || {}
- if (value !== 'Success') {
- const errorMsg = `${value}: ${message}`
- if (value === 'UnauthorizedRequest') {
- if (/fetch failed/i.test(message || '')) {
- throw new ChatError(errorMsg, ErrorCode.BING_IP_FORBIDDEN)
- }
- throw new ChatError(errorMsg, ErrorCode.BING_UNAUTHORIZED)
- }
- if (value === 'TryLater') {
- throw new ChatError(errorMsg, ErrorCode.BING_TRY_LATER)
- }
- if (value === 'Forbidden') {
- throw new ChatError(errorMsg, ErrorCode.BING_FORBIDDEN)
- }
- throw new ChatError(errorMsg, ErrorCode.UNKOWN_ERROR)
- }
- return resp
- }
-
- private async createContext(conversationStyle: BingConversationStyle) {
- if (!this.conversationContext) {
- const conversation = await this.createConversation()
- this.conversationContext = {
- conversationId: conversation.conversationId,
- conversationSignature: conversation.conversationSignature,
- clientId: conversation.clientId,
- invocationId: 0,
- conversationStyle,
- prompt: '',
- }
- }
- return this.conversationContext
- }
-
- async sendMessage(params: Params) {
- try {
- await this.createContext(params.options.bingConversationStyle)
- Object.assign(this.conversationContext!, { prompt: params.prompt, imageUrl: params.imageUrl })
- return this.sydneyProxy(params)
- } catch (error) {
- params.onEvent({
- type: 'ERROR',
- error: error instanceof ChatError ? error : new ChatError('Catch Error', ErrorCode.UNKOWN_ERROR),
- })
- }
- }
-
- private async sydneyProxy(params: Params) {
- const abortController = new AbortController()
- const response = await fetch(this.endpoint + '/api/sydney', {
- method: 'POST',
- headers: {
- 'Content-Type': 'application/json',
- },
- signal: abortController.signal,
- body: JSON.stringify(this.conversationContext!)
- })
- if (response.status !== 200) {
- params.onEvent({
- type: 'ERROR',
- error: new ChatError(
- 'Unknown error',
- ErrorCode.UNKOWN_ERROR,
- ),
- })
- }
- params.signal?.addEventListener('abort', () => {
- abortController.abort()
- })
-
- const textDecoder = createChunkDecoder()
- for await (const chunk of streamAsyncIterable(response.body!)) {
- this.parseEvents(params, websocketUtils.unpackMessage(textDecoder(chunk)))
- }
- }
-
- async sendWs() {
-    const wsConfig: ConstructorParameters<typeof WebSocketAsPromised>[1] = {
- packMessage: websocketUtils.packMessage,
- unpackMessage: websocketUtils.unpackMessage,
- createWebSocket: (url) => new WebSocket(url, {
- headers: {
- 'accept-language': 'zh-CN,zh;q=0.9',
- 'cache-control': 'no-cache',
- 'User-Agent': this.ua,
- pragma: 'no-cache',
- cookie: this.cookie,
- }
- })
- }
- const wsp = new WebSocketAsPromised('wss://sydney.bing.com/sydney/ChatHub', wsConfig)
-
- wsp.open().then(() => {
- wsp.sendPacked({ protocol: 'json', version: 1 })
- wsp.sendPacked({ type: 6 })
- wsp.sendPacked(BingWebBot.buildChatRequest(this.conversationContext!))
- })
-
- return wsp
- }
-
- private async createImage(prompt: string, id: string) {
- try {
- const headers = {
- 'Accept-Encoding': 'gzip, deflate, br, zsdch',
- 'User-Agent': this.ua,
- 'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32',
- cookie: this.cookie,
- }
- const query = new URLSearchParams({
- prompt,
- id
- })
- const response = await fetch(this.endpoint + '/api/image?' + query.toString(),
- {
- method: 'POST',
- headers,
- mode: 'cors',
- credentials: 'include'
- })
- .then(res => res.text())
- if (response) {
- this.lastText += '\n' + response
- }
- } catch (err) {
- console.error('Create Image Error', err)
- }
- }
-
- private buildKnowledgeApiPayload(imageUrl: string, conversationStyle: BingConversationStyle) {
- const imageInfo: ImageInfo = {}
- let imageBase64: string | undefined = undefined
- const knowledgeRequest = {
- imageInfo,
- knowledgeRequest: {
- invokedSkills: [
- 'ImageById'
- ],
- subscriptionId: 'Bing.Chat.Multimodal',
- invokedSkillsRequestData: {
- enableFaceBlur: true
- },
- convoData: {
- convoid: this.conversationContext?.conversationId,
- convotone: conversationStyle,
- }
- },
- }
-
- if (imageUrl.startsWith('data:image/')) {
- imageBase64 = imageUrl.replace('data:image/', '');
- const partIndex = imageBase64.indexOf(',')
- if (partIndex) {
- imageBase64 = imageBase64.substring(partIndex + 1)
- }
- } else {
- imageInfo.url = imageUrl
- }
- return { knowledgeRequest, imageBase64 }
- }
-
-  async uploadImage(imageUrl: string, conversationStyle: BingConversationStyle = BingConversationStyle.Creative): Promise<KBlobResponse | void> {
- if (!imageUrl) {
- return
- }
- await this.createContext(conversationStyle)
- const payload = this.buildKnowledgeApiPayload(imageUrl, conversationStyle)
-
- const response = await fetch(this.endpoint + '/api/kblob',
- {
- headers: {
- 'Content-Type': 'application/json',
- },
- method: 'POST',
- mode: 'cors',
- credentials: 'include',
- body: JSON.stringify(payload),
- })
- .then(res => res.json())
- .catch(e => {
- console.log('Error', e)
- })
- return response
- }
-
- private async generateContent(message: ChatResponseMessage) {
- if (message.contentType === 'IMAGE') {
- this.asyncTasks.push(this.createImage(message.text, message.messageId))
- }
- }
-
- private async parseEvents(params: Params, events: any) {
- const conversation = this.conversationContext!
-
- events?.forEach(async (event: ChatUpdateCompleteResponse) => {
- debug('bing event', event)
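-      // event.type 1: streaming partial answer, 2: final message item, 3: invocation complete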
- if (event.type === 3) {
- await Promise.all(this.asyncTasks)
- this.asyncTasks = []
- params.onEvent({ type: 'UPDATE_ANSWER', data: { text: this.lastText } })
- params.onEvent({ type: 'DONE' })
- conversation.invocationId = parseInt(event.invocationId, 10) + 1
- } else if (event.type === 1) {
- const messages = event.arguments[0].messages
- if (messages) {
- const text = convertMessageToMarkdown(messages[0])
- this.lastText = text
- params.onEvent({ type: 'UPDATE_ANSWER', data: { text, spokenText: messages[0].text, throttling: event.arguments[0].throttling } })
- }
- } else if (event.type === 2) {
- const messages = event.item.messages as ChatResponseMessage[] | undefined
- if (!messages) {
- params.onEvent({
- type: 'ERROR',
- error: new ChatError(
- event.item.result.error || 'Unknown error',
- event.item.result.value === 'Throttled' ? ErrorCode.THROTTLE_LIMIT
- : event.item.result.value === 'CaptchaChallenge' ? (this.conversationContext?.conversationId?.includes('BingProdUnAuthenticatedUsers') ? ErrorCode.BING_UNAUTHORIZED : ErrorCode.BING_CAPTCHA)
- : ErrorCode.UNKOWN_ERROR
- ),
- })
- return
- }
- const limited = messages.some((message) =>
- message.contentOrigin === 'TurnLimiter'
- || message.messageType === 'Disengaged'
- )
- if (limited) {
- params.onEvent({
- type: 'ERROR',
- error: new ChatError(
- 'Sorry, you have reached chat limit in this conversation.',
- ErrorCode.CONVERSATION_LIMIT,
- ),
- })
- return
- }
-
- const lastMessage = event.item.messages.at(-1) as ChatResponseMessage
- const specialMessage = event.item.messages.find(message => message.author === 'bot' && message.contentType === 'IMAGE')
- if (specialMessage) {
- this.generateContent(specialMessage)
- }
-
- if (lastMessage) {
- const text = convertMessageToMarkdown(lastMessage)
- this.lastText = text
- params.onEvent({
- type: 'UPDATE_ANSWER',
- data: { text, throttling: event.item.throttling, suggestedResponses: lastMessage.suggestedResponses, sourceAttributions: lastMessage.sourceAttributions },
- })
- }
- }
- })
- }
-
- resetConversation() {
- this.conversationContext = undefined
- }
-}
diff --git a/spaces/jlevin/dpv-finetuned-gpt2-tiny/app.py b/spaces/jlevin/dpv-finetuned-gpt2-tiny/app.py
deleted file mode 100644
index f351181afac2529d081941d3b1c866ee6695d268..0000000000000000000000000000000000000000
--- a/spaces/jlevin/dpv-finetuned-gpt2-tiny/app.py
+++ /dev/null
@@ -1,3 +0,0 @@
-import gradio as gr
-
-gr.Interface.load("models/dpv/finetuned-gpt2-tiny").launch()
\ No newline at end of file
diff --git a/spaces/jone/Music_Source_Separation/scripts/1_pack_audios_to_hdf5s/voicebank-demand/sr=44100,chn=1.sh b/spaces/jone/Music_Source_Separation/scripts/1_pack_audios_to_hdf5s/voicebank-demand/sr=44100,chn=1.sh
deleted file mode 100644
index b6864ddc299ee2149a5f52e4ed0ad543c207fb33..0000000000000000000000000000000000000000
--- a/spaces/jone/Music_Source_Separation/scripts/1_pack_audios_to_hdf5s/voicebank-demand/sr=44100,chn=1.sh
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/bin/bash
-DATASET_DIR=${1:-"./datasets/voicebank-demand"} # The first argument is dataset directory.
-WORKSPACE=${2:-"./workspaces/bytesep"} # The second argument is workspace directory.
-
-echo "DATASET_DIR=${DATASET_DIR}"
-echo "WORKSPACE=${WORKSPACE}"
-
-# Users can change the following settings.
-SAMPLE_RATE=44100
-CHANNELS=1
-
-# Paths
-PARENT_HDF5S_DIR="${WORKSPACE}/hdf5s/voicebank-demand/sr=${SAMPLE_RATE}_chn=${CHANNELS}"
-
-# Pack train subset 100 pieces into hdf5 files.
-HDF5S_DIR="${PARENT_HDF5S_DIR}/train"
-
-python3 bytesep/dataset_creation/pack_audios_to_hdf5s/voicebank-demand.py \
- --dataset_dir=$DATASET_DIR \
- --split="train" \
- --hdf5s_dir=$HDF5S_DIR \
- --sample_rate=$SAMPLE_RATE \
- --channels=$CHANNELS
\ No newline at end of file
diff --git a/spaces/jorgeppp/LDCC-LDCC-Instruct-Llama-2-ko-13B-v1.4/app.py b/spaces/jorgeppp/LDCC-LDCC-Instruct-Llama-2-ko-13B-v1.4/app.py
deleted file mode 100644
index 83da5a167f9399b9fe384d6717c8bcf92e259997..0000000000000000000000000000000000000000
--- a/spaces/jorgeppp/LDCC-LDCC-Instruct-Llama-2-ko-13B-v1.4/app.py
+++ /dev/null
@@ -1,3 +0,0 @@
-import gradio as gr
-
-gr.Interface.load("models/LDCC/LDCC-Instruct-Llama-2-ko-13B-v1.4").launch()
\ No newline at end of file
diff --git a/spaces/jsxyhelu/skyseg/utils/utils.py b/spaces/jsxyhelu/skyseg/utils/utils.py
deleted file mode 100644
index cbc2922a6032fb7228914a238adcb9e2ed92e636..0000000000000000000000000000000000000000
--- a/spaces/jsxyhelu/skyseg/utils/utils.py
+++ /dev/null
@@ -1,17 +0,0 @@
-import matplotlib.pyplot as plt
-
-
-def plot_img_and_mask(img, mask):
- classes = mask.shape[0] if len(mask.shape) > 2 else 1
- fig, ax = plt.subplots(1, classes + 1)
- ax[0].set_title('Input image')
- ax[0].imshow(img)
- if classes > 1:
- for i in range(classes):
- ax[i + 1].set_title(f'Output mask (class {i + 1})')
-            ax[i + 1].imshow(mask[i, :, :])
- else:
- ax[1].set_title(f'Output mask')
- ax[1].imshow(mask)
- plt.xticks([]), plt.yticks([])
- plt.show()
diff --git a/spaces/juliensimon/bridgetower-demo/README.md b/spaces/juliensimon/bridgetower-demo/README.md
deleted file mode 100644
index 15386d6a381f179d758a7c976c17ae543490dfb5..0000000000000000000000000000000000000000
--- a/spaces/juliensimon/bridgetower-demo/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Bridgetower Demo
-emoji: 🌍
-colorFrom: pink
-colorTo: pink
-sdk: gradio
-sdk_version: 3.17.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/justest/embeddings-api/model.py b/spaces/justest/embeddings-api/model.py
deleted file mode 100644
index b616648d3e5a3b1e20bc86c178289a29d1a83f67..0000000000000000000000000000000000000000
--- a/spaces/justest/embeddings-api/model.py
+++ /dev/null
@@ -1,7 +0,0 @@
-from sentence_transformers import SentenceTransformer
-model = SentenceTransformer('moka-ai/m3e-base')
-# model = SentenceTransformer('nghuyong/ernie-3.0-base-zh')
-
-def encode(text: str):
- text = text.replace("\n", " ")
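-    # embeddings are L2-normalized, so cosine similarity reduces to a plain dot product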
- return model.encode([text], normalize_embeddings=True).tolist()[0]
diff --git a/spaces/justest/gpt4free/g4f/Provider/Providers/H2o.py b/spaces/justest/gpt4free/g4f/Provider/Providers/H2o.py
deleted file mode 100644
index cdba5145738a7fa6772d58edd0a78936b547df9c..0000000000000000000000000000000000000000
--- a/spaces/justest/gpt4free/g4f/Provider/Providers/H2o.py
+++ /dev/null
@@ -1,94 +0,0 @@
-from requests import Session
-from uuid import uuid4
-from json import loads
-import os
-import json
-import requests
-from ...typing import sha256, Dict, get_type_hints
-
-url = 'https://gpt-gm.h2o.ai'
-model = ['falcon-40b', 'falcon-7b', 'llama-13b']
-supports_stream = True
-needs_auth = False
-
-models = {
- 'falcon-7b': 'h2oai/h2ogpt-gm-oasst1-en-2048-falcon-7b-v3',
- 'falcon-40b': 'h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1',
- 'llama-13b': 'h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-13b'
-}
-
-def _create_completion(model: str, messages: list, stream: bool, **kwargs):
- conversation = ''
- for message in messages:
- conversation += '%s: %s\n' % (message['role'], message['content'])
-
- conversation += 'assistant: '
- session = requests.Session()
-
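-    # load the landing page and accept the ethics modal so this session is allowed to chat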
- response = session.get("https://gpt-gm.h2o.ai/")
- headers = {
- "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/115.0",
- "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8",
- "Accept-Language": "ru-RU,ru;q=0.8,en-US;q=0.5,en;q=0.3",
- "Content-Type": "application/x-www-form-urlencoded",
- "Upgrade-Insecure-Requests": "1",
- "Sec-Fetch-Dest": "document",
- "Sec-Fetch-Mode": "navigate",
- "Sec-Fetch-Site": "same-origin",
- "Sec-Fetch-User": "?1",
- "Referer": "https://gpt-gm.h2o.ai/r/jGfKSwU"
- }
- data = {
- "ethicsModalAccepted": "true",
- "shareConversationsWithModelAuthors": "true",
- "ethicsModalAcceptedAt": "",
- "activeModel": "h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1",
- "searchEnabled": "true"
- }
- response = session.post("https://gpt-gm.h2o.ai/settings", headers=headers, data=data)
-
-
-
- headers = {
- "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/115.0",
- "Accept": "*/*",
- "Accept-Language": "ru-RU,ru;q=0.8,en-US;q=0.5,en;q=0.3",
- "Content-Type": "application/json",
- "Sec-Fetch-Dest": "empty",
- "Sec-Fetch-Mode": "cors",
- "Sec-Fetch-Site": "same-origin",
- "Referer": "https://gpt-gm.h2o.ai/"
- }
- data = {
- "model": models[model]
- }
-
- conversation_id = session.post("https://gpt-gm.h2o.ai/conversation", headers=headers, json=data)
- data = {
- "inputs": conversation,
- "parameters": {
- "temperature": kwargs.get('temperature', 0.4),
- "truncate": kwargs.get('truncate', 2048),
- "max_new_tokens": kwargs.get('max_new_tokens', 1024),
- "do_sample": kwargs.get('do_sample', True),
- "repetition_penalty": kwargs.get('repetition_penalty', 1.2),
- "return_full_text": kwargs.get('return_full_text', False)
- },
- "stream": True,
- "options": {
- "id": kwargs.get('id', str(uuid4())),
- "response_id": kwargs.get('response_id', str(uuid4())),
- "is_retry": False,
- "use_cache": False,
- "web_search_id": ""
- }
- }
-
- response = session.post(f"https://gpt-gm.h2o.ai/conversation/{conversation_id.json()['conversationId']}", headers=headers, json=data)
- generated_text = response.text.replace("\n", "").split("data:")
- generated_text = json.loads(generated_text[-1])
-
- return generated_text["generated_text"]
-
-params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
- '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
\ No newline at end of file
diff --git a/spaces/ka1kuk/fastapi/g4f/Provider/Providers/Liaobots.py b/spaces/ka1kuk/fastapi/g4f/Provider/Providers/Liaobots.py
deleted file mode 100644
index 745993314e6c7f7204244d50e0c7c74ab90a7fdf..0000000000000000000000000000000000000000
--- a/spaces/ka1kuk/fastapi/g4f/Provider/Providers/Liaobots.py
+++ /dev/null
@@ -1,77 +0,0 @@
-import os
-import uuid
-import requests
-from ...typing import sha256, Dict, get_type_hints
-import requests
-import time
-
-url = 'https://liaobots.com'
-model = ['gpt-3.5-turbo-16k', 'gpt-4']
-supports_stream = True
-needs_auth = False
-working = True
-
-models = {
- 'gpt-4': {
- "id": "gpt-4",
- "name": "GPT-4",
- "maxLength": 24000,
- "tokenLimit": 8000
- },
- 'gpt-3.5-turbo-16k': {
- "id": "gpt-3.5-turbo-16k",
- "name": "GPT-3.5-16k",
- "maxLength": 48000,
- "tokenLimit": 16000
- },
-}
-
-
-def get_response():
- try:
- url = "https://ka1kuk-fastapi.hf.space/authCode"
- headers = {"Content-Type": "application/json"}
- response = requests.get(url, headers=headers)
- if response.status_code == 500:
- print("Received status code 500. Retrying...")
- time.sleep(5)
- return get_response()
- return response.json()
- except requests.RequestException as e:
- print(f"Error occurred: {e}")
- return None
-
-
-
-def _create_completion(model: str, messages: list, stream: bool, **kwargs):
-
- print(kwargs)
- authCode = get_response()
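-    # the fetched auth code is sent as the x-auth-code header required by liaobots.com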
-
- headers = {
- 'authority': 'liaobots.com',
- 'content-type': 'application/json',
- 'origin': 'https://liaobots.com',
- 'referer': 'https://liaobots.com/',
- 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
- 'x-auth-code': authCode
- }
-
- json_data = {
- 'conversationId': str(uuid.uuid4()),
- 'model': models[model],
- 'messages': messages,
- 'key': '',
- 'prompt': "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown.",
- }
-
- response = requests.post('https://liaobots.com/api/chat',
- headers=headers, json=json_data, stream=True)
-
- for token in response.iter_content(chunk_size=2046):
- yield (token.decode('utf-8'))
-
-
-params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
- '(%s)' % ', '.join(
- [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
\ No newline at end of file
diff --git a/spaces/kangvcar/RealChar/alembic/versions/27fe156a6d72_change_schema_to_unicode.py b/spaces/kangvcar/RealChar/alembic/versions/27fe156a6d72_change_schema_to_unicode.py
deleted file mode 100644
index b77d2b032c0f044074d76ed7b20883fad4ba2c4d..0000000000000000000000000000000000000000
--- a/spaces/kangvcar/RealChar/alembic/versions/27fe156a6d72_change_schema_to_unicode.py
+++ /dev/null
@@ -1,28 +0,0 @@
-"""Change message schema to unicode
-
-Revision ID: 27fe156a6d72
-Revises: 9ed6d1431c1d
-Create Date: 2023-07-18 22:32:03.388403
-
-"""
-from alembic import op
-import sqlalchemy as sa
-
-
-# revision identifiers, used by Alembic.
-revision = '27fe156a6d72'
-down_revision = '9ed6d1431c1d'
-branch_labels = None
-depends_on = None
-
-
-def upgrade() -> None:
- op.add_column('interactions', sa.Column(
- 'client_message_unicode', sa.Unicode(65535)))
- op.add_column('interactions', sa.Column(
- 'server_message_unicode', sa.Unicode(65535)))
-
-
-def downgrade() -> None:
- op.drop_column('interactions', 'client_message_unicode')
- op.drop_column('interactions', 'server_message_unicode')
diff --git a/spaces/kavi1025/Youtube-Whisperer/README.md b/spaces/kavi1025/Youtube-Whisperer/README.md
deleted file mode 100644
index f30d4256155c480f0599698379f798a3365e5bc1..0000000000000000000000000000000000000000
--- a/spaces/kavi1025/Youtube-Whisperer/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Youtube Whisperer
-emoji: ⚡
-colorFrom: purple
-colorTo: yellow
-sdk: gradio
-sdk_version: 3.3.1
-app_file: app.py
-pinned: false
-duplicated_from: jeffistyping/Youtube-Whisperer
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/kdrkdrkdr/HutaoTTS/text/korean.py b/spaces/kdrkdrkdr/HutaoTTS/text/korean.py
deleted file mode 100644
index edee07429a450c55e3d8e246997faaa1e0b89cc9..0000000000000000000000000000000000000000
--- a/spaces/kdrkdrkdr/HutaoTTS/text/korean.py
+++ /dev/null
@@ -1,210 +0,0 @@
-import re
-from jamo import h2j, j2hcj
-import ko_pron
-
-
-# This is a list of Korean classifiers preceded by pure Korean numerals.
-_korean_classifiers = '군데 권 개 그루 닢 대 두 마리 모 모금 뭇 발 발짝 방 번 벌 보루 살 수 술 시 쌈 움큼 정 짝 채 척 첩 축 켤레 톨 통'
-
-# List of (hangul, hangul divided) pairs:
-_hangul_divided = [(re.compile('%s' % x[0]), x[1]) for x in [
- ('ㄳ', 'ㄱㅅ'),
- ('ㄵ', 'ㄴㅈ'),
- ('ㄶ', 'ㄴㅎ'),
- ('ㄺ', 'ㄹㄱ'),
- ('ㄻ', 'ㄹㅁ'),
- ('ㄼ', 'ㄹㅂ'),
- ('ㄽ', 'ㄹㅅ'),
- ('ㄾ', 'ㄹㅌ'),
- ('ㄿ', 'ㄹㅍ'),
- ('ㅀ', 'ㄹㅎ'),
- ('ㅄ', 'ㅂㅅ'),
- ('ㅘ', 'ㅗㅏ'),
- ('ㅙ', 'ㅗㅐ'),
- ('ㅚ', 'ㅗㅣ'),
- ('ㅝ', 'ㅜㅓ'),
- ('ㅞ', 'ㅜㅔ'),
- ('ㅟ', 'ㅜㅣ'),
- ('ㅢ', 'ㅡㅣ'),
- ('ㅑ', 'ㅣㅏ'),
- ('ㅒ', 'ㅣㅐ'),
- ('ㅕ', 'ㅣㅓ'),
- ('ㅖ', 'ㅣㅔ'),
- ('ㅛ', 'ㅣㅗ'),
- ('ㅠ', 'ㅣㅜ')
-]]
-
-# List of (Latin alphabet, hangul) pairs:
-_latin_to_hangul = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
- ('a', '에이'),
- ('b', '비'),
- ('c', '시'),
- ('d', '디'),
- ('e', '이'),
- ('f', '에프'),
- ('g', '지'),
- ('h', '에이치'),
- ('i', '아이'),
- ('j', '제이'),
- ('k', '케이'),
- ('l', '엘'),
- ('m', '엠'),
- ('n', '엔'),
- ('o', '오'),
- ('p', '피'),
- ('q', '큐'),
- ('r', '아르'),
- ('s', '에스'),
- ('t', '티'),
- ('u', '유'),
- ('v', '브이'),
- ('w', '더블유'),
- ('x', '엑스'),
- ('y', '와이'),
- ('z', '제트')
-]]
-
-# List of (ipa, lazy ipa) pairs:
-_ipa_to_lazy_ipa = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
- ('t͡ɕ','ʧ'),
- ('d͡ʑ','ʥ'),
- ('ɲ','n^'),
- ('ɕ','ʃ'),
- ('ʷ','w'),
- ('ɭ','l`'),
- ('ʎ','ɾ'),
- ('ɣ','ŋ'),
- ('ɰ','ɯ'),
- ('ʝ','j'),
- ('ʌ','ə'),
- ('ɡ','g'),
- ('\u031a','#'),
- ('\u0348','='),
- ('\u031e',''),
- ('\u0320',''),
- ('\u0339','')
-]]
-
-
-def latin_to_hangul(text):
- for regex, replacement in _latin_to_hangul:
- text = re.sub(regex, replacement, text)
- return text
-
-
-def divide_hangul(text):
- text = j2hcj(h2j(text))
- for regex, replacement in _hangul_divided:
- text = re.sub(regex, replacement, text)
- return text
-
-
-def hangul_number(num, sino=True):
- '''Reference https://github.com/Kyubyong/g2pK'''
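-    # sino=True spells the number with Sino-Korean numerals (일, 이, 삼, ...);
-    # sino=False uses native Korean numerals (한, 두, 세, ...) as used before counters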
- num = re.sub(',', '', num)
-
- if num == '0':
- return '영'
- if not sino and num == '20':
- return '스무'
-
- digits = '123456789'
- names = '일이삼사오육칠팔구'
- digit2name = {d: n for d, n in zip(digits, names)}
-
- modifiers = '한 두 세 네 다섯 여섯 일곱 여덟 아홉'
- decimals = '열 스물 서른 마흔 쉰 예순 일흔 여든 아흔'
- digit2mod = {d: mod for d, mod in zip(digits, modifiers.split())}
- digit2dec = {d: dec for d, dec in zip(digits, decimals.split())}
-
- spelledout = []
- for i, digit in enumerate(num):
- i = len(num) - i - 1
- if sino:
- if i == 0:
- name = digit2name.get(digit, '')
- elif i == 1:
- name = digit2name.get(digit, '') + '십'
- name = name.replace('일십', '십')
- else:
- if i == 0:
- name = digit2mod.get(digit, '')
- elif i == 1:
- name = digit2dec.get(digit, '')
- if digit == '0':
- if i % 4 == 0:
- last_three = spelledout[-min(3, len(spelledout)):]
- if ''.join(last_three) == '':
- spelledout.append('')
- continue
- else:
- spelledout.append('')
- continue
- if i == 2:
- name = digit2name.get(digit, '') + '백'
- name = name.replace('일백', '백')
- elif i == 3:
- name = digit2name.get(digit, '') + '천'
- name = name.replace('일천', '천')
- elif i == 4:
- name = digit2name.get(digit, '') + '만'
- name = name.replace('일만', '만')
- elif i == 5:
- name = digit2name.get(digit, '') + '십'
- name = name.replace('일십', '십')
- elif i == 6:
- name = digit2name.get(digit, '') + '백'
- name = name.replace('일백', '백')
- elif i == 7:
- name = digit2name.get(digit, '') + '천'
- name = name.replace('일천', '천')
- elif i == 8:
- name = digit2name.get(digit, '') + '억'
- elif i == 9:
- name = digit2name.get(digit, '') + '십'
- elif i == 10:
- name = digit2name.get(digit, '') + '백'
- elif i == 11:
- name = digit2name.get(digit, '') + '천'
- elif i == 12:
- name = digit2name.get(digit, '') + '조'
- elif i == 13:
- name = digit2name.get(digit, '') + '십'
- elif i == 14:
- name = digit2name.get(digit, '') + '백'
- elif i == 15:
- name = digit2name.get(digit, '') + '천'
- spelledout.append(name)
- return ''.join(elem for elem in spelledout)
-
-
-def number_to_hangul(text):
- '''Reference https://github.com/Kyubyong/g2pK'''
- tokens = set(re.findall(r'(\d[\d,]*)([\uac00-\ud71f]+)', text))
- for token in tokens:
- num, classifier = token
- if classifier[:2] in _korean_classifiers or classifier[0] in _korean_classifiers:
- spelledout = hangul_number(num, sino=False)
- else:
- spelledout = hangul_number(num, sino=True)
- text = text.replace(f'{num}{classifier}', f'{spelledout}{classifier}')
- # digit by digit for remaining digits
- digits = '0123456789'
- names = '영일이삼사오육칠팔구'
- for d, n in zip(digits, names):
- text = text.replace(d, n)
- return text
-
-
-def korean_to_lazy_ipa(text):
- text = latin_to_hangul(text)
- text = number_to_hangul(text)
- text=re.sub('[\uac00-\ud7af]+',lambda x:ko_pron.romanise(x.group(0),'ipa').split('] ~ [')[0],text)
- for regex, replacement in _ipa_to_lazy_ipa:
- text = re.sub(regex, replacement, text)
- return text
-
-
-def korean_to_ipa(text):
- text = korean_to_lazy_ipa(text)
- return text.replace('ʧ','tʃ').replace('ʥ','dʑ')
diff --git a/spaces/kevinwang676/ChatGLM2-VC-SadTalker/src/face3d/options/test_options.py b/spaces/kevinwang676/ChatGLM2-VC-SadTalker/src/face3d/options/test_options.py
deleted file mode 100644
index 4ff3ad142779850d1d5a1640bc00f70d34d4a862..0000000000000000000000000000000000000000
--- a/spaces/kevinwang676/ChatGLM2-VC-SadTalker/src/face3d/options/test_options.py
+++ /dev/null
@@ -1,21 +0,0 @@
-"""This script contains the test options for Deep3DFaceRecon_pytorch
-"""
-
-from .base_options import BaseOptions
-
-
-class TestOptions(BaseOptions):
- """This class includes test options.
-
- It also includes shared options defined in BaseOptions.
- """
-
- def initialize(self, parser):
- parser = BaseOptions.initialize(self, parser) # define shared options
- parser.add_argument('--phase', type=str, default='test', help='train, val, test, etc')
- parser.add_argument('--dataset_mode', type=str, default=None, help='chooses how datasets are loaded. [None | flist]')
- parser.add_argument('--img_folder', type=str, default='examples', help='folder for test images.')
-
- # Dropout and Batchnorm has different behavior during training and test.
- self.isTrain = False
- return parser
diff --git a/spaces/kevinwang676/M4Singer/utils/multiprocess_utils.py b/spaces/kevinwang676/M4Singer/utils/multiprocess_utils.py
deleted file mode 100644
index 24876c4ca777f09d1c1e1b75674cd7aaf37a75a6..0000000000000000000000000000000000000000
--- a/spaces/kevinwang676/M4Singer/utils/multiprocess_utils.py
+++ /dev/null
@@ -1,47 +0,0 @@
-import os
-import traceback
-from multiprocessing import Queue, Process
-
-
-def chunked_worker(worker_id, map_func, args, results_queue=None, init_ctx_func=None):
- ctx = init_ctx_func(worker_id) if init_ctx_func is not None else None
- for job_idx, arg in args:
- try:
- if ctx is not None:
- res = map_func(*arg, ctx=ctx)
- else:
- res = map_func(*arg)
- results_queue.put((job_idx, res))
- except:
- traceback.print_exc()
- results_queue.put((job_idx, None))
-
-def chunked_multiprocess_run(map_func, args, num_workers=None, ordered=True, init_ctx_func=None, q_max_size=1000):
- args = zip(range(len(args)), args)
- args = list(args)
- n_jobs = len(args)
- if num_workers is None:
- num_workers = int(os.getenv('N_PROC', os.cpu_count()))
- results_queues = []
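-    # in ordered mode each worker gets its own result queue so results can be yielded in job order;
-    # otherwise all workers share a single results queue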
- if ordered:
- for i in range(num_workers):
- results_queues.append(Queue(maxsize=q_max_size // num_workers))
- else:
- results_queue = Queue(maxsize=q_max_size)
- for i in range(num_workers):
- results_queues.append(results_queue)
- workers = []
- for i in range(num_workers):
- args_worker = args[i::num_workers]
- p = Process(target=chunked_worker, args=(
- i, map_func, args_worker, results_queues[i], init_ctx_func), daemon=True)
- workers.append(p)
- p.start()
- for n_finished in range(n_jobs):
- results_queue = results_queues[n_finished % num_workers]
- job_idx, res = results_queue.get()
- assert job_idx == n_finished or not ordered, (job_idx, n_finished)
- yield res
- for w in workers:
- w.join()
- w.close()
diff --git a/spaces/kevinwang676/SadTalker/src/face3d/util/html.py b/spaces/kevinwang676/SadTalker/src/face3d/util/html.py
deleted file mode 100644
index cc3262a1eafda34842e4dbad47bb6ba72f0c5a68..0000000000000000000000000000000000000000
--- a/spaces/kevinwang676/SadTalker/src/face3d/util/html.py
+++ /dev/null
@@ -1,86 +0,0 @@
-import dominate
-from dominate.tags import meta, h3, table, tr, td, p, a, img, br
-import os
-
-
-class HTML:
- """This HTML class allows us to save images and write texts into a single HTML file.
-
-    It consists of functions such as <add_header> (add a text header to the HTML file),
-    <add_images> (add a row of images to the HTML file), and <save> (save the HTML to the disk).
-    It is based on 'dominate', a Python library for creating and manipulating HTML documents using a DOM API.
- """
-
-    def __init__(self, web_dir, title, refresh=0):
-        """Initialize the HTML class.
-
-        Parameters:
-            web_dir (str) -- a directory that stores the webpage; the HTML file is created at <web_dir>/index.html and images are saved under <web_dir>/images/
-            title (str)   -- the webpage title
-            refresh (int) -- how often the website refreshes itself; 0 means no refreshing
-        """
-        self.title = title
-        self.web_dir = web_dir
-        self.img_dir = os.path.join(self.web_dir, 'images')
-        if not os.path.exists(self.web_dir):
-            os.makedirs(self.web_dir)
-        if not os.path.exists(self.img_dir):
-            os.makedirs(self.img_dir)
-        self.doc = dominate.document(title=title)
-        if refresh > 0:
-            with self.doc.head:
-                meta(http_equiv="refresh", content=str(refresh))
-
- def get_image_dir(self):
- """Return the directory that stores images"""
- return self.img_dir
-
- def add_header(self, text):
- """Insert a header to the HTML file
-
- Parameters:
- text (str) -- the header text
- """
- with self.doc:
- h3(text)
-
- def add_images(self, ims, txts, links, width=400):
- """add images to the HTML file
-
- Parameters:
- ims (str list) -- a list of image paths
- txts (str list) -- a list of image names shown on the website
- links (str list) -- a list of hyperref links; when you click an image, it will redirect you to a new page
- """
- self.t = table(border=1, style="table-layout: fixed;") # Insert a table
- self.doc.add(self.t)
- with self.t:
- with tr():
- for im, txt, link in zip(ims, txts, links):
- with td(style="word-wrap: break-word;", halign="center", valign="top"):
- with p():
- with a(href=os.path.join('images', link)):
- img(style="width:%dpx" % width, src=os.path.join('images', im))
- br()
- p(txt)
-
- def save(self):
-        """Save the current content to the HTML file."""
- html_file = '%s/index.html' % self.web_dir
- f = open(html_file, 'wt')
- f.write(self.doc.render())
- f.close()
-
-
-if __name__ == '__main__': # we show an example usage here.
- html = HTML('web/', 'test_html')
- html.add_header('hello world')
-
- ims, txts, links = [], [], []
- for n in range(4):
- ims.append('image_%d.png' % n)
- txts.append('text_%d' % n)
- links.append('image_%d.png' % n)
- html.add_images(ims, txts, links)
- html.save()
diff --git a/spaces/kokofixcomputers/chat-ui/src/app.html b/spaces/kokofixcomputers/chat-ui/src/app.html
deleted file mode 100644
index ca0e1100f4e9d6f764446050752999129c0644da..0000000000000000000000000000000000000000
--- a/spaces/kokofixcomputers/chat-ui/src/app.html
+++ /dev/null
@@ -1,74 +0,0 @@
-<!DOCTYPE html>
-<html lang="en">
-  <head>
-    <meta charset="utf-8" />
-    <meta name="viewport" content="width=device-width, initial-scale=1" />
-    <title>HuggingChat</title>
-    %sveltekit.head%
-  </head>
-  <body>
-    <div id="app">%sveltekit.body%</div>
-  </body>
-</html>
diff --git a/spaces/kvignesh17/YoutubeVideoSummarization/README.md b/spaces/kvignesh17/YoutubeVideoSummarization/README.md
deleted file mode 100644
index b73a35005f2edb7c8756a4c100eca9891f777474..0000000000000000000000000000000000000000
--- a/spaces/kvignesh17/YoutubeVideoSummarization/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: YoutubeVideoSummarization
-emoji: 🌖
-colorFrom: gray
-colorTo: red
-sdk: gradio
-sdk_version: 3.1.1
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/leilaglewis/04-Gradio-SOTA/README.md b/spaces/leilaglewis/04-Gradio-SOTA/README.md
deleted file mode 100644
index a1778dcf8fd4bc1daed5e68cd5f0256695b4a736..0000000000000000000000000000000000000000
--- a/spaces/leilaglewis/04-Gradio-SOTA/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: 04 Gradio SOTA
-emoji: 👀
-colorFrom: indigo
-colorTo: yellow
-sdk: gradio
-sdk_version: 3.3.1
-app_file: app.py
-pinned: false
-license: apache-2.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/lewiswu1209/MockingBird/encoder/data_objects/speaker_verification_dataset.py b/spaces/lewiswu1209/MockingBird/encoder/data_objects/speaker_verification_dataset.py
deleted file mode 100644
index 77a6e05eae6a939ae7575ae70b7173644141fffe..0000000000000000000000000000000000000000
--- a/spaces/lewiswu1209/MockingBird/encoder/data_objects/speaker_verification_dataset.py
+++ /dev/null
@@ -1,56 +0,0 @@
-from encoder.data_objects.random_cycler import RandomCycler
-from encoder.data_objects.speaker_batch import SpeakerBatch
-from encoder.data_objects.speaker import Speaker
-from encoder.params_data import partials_n_frames
-from torch.utils.data import Dataset, DataLoader
-from pathlib import Path
-
-# TODO: improve with a pool of speakers for data efficiency
-
-class SpeakerVerificationDataset(Dataset):
- def __init__(self, datasets_root: Path):
- self.root = datasets_root
- speaker_dirs = [f for f in self.root.glob("*") if f.is_dir()]
- if len(speaker_dirs) == 0:
- raise Exception("No speakers found. Make sure you are pointing to the directory "
- "containing all preprocessed speaker directories.")
- self.speakers = [Speaker(speaker_dir) for speaker_dir in speaker_dirs]
- self.speaker_cycler = RandomCycler(self.speakers)
-
- def __len__(self):
- return int(1e10)
-
- def __getitem__(self, index):
- return next(self.speaker_cycler)
-
- def get_logs(self):
- log_string = ""
- for log_fpath in self.root.glob("*.txt"):
- with log_fpath.open("r") as log_file:
- log_string += "".join(log_file.readlines())
- return log_string
-
-
-class SpeakerVerificationDataLoader(DataLoader):
- def __init__(self, dataset, speakers_per_batch, utterances_per_speaker, sampler=None,
- batch_sampler=None, num_workers=0, pin_memory=False, timeout=0,
- worker_init_fn=None):
- self.utterances_per_speaker = utterances_per_speaker
-
- super().__init__(
- dataset=dataset,
- batch_size=speakers_per_batch,
- shuffle=False,
- sampler=sampler,
- batch_sampler=batch_sampler,
- num_workers=num_workers,
- collate_fn=self.collate,
- pin_memory=pin_memory,
- drop_last=False,
- timeout=timeout,
- worker_init_fn=worker_init_fn
- )
-
- def collate(self, speakers):
- return SpeakerBatch(speakers, self.utterances_per_speaker, partials_n_frames)
-
\ No newline at end of file
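A minimal usage sketch for the dataset/loader pair above. The import path, dataset location, and batch sizes are assumptions for illustration; the loader yields one SpeakerBatch per step, built from `utterances_per_speaker` partial utterances of each sampled speaker.

# Hypothetical usage (paths and batch sizes are placeholders).
from pathlib import Path
from encoder.data_objects.speaker_verification_dataset import (
    SpeakerVerificationDataset,
    SpeakerVerificationDataLoader,
)

dataset = SpeakerVerificationDataset(Path("datasets/SV2TTS/encoder"))
loader = SpeakerVerificationDataLoader(
    dataset,
    speakers_per_batch=64,       # N speakers per batch
    utterances_per_speaker=10,   # M partial utterances per speaker
    num_workers=4,
)

for step, speaker_batch in enumerate(loader):
    # each batch bundles N * M partial utterances of partials_n_frames frames
    if step == 2:
        break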
diff --git a/spaces/liliyRehtina/color/models/loss.py b/spaces/liliyRehtina/color/models/loss.py
deleted file mode 100644
index a620766975bd0d717ecdf82be9c3a98745a6d3e0..0000000000000000000000000000000000000000
--- a/spaces/liliyRehtina/color/models/loss.py
+++ /dev/null
@@ -1,222 +0,0 @@
-from __future__ import division
-import os, glob, shutil, math, random, json
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-import torchvision
-import basic
-from utils import util
-
-eps = 0.0000001
-
-class SPixelLoss:
- def __init__(self, psize=8, mpdist=False, gpu_no=0):
- self.mpdist = mpdist
- self.gpu_no = gpu_no
- self.sp_size = psize
-
- def __call__(self, data, epoch_no):
- kernel_size = self.sp_size
- #pos_weight = 0.003
- prob = data['pred_prob']
- labxy_feat = data['target_feat']
- N,C,H,W = labxy_feat.shape
- pooled_labxy = basic.poolfeat(labxy_feat, prob, kernel_size, kernel_size)
- reconstr_feat = basic.upfeat(pooled_labxy, prob, kernel_size, kernel_size)
- loss_map = reconstr_feat[:,:,:,:] - labxy_feat[:,:,:,:]
- featLoss_idx = torch.norm(loss_map[:,:-2,:,:], p=2, dim=1).mean()
- posLoss_idx = torch.norm(loss_map[:,-2:,:,:], p=2, dim=1).mean() / kernel_size
- totalLoss_idx = 10*featLoss_idx + 0.003*posLoss_idx
- return {'totalLoss':totalLoss_idx, 'featLoss':featLoss_idx, 'posLoss':posLoss_idx}
-
-
-class AnchorColorProbLoss:
- def __init__(self, hint2regress=False, enhanced=False, with_grad=False, mpdist=False, gpu_no=0):
- self.mpdist = mpdist
- self.gpu_no = gpu_no
- self.hint2regress = hint2regress
- self.enhanced = enhanced
- self.with_grad = with_grad
- self.rebalance_gradient = basic.RebalanceLoss.apply
- self.entropy_loss = nn.CrossEntropyLoss(ignore_index=-1)
- if self.enhanced:
- self.VGGLoss = VGG19Loss(gpu_no=gpu_no, is_ddp=mpdist)
-
- def _perceptual_loss(self, input_grays, input_colors, pred_colors):
- input_RGBs = basic.lab2rgb(torch.cat([input_grays,input_colors], dim=1))
- pred_RGBs = basic.lab2rgb(torch.cat([input_grays,pred_colors], dim=1))
- ## the output of "lab2rgb" just matches the input of "VGGLoss": [0,1]
- return self.VGGLoss(input_RGBs, pred_RGBs)
-
- def _laplace_gradient(self, pred_AB, target_AB):
- N,C,H,W = pred_AB.shape
- kernel = torch.tensor([[1, 1, 1], [1, -8, 1], [1, 1, 1]], device=pred_AB.get_device()).float()
- kernel = kernel.view(1, 1, *kernel.size()).repeat(C,1,1,1)
- grad_pred = F.conv2d(pred_AB, kernel, groups=C)
- grad_trg = F.conv2d(target_AB, kernel, groups=C)
- return l1_loss(grad_trg, grad_pred)
-
- def __call__(self, data, epoch_no):
- N,C,H,W = data['target_label'].shape
- pal_probs = self.rebalance_gradient(data['pal_prob'], data['class_weight'])
- #ref_probs = data['ref_prob']
- pal_probs = pal_probs.permute(0,2,3,1).contiguous().view(N*H*W, -1)
- gt_labels = data['target_label'].permute(0,2,3,1).contiguous().view(N*H*W, -1)
- '''
- igored_mask = data['empty_entries'].permute(0,2,3,1).contiguous().view(N*H*W, -1)
- gt_labels[igored_mask] = -1
- gt_labels = gt_probs.squeeze()
- '''
- palLoss_idx = self.entropy_loss(pal_probs, gt_labels.squeeze(dim=1))
- if self.hint2regress:
- ref_probs = data['ref_prob']
- refLoss_idx = 50 * l2_loss(data['spix_color'], ref_probs)
- else:
- ref_probs = self.rebalance_gradient(data['ref_prob'], data['class_weight'])
- ref_probs = ref_probs.permute(0,2,3,1).contiguous().view(N*H*W, -1)
- refLoss_idx = self.entropy_loss(ref_probs, gt_labels.squeeze(dim=1))
- reconLoss_idx = torch.zeros_like(palLoss_idx)
- if self.enhanced:
- scalar = 1.0 if self.hint2regress else 5.0
- reconLoss_idx = scalar * self._perceptual_loss(data['input_gray'], data['pred_color'], data['input_color'])
- if self.with_grad:
- gradient_loss = self._laplace_gradient(data['pred_color'], data['input_color'])
- reconLoss_idx += gradient_loss
- totalLoss_idx = palLoss_idx + refLoss_idx + reconLoss_idx
- #print("loss terms:", palLoss_idx.item(), refLoss_idx.item(), reconLoss_idx.item())
- return {'totalLoss':totalLoss_idx, 'palLoss':palLoss_idx, 'refLoss':refLoss_idx, 'recLoss':reconLoss_idx}
-
-
-def compute_affinity_pos_loss(prob_in, labxy_feat, pos_weight=0.003, kernel_size=16):
- S = kernel_size
- m = pos_weight
- prob = prob_in.clone()
- N,C,H,W = labxy_feat.shape
- pooled_labxy = basic.poolfeat(labxy_feat, prob, kernel_size, kernel_size)
- reconstr_feat = basic.upfeat(pooled_labxy, prob, kernel_size, kernel_size)
- loss_map = reconstr_feat[:,:,:,:] - labxy_feat[:,:,:,:]
- loss_feat = torch.norm(loss_map[:,:-2,:,:], p=2, dim=1).mean()
- loss_pos = torch.norm(loss_map[:,-2:,:,:], p=2, dim=1).mean() * m / S
- loss_affinity = loss_feat + loss_pos
- return loss_affinity
-
-
-def l2_loss(y_input, y_target, weight_map=None):
- if weight_map is None:
- return F.mse_loss(y_input, y_target)
- else:
- diff_map = torch.mean(torch.abs(y_input-y_target), dim=1, keepdim=True)
- batch_dev = torch.sum(diff_map*diff_map*weight_map, dim=(1,2,3)) / (eps+torch.sum(weight_map, dim=(1,2,3)))
- return batch_dev.mean()
-
-
-def l1_loss(y_input, y_target, weight_map=None):
- if weight_map is None:
- return F.l1_loss(y_input, y_target)
- else:
- diff_map = torch.mean(torch.abs(y_input-y_target), dim=1, keepdim=True)
- batch_dev = torch.sum(diff_map*weight_map, dim=(1,2,3)) / (eps+torch.sum(weight_map, dim=(1,2,3)))
- return batch_dev.mean()
-
-
-def masked_l1_loss(y_input, y_target, outlier_mask):
- one = torch.tensor([1.0]).cuda(y_input.get_device())
- weight_map = torch.where(outlier_mask, one * 0.0, one * 1.0)
- return l1_loss(y_input, y_target, weight_map)
-
-
-def huber_loss(y_input, y_target, delta=0.01):
- mask = torch.zeros_like(y_input)
- mann = torch.abs(y_input - y_target)
- eucl = 0.5 * (mann**2)
- mask[...] = mann < delta
- loss = eucl * mask / delta + (mann - 0.5 * delta) * (1 - mask)
- return torch.mean(loss)
-
-
-## Perceptual loss that uses a pretrained VGG network
-class VGG19Loss(nn.Module):
- def __init__(self, feat_type='liu', gpu_no=0, is_ddp=False, requires_grad=False):
- super(VGG19Loss, self).__init__()
- os.environ['TORCH_HOME'] = '/apdcephfs/share_1290939/richardxia/Saved/Checkpoints/VGG19'
- ## data requirement: (N,C,H,W) in RGB format, [0,1] range, and resolution >= 224x224
- self.mean = [0.485, 0.456, 0.406]
- self.std = [0.229, 0.224, 0.225]
- self.feat_type = feat_type
-
- vgg_model = torchvision.models.vgg19(pretrained=True)
- ## AssertionError: DistributedDataParallel is not needed when a module doesn't have any parameter that requires a gradient
- '''
- if is_ddp:
- vgg_model = vgg_model.cuda(gpu_no)
- vgg_model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(vgg_model)
- vgg_model = torch.nn.parallel.DistributedDataParallel(vgg_model, device_ids=[gpu_no], find_unused_parameters=True)
- else:
- vgg_model = vgg_model.cuda(gpu_no)
- '''
- vgg_model = vgg_model.cuda(gpu_no)
- if self.feat_type == 'liu':
- ## conv1_1, conv2_1, conv3_1, conv4_1, conv5_1
- self.slice1 = nn.Sequential(*list(vgg_model.features)[:2]).eval()
- self.slice2 = nn.Sequential(*list(vgg_model.features)[2:7]).eval()
- self.slice3 = nn.Sequential(*list(vgg_model.features)[7:12]).eval()
- self.slice4 = nn.Sequential(*list(vgg_model.features)[12:21]).eval()
- self.slice5 = nn.Sequential(*list(vgg_model.features)[21:30]).eval()
- self.weights = [1.0/32, 1.0/16, 1.0/8, 1.0/4, 1.0]
- elif self.feat_type == 'lei':
- ## conv1_2, conv2_2, conv3_2, conv4_2, conv5_2
- self.slice1 = nn.Sequential(*list(vgg_model.features)[:4]).eval()
- self.slice2 = nn.Sequential(*list(vgg_model.features)[4:9]).eval()
- self.slice3 = nn.Sequential(*list(vgg_model.features)[9:14]).eval()
- self.slice4 = nn.Sequential(*list(vgg_model.features)[14:23]).eval()
- self.slice5 = nn.Sequential(*list(vgg_model.features)[23:32]).eval()
- self.weights = [1.0/2.6, 1.0/4.8, 1.0/3.7, 1.0/5.6, 10.0/1.5]
- else:
- ## maxpool after conv4_4
- self.featureExactor = nn.Sequential(*list(vgg_model.features)[:28]).eval()
- '''
- for x in range(2):
- self.slice1.add_module(str(x), pretrained_features[x])
- for x in range(2, 7):
- self.slice2.add_module(str(x), pretrained_features[x])
- for x in range(7, 12):
- self.slice3.add_module(str(x), pretrained_features[x])
- for x in range(12, 21):
- self.slice4.add_module(str(x), pretrained_features[x])
- for x in range(21, 30):
- self.slice5.add_module(str(x), pretrained_features[x])
- '''
- self.criterion = nn.L1Loss()
-
- ## fixed parameters
- if not requires_grad:
- for param in self.parameters():
- param.requires_grad = False
- self.eval()
- print('[*] VGG19Loss init!')
-
- def normalize(self, tensor):
- tensor = tensor.clone()
- mean = torch.as_tensor(self.mean, dtype=torch.float32, device=tensor.device)
- std = torch.as_tensor(self.std, dtype=torch.float32, device=tensor.device)
- tensor.sub_(mean[None, :, None, None]).div_(std[None, :, None, None])
- return tensor
-
- def forward(self, x, y):
- norm_x, norm_y = self.normalize(x), self.normalize(y)
- ## feature extract
- if self.feat_type == 'liu' or self.feat_type == 'lei':
- x_relu1, y_relu1 = self.slice1(norm_x), self.slice1(norm_y)
- x_relu2, y_relu2 = self.slice2(x_relu1), self.slice2(y_relu1)
- x_relu3, y_relu3 = self.slice3(x_relu2), self.slice3(y_relu2)
- x_relu4, y_relu4 = self.slice4(x_relu3), self.slice4(y_relu3)
- x_relu5, y_relu5 = self.slice5(x_relu4), self.slice5(y_relu4)
- x_vgg = [x_relu1, x_relu2, x_relu3, x_relu4, x_relu5]
- y_vgg = [y_relu1, y_relu2, y_relu3, y_relu4, y_relu5]
- loss = 0
- for i in range(len(x_vgg)):
- loss += self.weights[i] * self.criterion(x_vgg[i], y_vgg[i].detach())
- else:
- x_vgg, y_vgg = self.featureExactor(norm_x), self.featureExactor(norm_y)
- loss = self.criterion(x_vgg, y_vgg.detach())
- return loss
\ No newline at end of file
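A small sketch of the weighted-loss helpers defined above (`l1_loss`, `huber_loss`). It assumes the Space's root is on PYTHONPATH so `models.loss` and its local imports (`basic`, `utils`) resolve; the tensors and the weight map are random stand-ins.

# Hypothetical usage of the weighted losses above.
import torch
from models.loss import l1_loss, huber_loss

pred = torch.rand(2, 3, 64, 64)
target = torch.rand(2, 3, 64, 64)

# Weight map of shape (N, 1, H, W): zero out the right half so it is ignored.
weight = torch.ones(2, 1, 64, 64)
weight[..., 32:] = 0.0

print(l1_loss(pred, target).item())          # plain mean absolute error
print(l1_loss(pred, target, weight).item())  # averaged only over weighted pixels
print(huber_loss(pred, target, delta=0.01).item())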
diff --git a/spaces/limcheekin/Mistral-7B-Instruct-v0.1-GGUF/index.html b/spaces/limcheekin/Mistral-7B-Instruct-v0.1-GGUF/index.html
deleted file mode 100644
index 4efbcf049517f42eea87f352b81a42c11d04a6cd..0000000000000000000000000000000000000000
--- a/spaces/limcheekin/Mistral-7B-Instruct-v0.1-GGUF/index.html
+++ /dev/null
@@ -1,47 +0,0 @@
-
-
-
- Mistral-7B-Instruct-v0.1-GGUF (Q4_K_M)
-
-
- Mistral-7B-Instruct-v0.1-GGUF (Q4_K_M)
-
- Using the
- llama-cpp-python
- package, this Space hosts the GGUF model on Hugging
- Face Docker Spaces and makes it accessible through an OpenAI-compatible API. The
- Space includes comprehensive API documentation to support seamless
- integration.
-
-
- -
- The API endpoint:
- https://limcheekin-mistral-7b-instruct-v0-1-gguf.hf.space/v1
-
- -
- The API doc:
- https://limcheekin-mistral-7b-instruct-v0-1-gguf.hf.space/docs
-
-
-
- Go ahead and try out the API endpoint yourself with the
-
- mistral-7b-instruct.ipynb
- jupyter notebook.
-
-
- If you find this resource valuable, your support in the form of starring
- the space would be greatly appreciated. Your engagement plays a vital role
- in furthering the application for a community GPU grant, ultimately
- enhancing the capabilities and accessibility of this space.
-
-
-
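The page above only lists the endpoint and documentation URLs. A minimal client sketch is shown below; it assumes the server exposes the standard OpenAI-style `/v1/chat/completions` route that llama-cpp-python serves, and the prompt and token limit are placeholders.

# Hypothetical request against the OpenAI-compatible endpoint above.
import requests

BASE_URL = "https://limcheekin-mistral-7b-instruct-v0-1-gguf.hf.space/v1"

resp = requests.post(
    f"{BASE_URL}/chat/completions",
    json={
        "messages": [{"role": "user", "content": "Say hello in one sentence."}],
        "max_tokens": 64,
    },
    timeout=120,
)
resp.raise_for_status()
print(resp.json()["choices"][0]["message"]["content"])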
diff --git a/spaces/limcheekin/orca_mini_v3_13B-GGML/index.html b/spaces/limcheekin/orca_mini_v3_13B-GGML/index.html
deleted file mode 100644
index 38dcdae855c41a9e8902fc6ce4e98b946a29c68a..0000000000000000000000000000000000000000
--- a/spaces/limcheekin/orca_mini_v3_13B-GGML/index.html
+++ /dev/null
@@ -1,37 +0,0 @@
-
-
-
- orca_mini_v3_13B-GGML (q5_K_S)
-
-
- orca_mini_v3_13B-GGML (q5_K_S)
-
- Using the
- llama-cpp-python
- package, this Space hosts the GGML model on Hugging
- Face Docker Spaces and makes it accessible through an OpenAI-compatible API. The
- Space includes comprehensive API documentation to support seamless
- integration.
-
-
- -
- The API endpoint:
- https://limcheekin-orca-mini-v3-13b-ggml.hf.space/v1
-
- -
- The API doc:
- https://limcheekin-orca-mini-v3-13b-ggml.hf.space/docs
-
-
-
- If you find this resource valuable, your support in the form of starring
- the space would be greatly appreciated. Your engagement plays a vital role
- in furthering the application for a community GPU grant, ultimately
- enhancing the capabilities and accessibility of this space.
-
-
-
diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Goliyon Ki Raasleela Ram-leela 2 Telugu Full Movie Free Download Hd.md b/spaces/lincquiQcaudo/Top-20-Diffusion/Goliyon Ki Raasleela Ram-leela 2 Telugu Full Movie Free Download Hd.md
deleted file mode 100644
index 77f944f6a95a0790e478cfef1dbeb2db60f21fbc..0000000000000000000000000000000000000000
--- a/spaces/lincquiQcaudo/Top-20-Diffusion/Goliyon Ki Raasleela Ram-leela 2 Telugu Full Movie Free Download Hd.md
+++ /dev/null
@@ -1,8 +0,0 @@
-Goliyon Ki Raasleela Ram-leela 2 telugu full movie free download hd
Download File ⭐ https://bytlly.com/2uGwpA
-
-Ram Leela is the tragic love story of Ram (Ranveer Singh) and Leela (Deepika Padukone). Despite the film's many sad moments, it is watched in one breath.
-It is the love story of two people who faced terrible trials but managed to overcome them.
-The plot of the film is very interesting and dynamic. It has everything: love, friendship, humor, and drama. I really liked how the film was made. Everything is very bright and colorful. The music was also very good. 8a78ff9644
-
-
-
diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Mapmedia Unlock Code.md b/spaces/lincquiQcaudo/Top-20-Diffusion/Mapmedia Unlock Code.md
deleted file mode 100644
index 2e02f903691172e42f33788f35909de4e23df3f7..0000000000000000000000000000000000000000
--- a/spaces/lincquiQcaudo/Top-20-Diffusion/Mapmedia Unlock Code.md
+++ /dev/null
@@ -1,10 +0,0 @@
-
-If you're not familiar with it, a Yeoman generator is a CLI tool that allows you to generate boilerplate code for JavaScript, HTML, CSS, JQuery, and many more. The angular generator is specific to AngularJS, but it's super easy to extend to use an existing code generator for any other technology.
-Mapmedia Unlock Code
Download Zip ➡ https://bytlly.com/2uGvzn
-namespace class Bank
Mapmedia Unlock Code How do we use it? Simple, we need to tell the Angular. CodeFirst package where to look for the content model. In our Angular project, we can simply add an extra element to our module config:
-angular.module('my.module', ['ngRoute', ['my.module.something']]);
Mapmedia Unlock Code We can then use the our generated model, without having to define a mapping manually from an OData version.
-The DeltaV "Power of 3D" became widely available in late 2016. Now for the Mapmedia unlock code: "3D, with depth". Furuno is pleased to announce the addition of GSD depth color scales to our premier marine sonar. GSD offers the deepest range of unrivaled versatility in any marine sonar system available today. The Depth Color Scale from Furuno GSD can be added to any of our marine sonar systems, including Bowdens Advanced Sonar (BAS), GSD, PowerScan and the Furuno SuperScan. If you want the depth display to be seen, Furuno has eliminated the need to sacrifice resolution by increasing the linear pixel density of our sonar images by up to 50% while maintaining a crisp graphic image quality.
-
-The syntax and semantics of .ahk
files are completely non-mandatory, but there's a convention: codes are prefixed with map
, then they'll be processed by mapping command.
899543212b
-
-
\ No newline at end of file
diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Men In Black 3 Tamil Dubbed Movies.md b/spaces/lincquiQcaudo/Top-20-Diffusion/Men In Black 3 Tamil Dubbed Movies.md
deleted file mode 100644
index da61de145d22448974cf258d82f7d49210371ea5..0000000000000000000000000000000000000000
--- a/spaces/lincquiQcaudo/Top-20-Diffusion/Men In Black 3 Tamil Dubbed Movies.md
+++ /dev/null
@@ -1,9 +0,0 @@
-
-Entertainment Appreciation was a chapter in the annual program guide of the magazine Motion Picture Herald, the primary trade publication for the United States motion picture industry, until 2006. Movies from the genre-themed magazine were released in the 1960s, 1990s and 2000s. Sometimes dubbed, these films are called 'Emberverse' movies or'millennium blockbusters', and are often some of the biggest American releases of the time.
One variant of the list comprised a star ranking where, among other things, the stars have contributed towards casting light on the movies. Every week there are two lists. One version of the other is among ten categories on the second day, each with ten cinemas. It is called a "cinema meter".
-Men In Black 3 tamil dubbed movies
Download File === https://bytlly.com/2uGybq
-DesixXxtube2.com is a totally free collection of top-notch xxx hollywood tamil dubbed movies in all the genres you could want. Browse for movies in categories like Sexy, Porn, and much more. You may also select a movie and play with the search bar for more results.
-On DesixXxtube2.com you will also find HD Xxx hollywood tamil dubbed movies in the best quality you can have. Delivering all clips in HD or SD, you can enjoy maximum satisfaction and enjoyment from watching your favorite Xxx hollywood tamil dubbed movies here at DesixXxtube2.com.
-True Hollywood is normally loaded with the hottest movies, and by extension the sexiest ones. It's a breeding ground for huge names in the industry and its like nothing else is expected. There are stars and loads of stars in the movie industry, and all agree that there is a dearth of good material out there. With excellent production values, fantastic scripts and onscreen chemistry, this film not only succeeds in telling a sizzling hot story, but it also leaves you with an absolute rumbling in the belly. You can get the perfect Xxx hollywood tamil dubbed movies to come home and view it on your favorite device on DesixXxtube2.com.
- 899543212b
-
-
\ No newline at end of file
diff --git a/spaces/lindeberg/whisper-webui/src/vadParallel.py b/spaces/lindeberg/whisper-webui/src/vadParallel.py
deleted file mode 100644
index 06d0884e8dcbf189d64dcfd4ceb36215ece5ab47..0000000000000000000000000000000000000000
--- a/spaces/lindeberg/whisper-webui/src/vadParallel.py
+++ /dev/null
@@ -1,255 +0,0 @@
-import multiprocessing
-import threading
-import time
-from src.vad import AbstractTranscription, TranscriptionConfig, get_audio_duration
-from src.whisperContainer import WhisperCallback
-
-from multiprocessing import Pool
-
-from typing import Any, Dict, List
-import os
-
-
-class ParallelContext:
- def __init__(self, num_processes: int = None, auto_cleanup_timeout_seconds: float = None):
- self.num_processes = num_processes
- self.auto_cleanup_timeout_seconds = auto_cleanup_timeout_seconds
- self.lock = threading.Lock()
-
- self.ref_count = 0
- self.pool = None
- self.cleanup_timer = None
-
- def get_pool(self):
- # Initialize pool lazily
- if (self.pool is None):
- context = multiprocessing.get_context('spawn')
- self.pool = context.Pool(self.num_processes)
-
- self.ref_count = self.ref_count + 1
-
- if (self.auto_cleanup_timeout_seconds is not None):
- self._stop_auto_cleanup()
-
- return self.pool
-
- def return_pool(self, pool):
- if (self.pool == pool and self.ref_count > 0):
- self.ref_count = self.ref_count - 1
-
- if (self.ref_count == 0):
- if (self.auto_cleanup_timeout_seconds is not None):
- self._start_auto_cleanup()
-
- def _start_auto_cleanup(self):
- if (self.cleanup_timer is not None):
- self.cleanup_timer.cancel()
- self.cleanup_timer = threading.Timer(self.auto_cleanup_timeout_seconds, self._execute_cleanup)
- self.cleanup_timer.start()
-
- print("Started auto cleanup of pool in " + str(self.auto_cleanup_timeout_seconds) + " seconds")
-
- def _stop_auto_cleanup(self):
- if (self.cleanup_timer is not None):
- self.cleanup_timer.cancel()
- self.cleanup_timer = None
-
- print("Stopped auto cleanup of pool")
-
- def _execute_cleanup(self):
- print("Executing cleanup of pool")
-
- if (self.ref_count == 0):
- self.close()
-
- def close(self):
- self._stop_auto_cleanup()
-
- if (self.pool is not None):
- print("Closing pool of " + str(self.num_processes) + " processes")
- self.pool.close()
- self.pool.join()
- self.pool = None
-
-class ParallelTranscriptionConfig(TranscriptionConfig):
- def __init__(self, device_id: str, override_timestamps, initial_segment_index, copy: TranscriptionConfig = None):
- super().__init__(copy.non_speech_strategy, copy.segment_padding_left, copy.segment_padding_right, copy.max_silent_period, copy.max_merge_size, copy.max_prompt_window, initial_segment_index)
- self.device_id = device_id
- self.override_timestamps = override_timestamps
-
-class ParallelTranscription(AbstractTranscription):
- # Silero VAD typically takes about 3 seconds per minute, so there is no need to split the chunks
- # into segments smaller than 2 minutes (min 6 seconds per CPU core)
- MIN_CPU_CHUNK_SIZE_SECONDS = 2 * 60
-
- def __init__(self, sampling_rate: int = 16000):
- super().__init__(sampling_rate=sampling_rate)
-
- def transcribe_parallel(self, transcription: AbstractTranscription, audio: str, whisperCallable: WhisperCallback, config: TranscriptionConfig,
- cpu_device_count: int, gpu_devices: List[str], cpu_parallel_context: ParallelContext = None, gpu_parallel_context: ParallelContext = None):
- total_duration = get_audio_duration(audio)
-
- # First, get the timestamps for the original audio
- if (cpu_device_count > 1 and not transcription.is_transcribe_timestamps_fast()):
- merged = self._get_merged_timestamps_parallel(transcription, audio, config, total_duration, cpu_device_count, cpu_parallel_context)
- else:
- timestamp_segments = transcription.get_transcribe_timestamps(audio, config, 0, total_duration)
- merged = transcription.get_merged_timestamps(timestamp_segments, config, total_duration)
-
- # We must make sure the whisper model is downloaded
- if (len(gpu_devices) > 1):
- whisperCallable.model_container.ensure_downloaded()
-
- # Split into a list for each device
- # TODO: Split by time instead of by number of chunks
- merged_split = list(self._split(merged, len(gpu_devices)))
-
- # Parameters that will be passed to the transcribe function
- parameters = []
- segment_index = config.initial_segment_index
-
- for i in range(len(gpu_devices)):
- # Note that device_segment_list can be empty. But we will still create a process for it,
- # as otherwise we run the risk of assigning the same device to multiple processes.
- device_segment_list = list(merged_split[i]) if i < len(merged_split) else []
- device_id = gpu_devices[i]
-
- print("Device " + str(device_id) + " (index " + str(i) + ") has " + str(len(device_segment_list)) + " segments")
-
- # Create a new config with the given device ID
- device_config = ParallelTranscriptionConfig(device_id, device_segment_list, segment_index, config)
- segment_index += len(device_segment_list)
-
- parameters.append([audio, whisperCallable, device_config])
-
- merged = {
- 'text': '',
- 'segments': [],
- 'language': None
- }
-
- created_context = False
-
- perf_start_gpu = time.perf_counter()
-
- # Spawn a separate process for each device
- try:
- if (gpu_parallel_context is None):
- gpu_parallel_context = ParallelContext(len(gpu_devices))
- created_context = True
-
- # Get a pool of processes
- pool = gpu_parallel_context.get_pool()
-
- # Run the transcription in parallel
- results = pool.starmap(self.transcribe, parameters)
-
- for result in results:
- # Merge the results
- if (result['text'] is not None):
- merged['text'] += result['text']
- if (result['segments'] is not None):
- merged['segments'].extend(result['segments'])
- if (result['language'] is not None):
- merged['language'] = result['language']
-
- finally:
- # Return the pool to the context
- if (gpu_parallel_context is not None):
- gpu_parallel_context.return_pool(pool)
- # Always close the context if we created it
- if (created_context):
- gpu_parallel_context.close()
-
- perf_end_gpu = time.perf_counter()
- print("Parallel transcription took " + str(perf_end_gpu - perf_start_gpu) + " seconds")
-
- return merged
-
- def _get_merged_timestamps_parallel(self, transcription: AbstractTranscription, audio: str, config: TranscriptionConfig, total_duration: float,
- cpu_device_count: int, cpu_parallel_context: ParallelContext = None):
- parameters = []
-
- chunk_size = max(total_duration / cpu_device_count, self.MIN_CPU_CHUNK_SIZE_SECONDS)
- chunk_start = 0
- cpu_device_id = 0
-
- perf_start_time = time.perf_counter()
-
- # Create chunks that will be processed on the CPU
- while (chunk_start < total_duration):
- chunk_end = min(chunk_start + chunk_size, total_duration)
-
- if (chunk_end - chunk_start < 1):
- # No need to process chunks that are less than 1 second
- break
-
- print("Parallel VAD: Executing chunk from " + str(chunk_start) + " to " +
- str(chunk_end) + " on CPU device " + str(cpu_device_id))
- parameters.append([audio, config, chunk_start, chunk_end])
-
- cpu_device_id += 1
- chunk_start = chunk_end
-
- created_context = False
-
- # Spawn a separate process for each device
- try:
- if (cpu_parallel_context is None):
- cpu_parallel_context = ParallelContext(cpu_device_count)
- created_context = True
-
- # Get a pool of processes
- pool = cpu_parallel_context.get_pool()
-
- # Run the transcription in parallel. Note that transcription must be picklable.
- results = pool.starmap(transcription.get_transcribe_timestamps, parameters)
-
- timestamps = []
-
- # Flatten the results
- for result in results:
- timestamps.extend(result)
-
- merged = transcription.get_merged_timestamps(timestamps, config, total_duration)
-
- perf_end_time = time.perf_counter()
- print("Parallel VAD processing took {} seconds".format(perf_end_time - perf_start_time))
- return merged
-
- finally:
- # Return the pool to the context
- if (cpu_parallel_context is not None):
- cpu_parallel_context.return_pool(pool)
- # Always close the context if we created it
- if (created_context):
- cpu_parallel_context.close()
-
- def get_transcribe_timestamps(self, audio: str, config: ParallelTranscriptionConfig, start_time: float, duration: float):
- return []
-
- def get_merged_timestamps(self, timestamps: List[Dict[str, Any]], config: ParallelTranscriptionConfig, total_duration: float):
- # Override timestamps that will be processed
- if (config.override_timestamps is not None):
- print("Using override timestamps of size " + str(len(config.override_timestamps)))
- return config.override_timestamps
- return super().get_merged_timestamps(timestamps, config, total_duration)
-
- def transcribe(self, audio: str, whisperCallable: WhisperCallback, config: ParallelTranscriptionConfig):
- # Override device ID the first time
- if (os.environ.get("INITIALIZED", None) is None):
- os.environ["INITIALIZED"] = "1"
-
- # Note that this may be None if the user didn't specify a device. In that case, Whisper will
- # just use the default GPU device.
- if (config.device_id is not None):
- print("Using device " + config.device_id)
- os.environ["CUDA_VISIBLE_DEVICES"] = config.device_id
-
- return super().transcribe(audio, whisperCallable, config)
-
- def _split(self, a, n):
- """Split a list into n approximately equal parts."""
- k, m = divmod(len(a), n)
- return (a[i*k+min(i, m):(i+1)*k+min(i+1, m)] for i in range(n))
-
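A standalone worked example of the chunking arithmetic used by `_split` above: the first `len(a) % n` parts receive one extra item, so part sizes differ by at most one.

# Hypothetical check of the _split arithmetic (copied here to run standalone).
def split(a, n):
    k, m = divmod(len(a), n)
    return (a[i * k + min(i, m):(i + 1) * k + min(i + 1, m)] for i in range(n))

segments = list(range(10))
print([part for part in split(segments, 3)])
# [[0, 1, 2, 3], [4, 5, 6], [7, 8, 9]] -- sizes 4, 3, 3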
diff --git a/spaces/liuxiaopai/background-remover/README.md b/spaces/liuxiaopai/background-remover/README.md
deleted file mode 100644
index 0bcc69015de8bd1e071c10e80bf3a6620da755c8..0000000000000000000000000000000000000000
--- a/spaces/liuxiaopai/background-remover/README.md
+++ /dev/null
@@ -1,21 +0,0 @@
----
-title: Background Remover
-emoji: 🖼️✂️
-colorFrom: blue
-colorTo: red
-sdk: gradio
-sdk_version: 2.9.4
-app_file: app.py
-pinned: false
-duplicated_from: nateraw/background-remover
----
-
-# background-remover
-
-[](https://huggingface.co/spaces/nateraw/background-remover)
-
-A Gradio app to remove the background from an image
-
----⬇️
-
-Autogenerated using [this template](https://github.com/nateraw/spaces-template)
diff --git a/spaces/lizhen30/LangChainGo/llms_cache_gpt.py b/spaces/lizhen30/LangChainGo/llms_cache_gpt.py
deleted file mode 100644
index 9c0ba48ae053f8a2ff2f98de4a97b8d499ba6b89..0000000000000000000000000000000000000000
--- a/spaces/lizhen30/LangChainGo/llms_cache_gpt.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# Test GPTCache: cache LLM results either by exact prompt match or by semantic similarity
-import langchain
-import gptcache
-import time
-from langchain.llms import OpenAI
-from gptcache.processor.pre import get_prompt
-from gptcache.manager.factory import get_data_manager
-from langchain.cache import GPTCache
-
-llm = OpenAI(model_name="text-davinci-003", n=2, best_of=2)
-
-# Avoid multiple caches using the same file, causing different llm model caches to affect each other
-i = 0
-file_prefix = "data_map"
-
-
-def init_gptcache_map(cache_obj: gptcache.Cache):
- global i
- cache_path = f'{file_prefix}_{i}.txt'
- cache_obj.init(
- pre_embedding_func=get_prompt,
- data_manager=get_data_manager(data_path=cache_path),
- )
- i += 1
-
-
-langchain.llm_cache = GPTCache(init_gptcache_map)
-
-for i in range(20):
- start = time.perf_counter()
- prompt = "男生有2人,女生有{:d}人,一共多少人?".format(i)
- print("男生有2人,女生有{:d}人, {:s}。 suspend: {:0.4f}".format(
- i, llm(prompt), time.perf_counter() - start))
diff --git a/spaces/lj1995/vocal2guitar/uvr5_pack/lib_v5/layers_537238KB.py b/spaces/lj1995/vocal2guitar/uvr5_pack/lib_v5/layers_537238KB.py
deleted file mode 100644
index 78e539250075d7fed2f349d05e3317dfe2c96804..0000000000000000000000000000000000000000
--- a/spaces/lj1995/vocal2guitar/uvr5_pack/lib_v5/layers_537238KB.py
+++ /dev/null
@@ -1,126 +0,0 @@
-import torch
-from torch import nn
-import torch.nn.functional as F
-
-from uvr5_pack.lib_v5 import spec_utils
-
-
-class Conv2DBNActiv(nn.Module):
- def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
- super(Conv2DBNActiv, self).__init__()
- self.conv = nn.Sequential(
- nn.Conv2d(
- nin,
- nout,
- kernel_size=ksize,
- stride=stride,
- padding=pad,
- dilation=dilation,
- bias=False,
- ),
- nn.BatchNorm2d(nout),
- activ(),
- )
-
- def __call__(self, x):
- return self.conv(x)
-
-
-class SeperableConv2DBNActiv(nn.Module):
- def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
- super(SeperableConv2DBNActiv, self).__init__()
- self.conv = nn.Sequential(
- nn.Conv2d(
- nin,
- nin,
- kernel_size=ksize,
- stride=stride,
- padding=pad,
- dilation=dilation,
- groups=nin,
- bias=False,
- ),
- nn.Conv2d(nin, nout, kernel_size=1, bias=False),
- nn.BatchNorm2d(nout),
- activ(),
- )
-
- def __call__(self, x):
- return self.conv(x)
-
-
-class Encoder(nn.Module):
- def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU):
- super(Encoder, self).__init__()
- self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
- self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ)
-
- def __call__(self, x):
- skip = self.conv1(x)
- h = self.conv2(skip)
-
- return h, skip
-
-
-class Decoder(nn.Module):
- def __init__(
- self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False
- ):
- super(Decoder, self).__init__()
- self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
- self.dropout = nn.Dropout2d(0.1) if dropout else None
-
- def __call__(self, x, skip=None):
- x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True)
- if skip is not None:
- skip = spec_utils.crop_center(skip, x)
- x = torch.cat([x, skip], dim=1)
- h = self.conv(x)
-
- if self.dropout is not None:
- h = self.dropout(h)
-
- return h
-
-
-class ASPPModule(nn.Module):
- def __init__(self, nin, nout, dilations=(4, 8, 16, 32, 64), activ=nn.ReLU):
- super(ASPPModule, self).__init__()
- self.conv1 = nn.Sequential(
- nn.AdaptiveAvgPool2d((1, None)),
- Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ),
- )
- self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ)
- self.conv3 = SeperableConv2DBNActiv(
- nin, nin, 3, 1, dilations[0], dilations[0], activ=activ
- )
- self.conv4 = SeperableConv2DBNActiv(
- nin, nin, 3, 1, dilations[1], dilations[1], activ=activ
- )
- self.conv5 = SeperableConv2DBNActiv(
- nin, nin, 3, 1, dilations[2], dilations[2], activ=activ
- )
- self.conv6 = SeperableConv2DBNActiv(
- nin, nin, 3, 1, dilations[2], dilations[2], activ=activ
- )
- self.conv7 = SeperableConv2DBNActiv(
- nin, nin, 3, 1, dilations[2], dilations[2], activ=activ
- )
- self.bottleneck = nn.Sequential(
- Conv2DBNActiv(nin * 7, nout, 1, 1, 0, activ=activ), nn.Dropout2d(0.1)
- )
-
- def forward(self, x):
- _, _, h, w = x.size()
- feat1 = F.interpolate(
- self.conv1(x), size=(h, w), mode="bilinear", align_corners=True
- )
- feat2 = self.conv2(x)
- feat3 = self.conv3(x)
- feat4 = self.conv4(x)
- feat5 = self.conv5(x)
- feat6 = self.conv6(x)
- feat7 = self.conv7(x)
- out = torch.cat((feat1, feat2, feat3, feat4, feat5, feat6, feat7), dim=1)
- bottle = self.bottleneck(out)
- return bottle
diff --git a/spaces/lllqqq/so-vits-svc-models-pcr/vdecoder/hifigan/env.py b/spaces/lllqqq/so-vits-svc-models-pcr/vdecoder/hifigan/env.py
deleted file mode 100644
index 2bdbc95d4f7a8bad8fd4f5eef657e2b51d946056..0000000000000000000000000000000000000000
--- a/spaces/lllqqq/so-vits-svc-models-pcr/vdecoder/hifigan/env.py
+++ /dev/null
@@ -1,15 +0,0 @@
-import os
-import shutil
-
-
-class AttrDict(dict):
- def __init__(self, *args, **kwargs):
- super(AttrDict, self).__init__(*args, **kwargs)
- self.__dict__ = self
-
-
-def build_env(config, config_name, path):
- t_path = os.path.join(path, config_name)
- if config != t_path:
- os.makedirs(path, exist_ok=True)
- shutil.copyfile(config, os.path.join(path, config_name))
diff --git a/spaces/lnyan/stablediffusion-infinity/postprocess.py b/spaces/lnyan/stablediffusion-infinity/postprocess.py
deleted file mode 100644
index 90c7f535c568fa46b6433390459d82e7967bb1fd..0000000000000000000000000000000000000000
--- a/spaces/lnyan/stablediffusion-infinity/postprocess.py
+++ /dev/null
@@ -1,249 +0,0 @@
-"""
-https://github.com/Trinkle23897/Fast-Poisson-Image-Editing
-MIT License
-
-Copyright (c) 2022 Jiayi Weng
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-"""
-
-import time
-import argparse
-import os
-import fpie
-from process import ALL_BACKEND, CPU_COUNT, DEFAULT_BACKEND
-from fpie.io import read_images, write_image
-from process import BaseProcessor, EquProcessor, GridProcessor
-
-from PIL import Image
-import numpy as np
-import skimage
-import skimage.measure
-import scipy
-import scipy.signal
-
-
-class PhotometricCorrection:
- def __init__(self,quite=False):
- self.get_parser("cli")
- args=self.parser.parse_args(["--method","grid","-g","src","-s","a","-t","a","-o","a"])
- args.mpi_sync_interval = getattr(args, "mpi_sync_interval", 0)
- self.backend=args.backend
- self.args=args
- self.quite=quite
- proc: BaseProcessor
- proc = GridProcessor(
- args.gradient,
- args.backend,
- args.cpu,
- args.mpi_sync_interval,
- args.block_size,
- args.grid_x,
- args.grid_y,
- )
- print(
- f"[PIE]Successfully initialize PIE {args.method} solver "
- f"with {args.backend} backend"
- )
- self.proc=proc
-
- def run(self, original_image, inpainted_image, mode="mask_mode"):
- print(f"[PIE] start")
- if mode=="disabled":
- return inpainted_image
- input_arr=np.array(original_image)
- if input_arr[:,:,-1].sum()<1:
- return inpainted_image
- output_arr=np.array(inpainted_image)
- mask=input_arr[:,:,-1]
- mask=255-mask
- if mask.sum()<1 and mode=="mask_mode":
- mode=""
- if mode=="mask_mode":
- mask = skimage.measure.block_reduce(mask, (8, 8), np.max)
- mask = mask.repeat(8, axis=0).repeat(8, axis=1)
- else:
- mask[8:-9,8:-9]=255
- mask = mask[:,:,np.newaxis].repeat(3,axis=2)
- nmask=mask.copy()
- output_arr2=output_arr[:,:,0:3].copy()
- input_arr2=input_arr[:,:,0:3].copy()
- output_arr2[nmask<128]=0
- input_arr2[nmask>=128]=0
- output_arr2+=input_arr2
- src = output_arr2[:,:,0:3]
- tgt = src.copy()
- proc=self.proc
- args=self.args
- if proc.root:
- n = proc.reset(src, mask, tgt, (args.h0, args.w0), (args.h1, args.w1))
- proc.sync()
- if proc.root:
- result = tgt
- t = time.time()
- if args.p == 0:
- args.p = args.n
-
- for i in range(0, args.n, args.p):
- if proc.root:
- result, err = proc.step(args.p) # type: ignore
- print(f"[PIE] Iter {i + args.p}, abs_err {err}")
- else:
- proc.step(args.p)
-
- if proc.root:
- dt = time.time() - t
- print(f"[PIE] Time elapsed: {dt:.4f}s")
- # make sure consistent with dummy process
- return Image.fromarray(result)
-
-
- def get_parser(self,gen_type: str) -> argparse.Namespace:
- parser = argparse.ArgumentParser()
- parser.add_argument(
- "-v", "--version", action="store_true", help="show the version and exit"
- )
- parser.add_argument(
- "--check-backend", action="store_true", help="print all available backends"
- )
- if gen_type == "gui" and "mpi" in ALL_BACKEND:
- # gui doesn't support MPI backend
- ALL_BACKEND.remove("mpi")
- parser.add_argument(
- "-b",
- "--backend",
- type=str,
- choices=ALL_BACKEND,
- default=DEFAULT_BACKEND,
- help="backend choice",
- )
- parser.add_argument(
- "-c",
- "--cpu",
- type=int,
- default=CPU_COUNT,
- help="number of CPU used",
- )
- parser.add_argument(
- "-z",
- "--block-size",
- type=int,
- default=1024,
- help="cuda block size (only for equ solver)",
- )
- parser.add_argument(
- "--method",
- type=str,
- choices=["equ", "grid"],
- default="equ",
- help="how to parallelize computation",
- )
- parser.add_argument("-s", "--source", type=str, help="source image filename")
- if gen_type == "cli":
- parser.add_argument(
- "-m",
- "--mask",
- type=str,
- help="mask image filename (default is to use the whole source image)",
- default="",
- )
- parser.add_argument("-t", "--target", type=str, help="target image filename")
- parser.add_argument("-o", "--output", type=str, help="output image filename")
- if gen_type == "cli":
- parser.add_argument(
- "-h0", type=int, help="mask position (height) on source image", default=0
- )
- parser.add_argument(
- "-w0", type=int, help="mask position (width) on source image", default=0
- )
- parser.add_argument(
- "-h1", type=int, help="mask position (height) on target image", default=0
- )
- parser.add_argument(
- "-w1", type=int, help="mask position (width) on target image", default=0
- )
- parser.add_argument(
- "-g",
- "--gradient",
- type=str,
- choices=["max", "src", "avg"],
- default="max",
- help="how to calculate gradient for PIE",
- )
- parser.add_argument(
- "-n",
- type=int,
- help="how many iteration would you perfer, the more the better",
- default=5000,
- )
- if gen_type == "cli":
- parser.add_argument(
- "-p", type=int, help="output result every P iteration", default=0
- )
- if "mpi" in ALL_BACKEND:
- parser.add_argument(
- "--mpi-sync-interval",
- type=int,
- help="MPI sync iteration interval",
- default=100,
- )
- parser.add_argument(
- "--grid-x", type=int, help="x axis stride for grid solver", default=8
- )
- parser.add_argument(
- "--grid-y", type=int, help="y axis stride for grid solver", default=8
- )
- self.parser=parser
-
-if __name__ =="__main__":
- import sys
- import io
- import base64
- from PIL import Image
- def base64_to_pil(base64_str):
- data = base64.b64decode(str(base64_str))
- pil = Image.open(io.BytesIO(data))
- return pil
-
- def pil_to_base64(out_pil):
- out_buffer = io.BytesIO()
- out_pil.save(out_buffer, format="PNG")
- out_buffer.seek(0)
- base64_bytes = base64.b64encode(out_buffer.read())
- base64_str = base64_bytes.decode("ascii")
- return base64_str
- correction_func=PhotometricCorrection(quite=True)
- while True:
- buffer = sys.stdin.readline()
- print(f"[PIE] suprocess {len(buffer)} {type(buffer)} ")
- if len(buffer)==0:
- break
- if isinstance(buffer,str):
- lst=buffer.strip().split(",")
- else:
- lst=buffer.decode("ascii").strip().split(",")
- img0=base64_to_pil(lst[0])
- img1=base64_to_pil(lst[1])
- ret=correction_func.run(img0,img1,mode=lst[2])
- ret_base64=pil_to_base64(ret)
- if isinstance(buffer,str):
- sys.stdout.write(f"{ret_base64}\n")
- else:
- sys.stdout.write(f"{ret_base64}\n".encode())
- sys.stdout.flush()
\ No newline at end of file
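A rough parent-side sketch of the line protocol implemented in the `__main__` block above: write `base64(original),base64(inpainted),mode` on one line, then read one base64-encoded PNG back. The script path and image files are placeholders, and the solver's `[PIE]` log lines (which also go to stdout) are skipped before decoding.

# Hypothetical driver for the postprocess.py stdin/stdout protocol.
import base64
import io
import subprocess

from PIL import Image

def pil_to_base64(img):
    buf = io.BytesIO()
    img.save(buf, format="PNG")
    return base64.b64encode(buf.getvalue()).decode("ascii")

proc = subprocess.Popen(
    ["python", "postprocess.py"],          # assumed script location
    stdin=subprocess.PIPE, stdout=subprocess.PIPE, text=True,
)

original = Image.open("original.png")      # RGBA; alpha marks the known region
inpainted = Image.open("inpainted.png")    # RGB result of the diffusion step

proc.stdin.write(f"{pil_to_base64(original)},{pil_to_base64(inpainted)},mask_mode\n")
proc.stdin.flush()

line = proc.stdout.readline()
while line.startswith("[PIE]"):            # skip the solver's log output
    line = proc.stdout.readline()

Image.open(io.BytesIO(base64.b64decode(line.strip()))).save("blended.png")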
diff --git a/spaces/luodian/LoRA-DreamBooth-Training-UI/inference.py b/spaces/luodian/LoRA-DreamBooth-Training-UI/inference.py
deleted file mode 100644
index ce0f2b08df75e6d62f06c4119f1dc859930de032..0000000000000000000000000000000000000000
--- a/spaces/luodian/LoRA-DreamBooth-Training-UI/inference.py
+++ /dev/null
@@ -1,94 +0,0 @@
-from __future__ import annotations
-
-import gc
-import pathlib
-
-import gradio as gr
-import PIL.Image
-import torch
-from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
-from huggingface_hub import ModelCard
-
-
-class InferencePipeline:
- def __init__(self, hf_token: str | None = None):
- self.hf_token = hf_token
- self.pipe = None
- self.device = torch.device(
- 'cuda:0' if torch.cuda.is_available() else 'cpu')
- self.lora_model_id = None
- self.base_model_id = None
-
- def clear(self) -> None:
- self.lora_model_id = None
- self.base_model_id = None
- del self.pipe
- self.pipe = None
- torch.cuda.empty_cache()
- gc.collect()
-
- @staticmethod
- def check_if_model_is_local(lora_model_id: str) -> bool:
- return pathlib.Path(lora_model_id).exists()
-
- @staticmethod
- def get_model_card(model_id: str,
- hf_token: str | None = None) -> ModelCard:
- if InferencePipeline.check_if_model_is_local(model_id):
- card_path = (pathlib.Path(model_id) / 'README.md').as_posix()
- else:
- card_path = model_id
- return ModelCard.load(card_path, token=hf_token)
-
- @staticmethod
- def get_base_model_info(lora_model_id: str,
- hf_token: str | None = None) -> str:
- card = InferencePipeline.get_model_card(lora_model_id, hf_token)
- return card.data.base_model
-
- def load_pipe(self, lora_model_id: str) -> None:
- if lora_model_id == self.lora_model_id:
- return
- base_model_id = self.get_base_model_info(lora_model_id, self.hf_token)
- if base_model_id != self.base_model_id:
- if self.device.type == 'cpu':
- pipe = DiffusionPipeline.from_pretrained(
- base_model_id, use_auth_token=self.hf_token)
- else:
- pipe = DiffusionPipeline.from_pretrained(
- base_model_id,
- torch_dtype=torch.float16,
- use_auth_token=self.hf_token)
- pipe = pipe.to(self.device)
- pipe.scheduler = DPMSolverMultistepScheduler.from_config(
- pipe.scheduler.config)
- self.pipe = pipe
- self.pipe.unet.load_attn_procs( # type: ignore
- lora_model_id, use_auth_token=self.hf_token)
-
- self.lora_model_id = lora_model_id # type: ignore
- self.base_model_id = base_model_id # type: ignore
-
- def run(
- self,
- lora_model_id: str,
- prompt: str,
- lora_scale: float,
- seed: int,
- n_steps: int,
- guidance_scale: float,
- ) -> PIL.Image.Image:
- if not torch.cuda.is_available():
- raise gr.Error('CUDA is not available.')
-
- self.load_pipe(lora_model_id)
-
- generator = torch.Generator(device=self.device).manual_seed(seed)
- out = self.pipe(
- prompt,
- num_inference_steps=n_steps,
- guidance_scale=guidance_scale,
- generator=generator,
- cross_attention_kwargs={'scale': lora_scale},
- ) # type: ignore
- return out.images[0]
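A short usage sketch for `InferencePipeline` above; it needs a CUDA device and a LoRA repo whose model card declares `base_model`, and the repo id, prompt, and sampler settings below are placeholders.

# Hypothetical call into the inference pipeline defined above.
from inference import InferencePipeline

pipe = InferencePipeline(hf_token=None)
image = pipe.run(
    lora_model_id="your-username/your-lora-model",  # placeholder repo id
    prompt="a photo of sks dog in a bucket",
    lora_scale=0.8,
    seed=0,
    n_steps=25,
    guidance_scale=7.5,
)
image.save("out.png")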
diff --git a/spaces/luxuedong/lxd/src/components/tone-selector.tsx b/spaces/luxuedong/lxd/src/components/tone-selector.tsx
deleted file mode 100644
index 5c6e464c91f564b895acd121f0a4a79ed9c5c356..0000000000000000000000000000000000000000
--- a/spaces/luxuedong/lxd/src/components/tone-selector.tsx
+++ /dev/null
@@ -1,43 +0,0 @@
-import React from 'react'
-import { BingConversationStyle } from '@/lib/bots/bing/types'
-import { cn } from '@/lib/utils'
-
-type ToneItem = {
- type: BingConversationStyle,
- name: string
-}
-
-const ToneList: ToneItem[] = [
- { name: '有创造力', type: BingConversationStyle.Creative },
- { name: '更平衡', type: BingConversationStyle.Balanced },
- { name: '更精确', type: BingConversationStyle.Precise }
-]
-
-interface ToneSelectorProps {
- type: BingConversationStyle | ''
- onChange?: (type: BingConversationStyle) => void
-}
-
-export function ToneSelector({ type, onChange }: ToneSelectorProps) {
- return (
-
-
- 选择对话样式
-
-
-
- {
- ToneList.map(tone => (
- - onChange?.(tone.type)}>
-
-
- ))
- }
-
-
-
- )
-}
diff --git a/spaces/lvwerra/in-the-stack-gr/app.py b/spaces/lvwerra/in-the-stack-gr/app.py
deleted file mode 100644
index 9ef27c1cf638bbcc1cd3ee3c1a329f19d3f5ee64..0000000000000000000000000000000000000000
--- a/spaces/lvwerra/in-the-stack-gr/app.py
+++ /dev/null
@@ -1,90 +0,0 @@
-import gradio as gr
-from huggingface_hub import hf_hub_download
-import json
-import gzip
-import urllib
-
-usernames = {}
-
-filepath = hf_hub_download(repo_id="bigcode/the-stack-username-to-repo", filename="username_to_repo.json.gz", repo_type="dataset", revision="v1.2")
-with gzip.open(filepath, 'r') as f:
- usernames["v1.2"] = json.loads(f.read().decode('utf-8'))
-
-filepath = hf_hub_download(repo_id="bigcode/the-stack-username-to-repo", filename="username_to_repo.json.gz", repo_type="dataset", revision="v1.1")
-with gzip.open(filepath, 'r') as f:
- usernames["v1.1"] = json.loads(f.read().decode('utf-8'))
-
-filepath = hf_hub_download(repo_id="bigcode/the-stack-username-to-repo", filename="username_to_repo.json.gz", repo_type="dataset")
-with gzip.open(filepath, 'r') as f:
- usernames["v1.0"] = json.loads(f.read().decode('utf-8'))
-
-text = """\
-
-**_The Stack is an open governance interface between the AI community and the open source community._**
-
-# Am I in The Stack?
-
-As part of the BigCode project, we released and maintain [The Stack](https://huggingface.co/datasets/bigcode/the-stack), a 6 TB dataset of permissively licensed source code in over 300 programming languages. One of our goals in this project is to give people agency over their source code by letting them decide whether or not it should be used to develop and evaluate machine learning models, as we acknowledge that not all developers may wish to have their data used for that purpose.
-""" + """\
-
-This tool lets you check if a repository under a given username is part of The Stack dataset. Would you like to have your data removed from future versions of The Stack? You can opt out by following the instructions [here](https://www.bigcode-project.org/docs/about/the-stack/#how-can-i-request-that-my-data-be-removed-from-the-stack).
-"""
-
-opt_out_text_template = """\
-### Opt-out
-
-If you want your data to be removed from The Stack and from model training, \
-open an issue with this link \
-(if the link doesn't work, try a right click and open it in a new tab) or visit [https://github.com/bigcode-project/opt-out-v2/issues/new?&template=opt-out-request.md](https://github.com/bigcode-project/opt-out-v2/issues/new?&template=opt-out-request.md).\
-"""
-
-opt_out_issue_title = """Opt-out request for {username}"""
-opt_out_issue_body = """\
-I request that the following data is removed from The Stack:
-
- - Commits
- - GitHub issue
-{repo_list}
-
-_Note_: If you don't want all resources to be included just remove the elements from the list above. If you would like to exclude all repositories and resources just add a single element "all" to the list.
-"""
-
-def issue_url(username, repos):
- title = urllib.parse.quote(opt_out_issue_title.format(username=username))
- body = urllib.parse.quote(opt_out_issue_body.format(repo_list=" - "+ "\n - ".join(repos)))
-
- opt_out_text = opt_out_text_template.format(title=title, body=body)
-
- return opt_out_text
-
-def check_username(username, version):
- output_md = ""
- if username in usernames[version] and len(usernames[version][username])>0:
- repos = usernames[version][username]
- repo_word = "repository" if len(repos)==1 else "repositories"
- output_md += f"**Yes**, there is code from **{len(repos)} {repo_word}** in The Stack:\n\n"
- for repo in repos:
- output_md += f"_{repo}_\n\n"
-
- return output_md.strip(), issue_url(username, repos)
- else:
- output_md += "**No**, your code is not in The Stack."
- return output_md.strip(), ""
-
-with gr.Blocks() as demo:
- with gr.Row():
- _, colum_2, _ = gr.Column(scale=1), gr.Column(scale=6), gr.Column(scale=1)
- with colum_2:
- gr.Markdown(text)
- version = gr.Dropdown(["v1.2", "v1.1", "v1.0"], label="The Stack version:", value="v1.2")
- username = gr.Text("", label="Your GitHub username:")
- check_button = gr.Button("Check!")
-
- repos = gr.Markdown()
- opt_out = gr.Markdown()
-
-
- check_button.click(check_username, [username, version], [repos, opt_out])
-
-
-demo.launch()
\ No newline at end of file
diff --git a/spaces/lychees/Stable-Diffusion-ControlNet-WebUI/diffusion_webui/diffusion_models/stable_diffusion/text2img_app.py b/spaces/lychees/Stable-Diffusion-ControlNet-WebUI/diffusion_webui/diffusion_models/stable_diffusion/text2img_app.py
deleted file mode 100644
index 4797e71aa0f89b0a076ca37c41dc2434213c37e1..0000000000000000000000000000000000000000
--- a/spaces/lychees/Stable-Diffusion-ControlNet-WebUI/diffusion_webui/diffusion_models/stable_diffusion/text2img_app.py
+++ /dev/null
@@ -1,167 +0,0 @@
-import gradio as gr
-import torch
-from diffusers import StableDiffusionPipeline
-
-from diffusion_webui.utils.model_list import stable_model_list
-from diffusion_webui.utils.scheduler_list import (
- SCHEDULER_LIST,
- get_scheduler_list,
-)
-
-
-class StableDiffusionText2ImageGenerator:
- def __init__(self):
- self.pipe = None
-
- def load_model(
- self,
- model_path,
- scheduler,
- ):
- if self.pipe is None:
- self.pipe = StableDiffusionPipeline.from_pretrained(
- model_path, safety_checker=None, torch_dtype=torch.float16
- )
-
- self.pipe = get_scheduler_list(pipe=self.pipe, scheduler=scheduler)
- self.pipe.to("cuda")
- self.pipe.enable_xformers_memory_efficient_attention()
-
- return self.pipe
-
- def generate_image(
- self,
- model_path: str,
- prompt: str,
- negative_prompt: str,
- num_images_per_prompt: int,
- scheduler: str,
- guidance_scale: int,
- num_inference_step: int,
- height: int,
- width: int,
- seed_generator=0,
- ):
- pipe = self.load_model(
- model_path=model_path,
- scheduler=scheduler,
- )
- if seed_generator == 0:
- random_seed = torch.randint(0, 1000000, (1,))
- generator = torch.manual_seed(random_seed)
- else:
- generator = torch.manual_seed(seed_generator)
-
- images = pipe(
- prompt=prompt,
- height=height,
- width=width,
- negative_prompt=negative_prompt,
- num_images_per_prompt=num_images_per_prompt,
- num_inference_steps=num_inference_step,
- guidance_scale=guidance_scale,
- generator=generator,
- ).images
-
- return images
-
- def app():
- with gr.Blocks():
- with gr.Row():
- with gr.Column():
- text2image_prompt = gr.Textbox(
- lines=1,
- placeholder="Prompt",
- show_label=False,
- )
-
- text2image_negative_prompt = gr.Textbox(
- lines=1,
- placeholder="Negative Prompt",
- show_label=False,
- )
- with gr.Row():
- with gr.Column():
- text2image_model_path = gr.Dropdown(
- choices=stable_model_list,
- value=stable_model_list[0],
- label="Text-Image Model Id",
- )
-
- text2image_guidance_scale = gr.Slider(
- minimum=0.1,
- maximum=15,
- step=0.1,
- value=7.5,
- label="Guidance Scale",
- )
-
- text2image_num_inference_step = gr.Slider(
- minimum=1,
- maximum=100,
- step=1,
- value=50,
- label="Num Inference Step",
- )
- text2image_num_images_per_prompt = gr.Slider(
- minimum=1,
- maximum=30,
- step=1,
- value=1,
- label="Number Of Images",
- )
- with gr.Row():
- with gr.Column():
-
- text2image_scheduler = gr.Dropdown(
- choices=SCHEDULER_LIST,
- value=SCHEDULER_LIST[0],
- label="Scheduler",
- )
-
- text2image_height = gr.Slider(
- minimum=128,
- maximum=1280,
- step=32,
- value=512,
- label="Image Height",
- )
-
- text2image_width = gr.Slider(
- minimum=128,
- maximum=1280,
- step=32,
- value=512,
- label="Image Width",
- )
- text2image_seed_generator = gr.Slider(
- label="Seed(0 for random)",
- minimum=0,
- maximum=1000000,
- value=0,
- )
- text2image_predict = gr.Button(value="Generator")
-
- with gr.Column():
- output_image = gr.Gallery(
- label="Generated images",
- show_label=False,
- elem_id="gallery",
- ).style(grid=(1, 2), height=200)
-
- text2image_predict.click(
- fn=StableDiffusionText2ImageGenerator().generate_image,
- inputs=[
- text2image_model_path,
- text2image_prompt,
- text2image_negative_prompt,
- text2image_num_images_per_prompt,
- text2image_scheduler,
- text2image_guidance_scale,
- text2image_num_inference_step,
- text2image_height,
- text2image_width,
- text2image_seed_generator,
- ],
- outputs=output_image,
- )
diff --git a/spaces/m3hrdadfi/gpt2-persian-qa/dictionary.py b/spaces/m3hrdadfi/gpt2-persian-qa/dictionary.py
deleted file mode 100644
index 03b912c0d36d2f963a5eb0d0db8d044e0a871a1d..0000000000000000000000000000000000000000
--- a/spaces/m3hrdadfi/gpt2-persian-qa/dictionary.py
+++ /dev/null
@@ -1,139 +0,0 @@
-characters = {
- "ك": "ک",
- "دِ": "د",
- "بِ": "ب",
- "زِ": "ز",
- "ذِ": "ذ",
- "شِ": "ش",
- "سِ": "س",
- "ى": "ی",
- "ي": "ی",
- "ؤ": "و",
- "ے": "ی",
- "ۀ": "ه",
- "ﭘ": "پ",
- "ﮐ": "ک",
- "ﯽ": "ی",
- "ﺎ": "ا",
- "ﺑ": "ب",
- "ﺘ": "ت",
- "ﺧ": "خ",
- "ﺩ": "د",
- "ﺱ": "س",
- "ﻀ": "ض",
- "ﻌ": "ع",
- "ﻟ": "ل",
- "ﻡ": "م",
- "ﻢ": "م",
- "ﻪ": "ه",
- "ﻮ": "و",
- # "ﺍ": "ا",
- "ة": "ه",
- "ﯾ": "ی",
- "ﯿ": "ی",
- "ﺒ": "ب",
- "ﺖ": "ت",
- "ﺪ": "د",
- "ﺮ": "ر",
- "ﺴ": "س",
- "ﺷ": "ش",
- "ﺸ": "ش",
- "ﻋ": "ع",
- "ﻤ": "م",
- "ﻥ": "ن",
- "ﻧ": "ن",
- "ﻭ": "و",
- "ﺭ": "ر",
- "ﮔ": "گ",
- "إ": "ا",
- "ٕ": " ",
- "ھ": "ه",
- "...": ".",
- "…": ".",
- "-": " - ",
- "هٔ": "ه",
- "ﻯ": "ی",
- "ﻛ": "ک",
- "ﭼ": "چ",
- "ﺓ": "ه",
- "ﻴ": "ی",
- "ﻊ": "ع",
- "ﮬ": "ه",
- "ﺟ": "ج",
- "ﺳ": "س",
- "ﻦ": "ن",
- "ﺬ": "ذ",
- "ﺋ": "ئ",
- "ﷲ": "لله",
- "ﺞ": "ج",
- "ﺙ": "ث",
- "ﻗ": "ق",
- "ﮪ": "ه",
- "ﺰ": "ز",
- "ﯼ": "ی",
- "ٺ": "ت",
- "ﺻ": "ص",
- "ﻂ": "ط",
- "ﻣ": "م",
- "ﻈ": "ظ",
- "ﺐ": "ب",
- "ﻍ": "غ",
- "ݸ": "و",
- "ﻨ": "ن",
- "ﻝ": "ل",
- "ﻩ": "ه",
- "ﻲ": "ی",
- "ﻐ": "غ",
- "ﺲ": "س",
- "ﺁ": "آ",
- "ڔ": "ر",
- "ﺫ": "ذ",
- "ﭻ": "چ",
- "ﺠ": "ج",
- "ﯙ": "و",
- "ﮏ": "ک",
- "ﺣ": "ح",
- "ﺝ": "ج",
- "ﺼ": "ص",
- "ﻳ": "ی",
- "ﻘ": "ق",
- "ﺨ": "خ",
- "ﻔ": "ف",
- "ﻎ": "غ",
- "ئ": "ی",
- "ﻓ": "ف",
- "ﻕ": "ق",
- "ﮋ": "ژ",
- "ﺗ": "ت",
- "ﻁ": "ط",
- "ﺯ": "ز",
- "ﮕ": "گ",
- "ﺌ": "ئ",
- "ﺵ": "ش",
- "ۮ": "د",
- "ﻫ": "ه",
- "ﻬ": "ه",
- "ﻏ": "غ",
- "ﻰ": "ی",
- # "﷼": "ریال",
- "ﺿ": "ض",
- "ﺛ": "ث",
- "ݐ": "پ",
- "ﺏ": "ب",
- "ﭙ": "پ",
- "ﭽ": "چ",
- "ﺜ": "ث",
- "ﻃ": "ط",
- "ۂ": "ه",
- "ﻑ": "ف",
- "ﺕ": "ت",
- "ﻞ": "ل",
-}
-
-special_tokens = {}
-
-words_map = {
- "Leave a comment": "",
- "[…]": "",
- "[.]": "",
-}
diff --git a/spaces/ma-xu/LIVE/pybind11/docs/_static/theme_overrides.css b/spaces/ma-xu/LIVE/pybind11/docs/_static/theme_overrides.css
deleted file mode 100644
index 1071809fa0fecf7c28d3356f37363266e9128b81..0000000000000000000000000000000000000000
--- a/spaces/ma-xu/LIVE/pybind11/docs/_static/theme_overrides.css
+++ /dev/null
@@ -1,11 +0,0 @@
-.wy-table-responsive table td,
-.wy-table-responsive table th {
- white-space: initial !important;
-}
-.rst-content table.docutils td {
- vertical-align: top !important;
-}
-div[class^='highlight'] pre {
- white-space: pre;
- white-space: pre-wrap;
-}
diff --git a/spaces/magicr/BuboGPT/bubogpt/tasks/image_text_pretrain.py b/spaces/magicr/BuboGPT/bubogpt/tasks/image_text_pretrain.py
deleted file mode 100644
index ac02e140a5af65a13f68451e084b124caa32cbc3..0000000000000000000000000000000000000000
--- a/spaces/magicr/BuboGPT/bubogpt/tasks/image_text_pretrain.py
+++ /dev/null
@@ -1,18 +0,0 @@
-"""
- Copyright (c) 2022, salesforce.com, inc.
- All rights reserved.
- SPDX-License-Identifier: BSD-3-Clause
- For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause
-"""
-
-from bubogpt.common.registry import registry
-from bubogpt.tasks.base_task import BaseTask
-
-
-@registry.register_task("image_text_pretrain")
-class ImageTextPretrainTask(BaseTask):
- def __init__(self):
- super().__init__()
-
- def evaluation(self, model, data_loader, cuda_enabled=True):
- pass
diff --git a/spaces/manhkhanhUIT/Image_Restoration_Colorization/Global/options/__init__.py b/spaces/manhkhanhUIT/Image_Restoration_Colorization/Global/options/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/marioboy/neil-breen/vocoder/inference.py b/spaces/marioboy/neil-breen/vocoder/inference.py
deleted file mode 100644
index 7e546845da0b8cdb18b34fbd332b9aaa39cea55c..0000000000000000000000000000000000000000
--- a/spaces/marioboy/neil-breen/vocoder/inference.py
+++ /dev/null
@@ -1,64 +0,0 @@
-from vocoder.models.fatchord_version import WaveRNN
-from vocoder import hparams as hp
-import torch
-
-
-_model = None # type: WaveRNN
-
-def load_model(weights_fpath, verbose=True):
- global _model, _device
-
- if verbose:
- print("Building Wave-RNN")
- _model = WaveRNN(
- rnn_dims=hp.voc_rnn_dims,
- fc_dims=hp.voc_fc_dims,
- bits=hp.bits,
- pad=hp.voc_pad,
- upsample_factors=hp.voc_upsample_factors,
- feat_dims=hp.num_mels,
- compute_dims=hp.voc_compute_dims,
- res_out_dims=hp.voc_res_out_dims,
- res_blocks=hp.voc_res_blocks,
- hop_length=hp.hop_length,
- sample_rate=hp.sample_rate,
- mode=hp.voc_mode
- )
-
- if torch.cuda.is_available():
- _model = _model.cuda()
- _device = torch.device('cuda')
- else:
- _device = torch.device('cpu')
-
- if verbose:
- print("Loading model weights at %s" % weights_fpath)
- checkpoint = torch.load(weights_fpath, _device)
- _model.load_state_dict(checkpoint['model_state'])
- _model.eval()
-
-
-def is_loaded():
- return _model is not None
-
-
-def infer_waveform(mel, normalize=True, batched=True, target=8000, overlap=800,
- progress_callback=None):
- """
- Infers the waveform of a mel spectrogram output by the synthesizer (the format must match
- that of the synthesizer!)
-
- :param normalize: divide the mel by hp.mel_max_abs_value before vocoding
- :param batched: fold the mel into overlapping segments for faster batched generation
- :param target: number of samples per batched segment
- :param overlap: number of overlapping samples crossfaded between segments
- :return: the generated waveform
- """
- if _model is None:
- raise Exception("Please load Wave-RNN in memory before using it")
-
- if normalize:
- mel = mel / hp.mel_max_abs_value
- mel = torch.from_numpy(mel[None, ...])
- wav = _model.generate(mel, batched, target, overlap, hp.mu_law, progress_callback)
- return wav
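A minimal usage sketch for the vocoder API above, for orientation only: the checkpoint and mel paths are hypothetical, and `soundfile` is assumed just for writing the result (it is not imported by the deleted file itself).

```python
# Illustrative sketch, not part of the deleted file.
import numpy as np
import soundfile as sf

from vocoder import inference as vocoder
from vocoder import hparams as hp

vocoder.load_model("saved_models/pretrained/vocoder.pt")  # hypothetical checkpoint path
assert vocoder.is_loaded()

mel = np.load("example_mel.npy")  # hypothetical file, shape (num_mels, n_frames), synthesizer scale

# normalize=True divides by hp.mel_max_abs_value; batched folding trades a little
# quality for a large speed-up on long utterances.
wav = vocoder.infer_waveform(mel, normalize=True, batched=True, target=8000, overlap=800)
sf.write("vocoder_out.wav", wav, hp.sample_rate)
```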
diff --git a/spaces/mateuseap/magic-vocals/rmvpe.py b/spaces/mateuseap/magic-vocals/rmvpe.py
deleted file mode 100644
index 3ad346141340e03bdbaa20121e1ed435bb3da57a..0000000000000000000000000000000000000000
--- a/spaces/mateuseap/magic-vocals/rmvpe.py
+++ /dev/null
@@ -1,432 +0,0 @@
-import sys, torch, numpy as np, traceback, pdb
-import torch.nn as nn
-from time import time as ttime
-import torch.nn.functional as F
-
-
-class BiGRU(nn.Module):
- def __init__(self, input_features, hidden_features, num_layers):
- super(BiGRU, self).__init__()
- self.gru = nn.GRU(
- input_features,
- hidden_features,
- num_layers=num_layers,
- batch_first=True,
- bidirectional=True,
- )
-
- def forward(self, x):
- return self.gru(x)[0]
-
-
-class ConvBlockRes(nn.Module):
- def __init__(self, in_channels, out_channels, momentum=0.01):
- super(ConvBlockRes, self).__init__()
- self.conv = nn.Sequential(
- nn.Conv2d(
- in_channels=in_channels,
- out_channels=out_channels,
- kernel_size=(3, 3),
- stride=(1, 1),
- padding=(1, 1),
- bias=False,
- ),
- nn.BatchNorm2d(out_channels, momentum=momentum),
- nn.ReLU(),
- nn.Conv2d(
- in_channels=out_channels,
- out_channels=out_channels,
- kernel_size=(3, 3),
- stride=(1, 1),
- padding=(1, 1),
- bias=False,
- ),
- nn.BatchNorm2d(out_channels, momentum=momentum),
- nn.ReLU(),
- )
- if in_channels != out_channels:
- self.shortcut = nn.Conv2d(in_channels, out_channels, (1, 1))
- self.is_shortcut = True
- else:
- self.is_shortcut = False
-
- def forward(self, x):
- if self.is_shortcut:
- return self.conv(x) + self.shortcut(x)
- else:
- return self.conv(x) + x
-
-
-class Encoder(nn.Module):
- def __init__(
- self,
- in_channels,
- in_size,
- n_encoders,
- kernel_size,
- n_blocks,
- out_channels=16,
- momentum=0.01,
- ):
- super(Encoder, self).__init__()
- self.n_encoders = n_encoders
- self.bn = nn.BatchNorm2d(in_channels, momentum=momentum)
- self.layers = nn.ModuleList()
- self.latent_channels = []
- for i in range(self.n_encoders):
- self.layers.append(
- ResEncoderBlock(
- in_channels, out_channels, kernel_size, n_blocks, momentum=momentum
- )
- )
- self.latent_channels.append([out_channels, in_size])
- in_channels = out_channels
- out_channels *= 2
- in_size //= 2
- self.out_size = in_size
- self.out_channel = out_channels
-
- def forward(self, x):
- concat_tensors = []
- x = self.bn(x)
- for i in range(self.n_encoders):
- _, x = self.layers[i](x)
- concat_tensors.append(_)
- return x, concat_tensors
-
-
-class ResEncoderBlock(nn.Module):
- def __init__(
- self, in_channels, out_channels, kernel_size, n_blocks=1, momentum=0.01
- ):
- super(ResEncoderBlock, self).__init__()
- self.n_blocks = n_blocks
- self.conv = nn.ModuleList()
- self.conv.append(ConvBlockRes(in_channels, out_channels, momentum))
- for i in range(n_blocks - 1):
- self.conv.append(ConvBlockRes(out_channels, out_channels, momentum))
- self.kernel_size = kernel_size
- if self.kernel_size is not None:
- self.pool = nn.AvgPool2d(kernel_size=kernel_size)
-
- def forward(self, x):
- for i in range(self.n_blocks):
- x = self.conv[i](x)
- if self.kernel_size is not None:
- return x, self.pool(x)
- else:
- return x
-
-
-class Intermediate(nn.Module): #
- def __init__(self, in_channels, out_channels, n_inters, n_blocks, momentum=0.01):
- super(Intermediate, self).__init__()
- self.n_inters = n_inters
- self.layers = nn.ModuleList()
- self.layers.append(
- ResEncoderBlock(in_channels, out_channels, None, n_blocks, momentum)
- )
- for i in range(self.n_inters - 1):
- self.layers.append(
- ResEncoderBlock(out_channels, out_channels, None, n_blocks, momentum)
- )
-
- def forward(self, x):
- for i in range(self.n_inters):
- x = self.layers[i](x)
- return x
-
-
-class ResDecoderBlock(nn.Module):
- def __init__(self, in_channels, out_channels, stride, n_blocks=1, momentum=0.01):
- super(ResDecoderBlock, self).__init__()
- out_padding = (0, 1) if stride == (1, 2) else (1, 1)
- self.n_blocks = n_blocks
- self.conv1 = nn.Sequential(
- nn.ConvTranspose2d(
- in_channels=in_channels,
- out_channels=out_channels,
- kernel_size=(3, 3),
- stride=stride,
- padding=(1, 1),
- output_padding=out_padding,
- bias=False,
- ),
- nn.BatchNorm2d(out_channels, momentum=momentum),
- nn.ReLU(),
- )
- self.conv2 = nn.ModuleList()
- self.conv2.append(ConvBlockRes(out_channels * 2, out_channels, momentum))
- for i in range(n_blocks - 1):
- self.conv2.append(ConvBlockRes(out_channels, out_channels, momentum))
-
- def forward(self, x, concat_tensor):
- x = self.conv1(x)
- x = torch.cat((x, concat_tensor), dim=1)
- for i in range(self.n_blocks):
- x = self.conv2[i](x)
- return x
-
-
-class Decoder(nn.Module):
- def __init__(self, in_channels, n_decoders, stride, n_blocks, momentum=0.01):
- super(Decoder, self).__init__()
- self.layers = nn.ModuleList()
- self.n_decoders = n_decoders
- for i in range(self.n_decoders):
- out_channels = in_channels // 2
- self.layers.append(
- ResDecoderBlock(in_channels, out_channels, stride, n_blocks, momentum)
- )
- in_channels = out_channels
-
- def forward(self, x, concat_tensors):
- for i in range(self.n_decoders):
- x = self.layers[i](x, concat_tensors[-1 - i])
- return x
-
-
-class DeepUnet(nn.Module):
- def __init__(
- self,
- kernel_size,
- n_blocks,
- en_de_layers=5,
- inter_layers=4,
- in_channels=1,
- en_out_channels=16,
- ):
- super(DeepUnet, self).__init__()
- self.encoder = Encoder(
- in_channels, 128, en_de_layers, kernel_size, n_blocks, en_out_channels
- )
- self.intermediate = Intermediate(
- self.encoder.out_channel // 2,
- self.encoder.out_channel,
- inter_layers,
- n_blocks,
- )
- self.decoder = Decoder(
- self.encoder.out_channel, en_de_layers, kernel_size, n_blocks
- )
-
- def forward(self, x):
- x, concat_tensors = self.encoder(x)
- x = self.intermediate(x)
- x = self.decoder(x, concat_tensors)
- return x
-
-
-class E2E(nn.Module):
- def __init__(
- self,
- n_blocks,
- n_gru,
- kernel_size,
- en_de_layers=5,
- inter_layers=4,
- in_channels=1,
- en_out_channels=16,
- ):
- super(E2E, self).__init__()
- self.unet = DeepUnet(
- kernel_size,
- n_blocks,
- en_de_layers,
- inter_layers,
- in_channels,
- en_out_channels,
- )
- self.cnn = nn.Conv2d(en_out_channels, 3, (3, 3), padding=(1, 1))
- if n_gru:
- self.fc = nn.Sequential(
- BiGRU(3 * 128, 256, n_gru),
- nn.Linear(512, 360),
- nn.Dropout(0.25),
- nn.Sigmoid(),
- )
- else:
- self.fc = nn.Sequential(
- nn.Linear(3 * 128, 360), nn.Dropout(0.25), nn.Sigmoid()  # 128 mel bins, 360 pitch classes (mirrors the GRU branch above)
- )
-
- def forward(self, mel):
- mel = mel.transpose(-1, -2).unsqueeze(1)
- x = self.cnn(self.unet(mel)).transpose(1, 2).flatten(-2)
- x = self.fc(x)
- return x
-
-
-from librosa.filters import mel
-
-
-class MelSpectrogram(torch.nn.Module):
- def __init__(
- self,
- is_half,
- n_mel_channels,
- sampling_rate,
- win_length,
- hop_length,
- n_fft=None,
- mel_fmin=0,
- mel_fmax=None,
- clamp=1e-5,
- ):
- super().__init__()
- n_fft = win_length if n_fft is None else n_fft
- self.hann_window = {}
- mel_basis = mel(
- sr=sampling_rate,
- n_fft=n_fft,
- n_mels=n_mel_channels,
- fmin=mel_fmin,
- fmax=mel_fmax,
- htk=True,
- )
- mel_basis = torch.from_numpy(mel_basis).float()
- self.register_buffer("mel_basis", mel_basis)
- self.n_fft = win_length if n_fft is None else n_fft
- self.hop_length = hop_length
- self.win_length = win_length
- self.sampling_rate = sampling_rate
- self.n_mel_channels = n_mel_channels
- self.clamp = clamp
- self.is_half = is_half
-
- def forward(self, audio, keyshift=0, speed=1, center=True):
- factor = 2 ** (keyshift / 12)
- n_fft_new = int(np.round(self.n_fft * factor))
- win_length_new = int(np.round(self.win_length * factor))
- hop_length_new = int(np.round(self.hop_length * speed))
- keyshift_key = str(keyshift) + "_" + str(audio.device)
- if keyshift_key not in self.hann_window:
- self.hann_window[keyshift_key] = torch.hann_window(win_length_new).to(
- audio.device
- )
- fft = torch.stft(
- audio,
- n_fft=n_fft_new,
- hop_length=hop_length_new,
- win_length=win_length_new,
- window=self.hann_window[keyshift_key],
- center=center,
- return_complex=True,
- )
- magnitude = torch.sqrt(fft.real.pow(2) + fft.imag.pow(2))
- if keyshift != 0:
- size = self.n_fft // 2 + 1
- resize = magnitude.size(1)
- if resize < size:
- magnitude = F.pad(magnitude, (0, 0, 0, size - resize))
- magnitude = magnitude[:, :size, :] * self.win_length / win_length_new
- mel_output = torch.matmul(self.mel_basis, magnitude)
- if self.is_half == True:
- mel_output = mel_output.half()
- log_mel_spec = torch.log(torch.clamp(mel_output, min=self.clamp))
- return log_mel_spec
-
-
-class RMVPE:
- def __init__(self, model_path, is_half, device=None):
- self.resample_kernel = {}
- model = E2E(4, 1, (2, 2))
- ckpt = torch.load(model_path, map_location="cpu")
- model.load_state_dict(ckpt)
- model.eval()
- if is_half == True:
- model = model.half()
- self.model = model
- self.resample_kernel = {}
- self.is_half = is_half
- if device is None:
- device = "cuda" if torch.cuda.is_available() else "cpu"
- self.device = device
- self.mel_extractor = MelSpectrogram(
- is_half, 128, 16000, 1024, 160, None, 30, 8000
- ).to(device)
- self.model = self.model.to(device)
- cents_mapping = 20 * np.arange(360) + 1997.3794084376191
- self.cents_mapping = np.pad(cents_mapping, (4, 4)) # 368
-
- def mel2hidden(self, mel):
- with torch.no_grad():
- n_frames = mel.shape[-1]
- mel = F.pad(
- mel, (0, 32 * ((n_frames - 1) // 32 + 1) - n_frames), mode="reflect"
- )
- hidden = self.model(mel)
- return hidden[:, :n_frames]
-
- def decode(self, hidden, thred=0.03):
- cents_pred = self.to_local_average_cents(hidden, thred=thred)
- f0 = 10 * (2 ** (cents_pred / 1200))
- f0[f0 == 10] = 0
- # f0 = np.array([10 * (2 ** (cent_pred / 1200)) if cent_pred else 0 for cent_pred in cents_pred])
- return f0
-
- def infer_from_audio(self, audio, thred=0.03):
- audio = torch.from_numpy(audio).float().to(self.device).unsqueeze(0)
- # torch.cuda.synchronize()
- # t0=ttime()
- mel = self.mel_extractor(audio, center=True)
- # torch.cuda.synchronize()
- # t1=ttime()
- hidden = self.mel2hidden(mel)
- # torch.cuda.synchronize()
- # t2=ttime()
- hidden = hidden.squeeze(0).cpu().numpy()
- if self.is_half == True:
- hidden = hidden.astype("float32")
- f0 = self.decode(hidden, thred=thred)
- # torch.cuda.synchronize()
- # t3=ttime()
- # print("hmvpe:%s\t%s\t%s\t%s"%(t1-t0,t2-t1,t3-t2,t3-t0))
- return f0
-
- def to_local_average_cents(self, salience, thred=0.05):
- # t0 = ttime()
- center = np.argmax(salience, axis=1)  # (n_frames,) index of the strongest bin per frame
- salience = np.pad(salience, ((0, 0), (4, 4)))  # (n_frames, 368)
- # t1 = ttime()
- center += 4
- todo_salience = []
- todo_cents_mapping = []
- starts = center - 4
- ends = center + 5
- for idx in range(salience.shape[0]):
- todo_salience.append(salience[:, starts[idx] : ends[idx]][idx])
- todo_cents_mapping.append(self.cents_mapping[starts[idx] : ends[idx]])
- # t2 = ttime()
- todo_salience = np.array(todo_salience)  # (n_frames, 9)
- todo_cents_mapping = np.array(todo_cents_mapping)  # (n_frames, 9)
- product_sum = np.sum(todo_salience * todo_cents_mapping, 1)
- weight_sum = np.sum(todo_salience, 1)  # (n_frames,)
- devided = product_sum / weight_sum  # (n_frames,) salience-weighted average cents per frame
- # t3 = ttime()
- maxx = np.max(salience, axis=1)  # (n_frames,) peak salience per frame
- devided[maxx <= thred] = 0
- # t4 = ttime()
- # print("decode:%s\t%s\t%s\t%s" % (t1 - t0, t2 - t1, t3 - t2, t4 - t3))
- return devided
-
-
-# if __name__ == '__main__':
-# audio, sampling_rate = sf.read("卢本伟语录~1.wav")
-# if len(audio.shape) > 1:
-# audio = librosa.to_mono(audio.transpose(1, 0))
-# audio_bak = audio.copy()
-# if sampling_rate != 16000:
-# audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000)
-# model_path = "/bili-coeus/jupyter/jupyterhub-liujing04/vits_ch/test-RMVPE/weights/rmvpe_llc_half.pt"
-# thred = 0.03 # 0.01
-# device = 'cuda' if torch.cuda.is_available() else 'cpu'
-# rmvpe = RMVPE(model_path,is_half=False, device=device)
-# t0=ttime()
-# f0 = rmvpe.infer_from_audio(audio, thred=thred)
-# f0 = rmvpe.infer_from_audio(audio, thred=thred)
-# f0 = rmvpe.infer_from_audio(audio, thred=thred)
-# f0 = rmvpe.infer_from_audio(audio, thred=thred)
-# f0 = rmvpe.infer_from_audio(audio, thred=thred)
-# t1=ttime()
-# print(f0.shape,t1-t0)
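A small sketch of the cents-to-Hz arithmetic used by `RMVPE.decode` above, useful for sanity-checking the pitch range; it only restates the formula already in the file and is not part of the deleted code.

```python
# Illustrative sketch, not part of the deleted file.
import numpy as np

cents_mapping = 20 * np.arange(360) + 1997.3794084376191  # 360 bins, 20 cents apart

def cents_to_hz(cents):
    return 10 * 2 ** (cents / 1200)  # same formula as decode()

# The bin centres span roughly 31.7 Hz up to a little over 2 kHz.
print(cents_to_hz(cents_mapping[0]), cents_to_hz(cents_mapping[-1]))

# decode() takes a salience-weighted average over the 9 bins around each frame's argmax
# (to_local_average_cents), applies the same formula, and zeroes any frame whose peak
# salience falls below `thred`; the `f0 == 10` sentinel corresponds to 0 cents.
```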
diff --git a/spaces/matthoffner/AudioCraft_Plus/audiocraft/grids/diffusion/4_bands_base_32khz.py b/spaces/matthoffner/AudioCraft_Plus/audiocraft/grids/diffusion/4_bands_base_32khz.py
deleted file mode 100644
index f7e67bcc89dd0c8e50d770e600b55f179fe19588..0000000000000000000000000000000000000000
--- a/spaces/matthoffner/AudioCraft_Plus/audiocraft/grids/diffusion/4_bands_base_32khz.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-"""
-Training of the 4 diffusion models described in
-"From Discrete Tokens to High-Fidelity Audio Using Multi-Band Diffusion"
-(paper link).
-"""
-
-from ._explorers import DiffusionExplorer
-
-
-@DiffusionExplorer
-def explorer(launcher):
- launcher.slurm_(gpus=4, partition='learnfair')
-
- launcher.bind_({'solver': 'diffusion/default',
- 'dset': 'internal/music_10k_32khz'})
-
- with launcher.job_array():
- launcher({'filter.use': True, 'filter.idx_band': 0, "processor.use": False, 'processor.power_std': 0.4})
- launcher({'filter.use': True, 'filter.idx_band': 1, "processor.use": False, 'processor.power_std': 0.4})
- launcher({'filter.use': True, 'filter.idx_band': 2, "processor.use": True, 'processor.power_std': 0.4})
- launcher({'filter.use': True, 'filter.idx_band': 3, "processor.use": True, 'processor.power_std': 0.75})
diff --git a/spaces/matthoffner/chatbot/components/Chat/ErrorMessageDiv.tsx b/spaces/matthoffner/chatbot/components/Chat/ErrorMessageDiv.tsx
deleted file mode 100644
index 11b868f98b4eda7eb215a911f59c4b42951a654c..0000000000000000000000000000000000000000
--- a/spaces/matthoffner/chatbot/components/Chat/ErrorMessageDiv.tsx
+++ /dev/null
@@ -1,28 +0,0 @@
-import { IconCircleX } from '@tabler/icons-react';
-import { FC } from 'react';
-
-import { ErrorMessage } from '@/types/error';
-
-interface Props {
- error: ErrorMessage;
-}
-
-export const ErrorMessageDiv: FC<Props> = ({ error }) => {
- return (
- <div className="mx-6 flex h-full flex-col items-center justify-center text-red-500">
- <div className="mb-5">
- <IconCircleX size={36} />
- </div>
- <div className="mb-3 text-2xl font-medium">{error.title}</div>
- {error.messageLines.map((line, index) => (
- <div key={index} className="text-center">
- {' '}
- {line}{' '}
- </div>
- ))}
- <div className="mt-4 text-xs opacity-50 dark:text-red-400">
- {error.code ? <i>Code: {error.code}</i> : ''}
- </div>
- </div>
- );
-};
diff --git a/spaces/megaaziib/hololive-rvc-models/app.py b/spaces/megaaziib/hololive-rvc-models/app.py
deleted file mode 100644
index 47db29e9de54b1eb0cc22120f40ff7cb984126a7..0000000000000000000000000000000000000000
--- a/spaces/megaaziib/hololive-rvc-models/app.py
+++ /dev/null
@@ -1,185 +0,0 @@
-import os
-import json
-import argparse
-import traceback
-import logging
-import gradio as gr
-import numpy as np
-import librosa
-import torch
-import asyncio
-import edge_tts
-from datetime import datetime
-from fairseq import checkpoint_utils
-from infer_pack.models import SynthesizerTrnMs256NSFsid, SynthesizerTrnMs256NSFsid_nono
-from vc_infer_pipeline import VC
-from config import (
- is_half,
- device
-)
-logging.getLogger("numba").setLevel(logging.WARNING)
-limitation = os.getenv("SYSTEM") == "spaces" # limit audio length in huggingface spaces
-
-def create_vc_fn(tgt_sr, net_g, vc, if_f0, file_index, file_big_npy):
- def vc_fn(
- input_audio,
- f0_up_key,
- f0_method,
- index_rate,
- tts_mode,
- tts_text,
- tts_voice
- ):
- try:
- if tts_mode:
- if len(tts_text) > 500 and limitation:
- return "Text is too long", None
- if tts_text is None or tts_voice is None:
- return "You need to enter text and select a voice", None
- asyncio.run(edge_tts.Communicate(tts_text, "-".join(tts_voice.split('-')[:-1])).save("tts.mp3"))
- audio, sr = librosa.load("tts.mp3", sr=16000, mono=True)
- else:
- if args.files:
- audio, sr = librosa.load(input_audio, sr=16000, mono=True)
- else:
- if input_audio is None:
- return "You need to upload an audio", None
- sampling_rate, audio = input_audio
- duration = audio.shape[0] / sampling_rate
- if duration > 300 and limitation:
- return "Please upload an audio file that is less than 5 minutes 30 seconds.", None
- audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32)
- if len(audio.shape) > 1:
- audio = librosa.to_mono(audio.transpose(1, 0))
- if sampling_rate != 16000:
- audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000)
- times = [0, 0, 0]
- f0_up_key = int(f0_up_key)
- audio_opt = vc.pipeline(
- hubert_model,
- net_g,
- 0,
- audio,
- times,
- f0_up_key,
- f0_method,
- file_index,
- file_big_npy,
- index_rate,
- if_f0,
- )
- print(
- f"[{datetime.now().strftime('%Y-%m-%d %H:%M')}]: npy: {times[0]}, f0: {times[1]}s, infer: {times[2]}s"
- )
- return "Success", (tgt_sr, audio_opt)
- except:
- info = traceback.format_exc()
- print(info)
- return info, (None, None)
- return vc_fn
-
-def load_hubert():
- global hubert_model
- models, _, _ = checkpoint_utils.load_model_ensemble_and_task(
- ["hubert_base.pt"],
- suffix="",
- )
- hubert_model = models[0]
- hubert_model = hubert_model.to(device)
- if is_half:
- hubert_model = hubert_model.half()
- else:
- hubert_model = hubert_model.float()
- hubert_model.eval()
-
-def change_to_tts_mode(tts_mode):
- if tts_mode:
- return gr.Audio.update(visible=False), gr.Textbox.update(visible=True), gr.Dropdown.update(visible=True)
- else:
- return gr.Audio.update(visible=True), gr.Textbox.update(visible=False), gr.Dropdown.update(visible=False)
-
-if __name__ == '__main__':
- parser = argparse.ArgumentParser()
- parser.add_argument('--api', action="store_true", default=False)
- parser.add_argument("--share", action="store_true", default=False, help="share gradio app")
- parser.add_argument("--files", action="store_true", default=False, help="load audio from path")
- args, unknown = parser.parse_known_args()
- load_hubert()
- models = []
- tts_voice_list = asyncio.get_event_loop().run_until_complete(edge_tts.list_voices())
- voices = [f"{v['ShortName']}-{v['Gender']}" for v in tts_voice_list]
- with open("weights/model_info.json", "r", encoding="utf-8") as f:
- models_info = json.load(f)
- for name, info in models_info.items():
- if not info['enable']:
- continue
- title = info['title']
- author = info.get("author", None)
- cover = f"weights/{name}/{info['cover']}"
- index = f"weights/{name}/{info['feature_retrieval_library']}"
- npy = f"weights/{name}/{info['feature_file']}"
- cpt = torch.load(f"weights/{name}/{name}.pth", map_location="cpu")
- tgt_sr = cpt["config"][-1]
- cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0] # n_spk
- if_f0 = cpt.get("f0", 1)
- if if_f0 == 1:
- net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=is_half)
- else:
- net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
- del net_g.enc_q
- print(net_g.load_state_dict(cpt["weight"], strict=False))  # without this line the weights don't load cleanly -- really odd
- net_g.eval().to(device)
- if is_half:
- net_g = net_g.half()
- else:
- net_g = net_g.float()
- vc = VC(tgt_sr, device, is_half)
- models.append((name, title, author, cover, create_vc_fn(tgt_sr, net_g, vc, if_f0, index, npy)))
- with gr.Blocks() as app:
- gr.Markdown(
- "# Hololive RVC Models\n"
- "## The input audio should be clean and pure voice without background music.\n"
- "[](https://colab.research.google.com/github/aziib/Create-Google-Shared-Drive/blob/master/Hololive-RVC-Models.ipynb)\n\n"
- "[](https://ko-fi.com/megaaziib)\n\n"
- )
- with gr.Tabs():
- for (name, title, author, cover, vc_fn) in models:
- with gr.TabItem(name):
- with gr.Row():
- gr.Markdown(
- '<div align="center">'
- f'<div>{title}</div>\n'+
- (f'<div>Model author: {author}</div>' if author else "")+
- (f'<img style="width:auto;height:300px;" src="file/{cover}">' if cover else "")+
- '</div>'
- )
- with gr.Row():
- with gr.Column():
- if args.files:
- vc_input = gr.Textbox(label="Input audio path")
- else:
- vc_input = gr.Audio(label="Input audio"+' (less than 5 minutes 30 seconds)' if limitation else '')
- vc_transpose = gr.Number(label="Transpose", value=0)
- vc_f0method = gr.Radio(
- label="Pitch extraction algorithm, PM is fast but Harvest is better for low frequencies",
- choices=["pm", "harvest"],
- value="pm",
- interactive=True,
- )
- vc_index_ratio = gr.Slider(
- minimum=0,
- maximum=1,
- label="Retrieval feature ratio",
- value=0.6,
- interactive=True,
- )
- tts_mode = gr.Checkbox(label="tts (use edge-tts as input)", value=False)
- tts_text = gr.Textbox(visible=False,label="TTS text (500 character limit)" if limitation else "TTS text")
- tts_voice = gr.Dropdown(label="Edge-tts speaker", choices=voices, visible=False, allow_custom_value=False, value="en-US-AnaNeural-Female")
- vc_submit = gr.Button("Generate", variant="primary")
- with gr.Column():
- vc_output1 = gr.Textbox(label="Output Message")
- vc_output2 = gr.Audio(label="Output Audio")
- vc_submit.click(vc_fn, [vc_input, vc_transpose, vc_f0method, vc_index_ratio, tts_mode, tts_text, tts_voice], [vc_output1, vc_output2])
- tts_mode.change(change_to_tts_mode, [tts_mode], [vc_input, tts_text, tts_voice])
- app.queue(concurrency_count=1, max_size=20, api_open=args.api).launch(share=args.share)
\ No newline at end of file
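The loading loop above reads `weights/model_info.json` keyed by model folder name. A hypothetical entry, inferred from the keys the code accesses (names and file names here are illustrative, not from the repo):

```python
# Illustrative shape of weights/model_info.json, expressed as a Python dict.
example_model_info = {
    "some-model": {
        "enable": True,
        "title": "Some Hololive voice",
        "author": "unknown",                         # optional; read with info.get("author", None)
        "cover": "cover.png",                        # image shown in the model tab
        "feature_retrieval_library": "added.index",  # passed as file_index
        "feature_file": "total_fea.npy",             # passed as file_big_npy
    }
}
# For each enabled entry the app also loads weights/<name>/<name>.pth as the RVC checkpoint.
```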
diff --git a/spaces/merve/data-leak/public/third_party/simple-statistics.min.js b/spaces/merve/data-leak/public/third_party/simple-statistics.min.js
deleted file mode 100644
index 9191046b7dc959d771a904875817c2b9c26ff0e5..0000000000000000000000000000000000000000
--- a/spaces/merve/data-leak/public/third_party/simple-statistics.min.js
+++ /dev/null
@@ -1,3 +0,0 @@
-// https://github.com/simple-statistics/simple-statistics Copyright (c) 2014, Tom MacWright
-
-!function(t,r){"object"==typeof exports&&"undefined"!=typeof module?r(exports):"function"==typeof define&&define.amd?define(["exports"],r):r(t.ss={})}(this,function(t){"use strict";function r(t){if(0===t.length)return 0;for(var r,n=t[0],e=0,a=1;a=Math.abs(t[a])?e+=n-r+t[a]:e+=t[a]-r+n,n=r;return n+e}function g(t){if(0===t.length)throw new Error("mean requires at least one data point");return r(t)/t.length}function n(t,r){var n,e,a=g(t),o=0;if(2===r)for(e=0;er&&(r=t[n]);return r}function i(t,r){var n=t.length*r;if(0===t.length)throw new Error("quantile requires at least one data point.");if(r<0||1f&&p(t,n,e);sf;)l--}t[n]===f?p(t,n,l):p(t,++l,e),l<=r&&(n=l+1),r<=l&&(e=l-1)}}function p(t,r,n){var e=t[r];t[r]=t[n],t[n]=e}function s(t,r){var n=t.slice();if(Array.isArray(r)){!function(t,r){for(var n=[0],e=0;et[t.length-1])return 1;var n=function(t,r){var n=0,e=0,a=t.length;for(;e>>1]?a=n:e=-~n;return e}(t,r);if(t[n]!==r)return n/t.length;n++;var e=function(t,r){var n=0,e=0,a=t.length;for(;e=t[n=e+a>>>1]?e=-~n:a=n;return e}(t,r);if(e===n)return n/t.length;var a=e-n+1;return a*(e+n)/2/a/t.length}function m(t){var r=s(t,.75),n=s(t,.25);if("number"==typeof r&&"number"==typeof n)return r-n}function d(t){return+s(t,.5)}function b(t){for(var r=d(t),n=[],e=0;e=e[n][u]);--g)(s=x(h,u,o,i)+e[n-1][h-1])n&&(n=t[e]),t[e]t.length)throw new Error("cannot generate more classes than there are data values");var n=f(t);if(1===y(n))return[n];var e=S(r,n.length),a=S(r,n.length);!function(t,r,n){for(var e,a=r[0].length,o=t[Math.floor(a/2)],i=[],u=[],h=0;h=Math.abs(a)&&(c+=1);else if("greater"===n)for(h=0;h<=e;h++)o[h]>=a&&(c+=1);else for(h=0;h<=e;h++)o[h]<=a&&(c+=1);return c/e},t.bisect=function(t,r,n,e,a){if("function"!=typeof t)throw new TypeError("func must be a function");for(var o=0;o {
- var ttFnSel = d3.select('body').selectAppend('div.tooltip-footnote.tooltip-footnote-hidden')
-
- function index2superscipt(i){
- return (i + 1 + '')
- .split('')
- .map(num => '⁰¹²³⁴⁵⁶⁷⁸⁹'[num])
- .join('')
- }
-
- var footendSel = d3.selectAll('.footend')
- .each(function(d, i){
- var sel = d3.select(this)
- var ogHTML = sel.parent().html()
-
- sel
- .at({href: '#footstart-' + i, id: 'footend-' + i})
- .text(index2superscipt(i))
- .datum(ogHTML)
- })
-
- footendSel.parent().parent().selectAll('br').remove()
-
- var footstartSel = d3.selectAll('.footstart')
- .each(function(d, i){
- d3.select(this)
- .at({
- href: '#footend-' + i,
- })
- .text(index2superscipt(i))
- .datum(footendSel.data()[i])
- .parent().at({id: 'footstart-' + i})
- })
- .call(addLockedTooltip)
-
-
- function addLockedTooltip(sel){
- sel
- .on('mouseover', function(d, i){
- ttFnSel
- .classed('tooltip-footnote-hidden', 0)
- .html(d).select('.footend').remove()
-
- var [x, y] = d3.mouse(d3.select('html').node())
- var bb = ttFnSel.node().getBoundingClientRect(),
- left = d3.clamp(20, (x-bb.width/2), window.innerWidth - bb.width - 20),
- top = innerHeight + scrollY > y + 20 + bb.height ? y + 20 : y - bb.height - 10;
-
- ttFnSel.st({left, top})
- })
- .on('mousemove', mousemove)
- .on('mouseout', mouseout)
-
- ttFnSel
- .on('mousemove', mousemove)
- .on('mouseout', mouseout)
-
- function mousemove(){
- if (window.__ttfade) window.__ttfade.stop()
- }
-
- function mouseout(){
- if (window.__ttfade) window.__ttfade.stop()
- window.__ttfade = d3.timeout(
- () => ttFnSel.classed('tooltip-footnote-hidden', 1),
- 250
- )
- }
- }
-
-})()
-
-
diff --git a/spaces/merve/measuring-fairness/public/base-rate/sliders.js b/spaces/merve/measuring-fairness/public/base-rate/sliders.js
deleted file mode 100644
index 994c9ba490dc44dfa015553d32ff24e822f16de0..0000000000000000000000000000000000000000
--- a/spaces/merve/measuring-fairness/public/base-rate/sliders.js
+++ /dev/null
@@ -1,103 +0,0 @@
-/* Copyright 2020 Google LLC. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-
-
-
-
-var sliderVals = {}
-
-var sliders = [
- {
- key: 'fNoiseMag',
- text: 'Feature Noise',
- r: [0, 1],
- v: .5
- },
- {
- key: 'fBiasMag',
- text: 'Feature Bias',
- r: [0, 1],
- v: .2
- },
-]
-
-!(function(){
- var width = 145
- var height = 30
-
- sliders.forEach(d => {
- d.s = d3.scaleLinear().domain(d.r).range([0, width])
- sliderVals[d.key] = d
- })
-
- var sliderSel = d3.select('.slider').html('')
- .appendMany('div', sliders)
- .at({class: d => d.key})
- .st({
- display: 'inline-block',
- width: width,
- paddingRight: 60,
- marginTop: 20,
- color: '#000'
- })
-
- sliderSel.append('div')
- .text(d => d.text)
- .st({marginBottom: height/2})
-
- var svgSel = sliderSel.append('svg').at({width, height})
- .on('click', function(d){
- d.v = d.s.invert(d3.mouse(this)[0])
- updatePos()
- })
- .st({
- cursor: 'pointer'
- })
- .append('g').translate(height/2, 1)
- svgSel.append('rect').at({width, height, y: -height/2, fill: '#fff'})
-
- svgSel.append('path').at({
- d: `M 0 0 H ${width}`,
- stroke: '#000',
- strokeWidth: 2
- })
-
- var drag = d3.drag()
- .on('drag', function(d){
- var x = d3.mouse(this)[0]
- d.v = d3.clamp(d3.min(d.r), d.s.invert(x), d3.max(d.r))
-
- updatePos()
- })
-
- var circleSel = svgSel.append('circle')
- .at({
- r: height/2,
- stroke: '#000',
- strokeWidth: 2,
- fill: '#fff',
- })
- .call(drag)
-
-
- function updatePos(){
- circleSel.at({cx: d => d.s(d.v)})
- if (sliderVals.onUpdate) sliderVals.onUpdate()
- }
-
- updatePos()
- sliderVals.updatePos = updatePos
-})()
diff --git a/spaces/merve/uncertainty-calibration/server-side/fill-in-the-blank/node/split-post-cache.js b/spaces/merve/uncertainty-calibration/server-side/fill-in-the-blank/node/split-post-cache.js
deleted file mode 100644
index 5ffee3bb3d706d2eb01fefb71d3e5d7ae997bd53..0000000000000000000000000000000000000000
--- a/spaces/merve/uncertainty-calibration/server-side/fill-in-the-blank/node/split-post-cache.js
+++ /dev/null
@@ -1,23 +0,0 @@
-import urlSlug from 'url-slug'
-
-import ss from 'scrape-stl'
-var {d3, jp, fs, io, _} = ss
-
-import { URL } from 'url'
-var __dirname = new URL('.', import.meta.url).pathname
-
-
-var datadir = __dirname + '/../../../source/fill-in-the-blank/data/'
-var postCache = io.readDataSync(datadir + 'post-cache.json')
-
-var cacheKey2filename = {}
-Object.entries(postCache).forEach(([key, value]) => {
- var filename = urlSlug(key) + '.json'
- io.writeDataSync(datadir + filename, value)
- cacheKey2filename[key] = filename
-})
-
-fs.writeFileSync(
- datadir + 'cachekey2filename.js',
- `window.cacheKey2filename = ${JSON.stringify(cacheKey2filename, null, 2)}`
-)
diff --git a/spaces/merve/uncertainty-calibration/source/_posts/2022-01-28-dataset-worldviews.md b/spaces/merve/uncertainty-calibration/source/_posts/2022-01-28-dataset-worldviews.md
deleted file mode 100644
index 67698648fc4d268a46f0b7f91c3c954b8508eb92..0000000000000000000000000000000000000000
--- a/spaces/merve/uncertainty-calibration/source/_posts/2022-01-28-dataset-worldviews.md
+++ /dev/null
@@ -1,194 +0,0 @@
----
-permalink: /dataset-worldviews/
-template: post.html
-
-title: Datasets Have Worldviews
-summary: Every dataset communicates a different perspective. When you shift your perspective, your conclusions can shift, too.
-summaryalt: Every dataset communicates a different perspective. When you shift your perspective, your conclusions can shift, too.
-shareimg: https://pair.withgoogle.com/explorables/images/dataset-worldviews-shareimg.png
-date: 2022-01-28
----
-
-
-Suppose you have a dataset of shapes. They can either be shaded or unshaded. They look something like this:
-
-
-
- You built a supervised machine learning classifier that will automatically classify each shape as shaded or unshaded. You call it the "Is-Shaded Classifier".
-
-Click "Run Classifier" to see how your model performs.
-
-
-
-
-
-It’s not perfect— some of the shapes are definitely misclassified. You want to improve your model!
-
- To do so, you want to know more about the kinds of mistakes your model is making.
-
-### Thinking About Bias
-
-In training, you only gave your model the raw image of each shape and one ground truth label: shaded and unshaded. But maybe something about your model—the distribution of the training data you used, the architecture you chose, or how you set your hyperparameters—resulted in your model performing better on some shapes than others.
-
- In fact, you’ve seen a lot of papers and articles citing issues of biased model performance between circles, triangles, and rectangles in shape data. One paper finds that shape detection algorithms tend to do worse on triangles; another article says color accuracy is an issue with circles. So you wonder: are there biases in your model’s misclassifications?
-
-
-
- You want to make sure that your model is performing equally well across circles, triangles, and rectangles, so you decide to do a fairness analysis.
-
- There’s just one issue: you don’t have labels for which of your shapes are circles, triangles, or rectangles.
-
- So, you decide to send your data to data labelers.
-
-
-
- You receive feedback from your data labeling team that they’re not sure what to do with the shapes that aren’t exactly circles, triangles, or rectangles.
-
-
-
- For the shapes that are unclear, you can have them use their best guess or simply label them as “other”. Then, you can finally do some fairness analysis!
-
- Below is the interface they see:
-
-
-
- These shapes should be labeled...
-
-
-
-
-
-
-If you go back and change the labelers' instructions, which shapes do you perform worst on? Where do you find bias?
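One way to run that per-group check, sketched with made-up predictions and labeler-assigned groups (pandas is used only for brevity; nothing here comes from the original interactive demo):

```python
# Illustrative sketch of disaggregated accuracy by shape group.
import pandas as pd

df = pd.DataFrame({
    "shape_group": ["circle", "circle", "triangle", "rect", "other", "triangle"],  # from the labelers
    "is_shaded":   [1, 0, 1, 1, 0, 0],   # ground truth
    "prediction":  [1, 0, 0, 1, 1, 0],   # Is-Shaded Classifier output
})

df["correct"] = df["is_shaded"] == df["prediction"]
print(df.groupby("shape_group")["correct"].mean())  # accuracy per group

# Regroup the ambiguous shapes ("other" vs. best guess) and the per-group accuracies
# -- and the apparent bias -- change with them.
```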
-
-You notice that your results hinge on how you choose to classify the shapes in your data.
-
-Because ultimately, this isn’t a world of only circles, triangles, and rectangles!
-
-### Thinking About Classification
-
-What could we find out about our classifier's performance if we used different categories altogether?
-
- All shapes are basically...
- Everything else should be labeled...
-
-
-
-
-
-
-With each of the different categories, which shapes do you perform worst on? Where do you find bias?
-
-Each way of categorizing your shapes takes a different stance about what’s important. Each one makes some features more important than others, makes some distinctions visible and other distinctions invisible, and makes some things easy to classify while others become outliers.
-
-And each one tells you something different about what kind of bias your classifier has!
-
-### Grouping and Regrouping
-
- Here's another way to look at the same results. We can draw all the shapes that were correctly classified above the dashed line, and all the incorrectly classified shapes below it.
-
-
-
- We're still looking at the same model making the same classification on the same shapes, so the same shapes stay above and below the line. But each way of grouping the results distributes the errors differently— each way tells you something different.
-
-### Labels Tell Stories
-
-The decisions you make about classification, however small…
-
- All shapes are basically...
-
-…begin to shape others’ decisions…
-
-
-
-…they shape the analysis you can do…
-
-
-
-…and they shape the kinds of conversations that happen.
-
-
-
-It’s natural to want to find a way out of this problem by gathering more features or collecting more data. If we just have enough detail on enough data, surely we can avoid making these kinds of decisions, right?
-
-Unfortunately, that isn’t the case. Describing the world around us in any way—whether we’re telling a friend a story or telling a computer about shapes—requires us to choose what information is important to convey and what tools we want to use to convey it.
-
-Whether we think about it or not, we’re always making choices about classification.
-
-
-All people are basically... men or women
-All food is basically... sweet or savory
-All content is basically... kid-friendly or adult
-All speech is basically... hate speech or acceptable speech
-
- All results are basically... significant or insignificant
-
-And as we saw with shapes, all of these choices make some features more important than others, make some distinctions visible and other distinctions invisible, and make some things easy to classify while others become outliers.
-
-### In Practice
-
-Let’s take a closer look at how this plays out in real machine learning applications. One straightforward example is in supervised object detection tasks.
-
-
-For example, let’s imagine we want to train an object detection model on a dataset including this image:
-
-Source: Wikimedia Commons
-
-We could give it the following ground truth bounding boxes:
-
-
-
-This looks objective, right? After all, a building is a building, a bush is a bush, and a mountain is a mountain!
- But even labeling the same regions in the same image, you can communicate a very different perspective:
-
-
-
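A tiny illustration of that point: the same regions annotated under two different label vocabularies. The boxes and both label sets below are made up, not taken from the original figure.

```python
# Illustrative sketch: identical regions, two different labeling worldviews.
regions = [
    {"box": (40, 220, 300, 180)},   # x, y, w, h in pixels (made up)
    {"box": (380, 300, 120, 90)},
    {"box": (0, 0, 800, 260)},
]

labels_a = ["building", "bush", "mountain"]                             # one worldview
labels_b = ["my childhood home", "hedge", "the hill behind the farm"]   # another worldview

for region, a, b in zip(regions, labels_a, labels_b):
    print(region["box"], "|", a, "|", b)
```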
-Or consider the image below, with several sets of “ground truth” labels. Looking at each of these labels, consider:
-
-What features matter? What gets labeled? Whose worldview comes through? What might you learn from this set of labels that you wouldn't learn from another?
-
-Source: Wikimedia Commons
-
-There is no “view from nowhere”, no universal way to organize every object, or word, or image. Datasets are always products of a particular time, place, and set of conditions; they are socially situated artifacts. They have histories; they have politics. And ignoring this fact has very real consequences.
-
-So what do we do with this information?
-
-A great place to start is to reflect on your own context and get curious about your data.
-
-If it’s hard to see a dataset’s values—if it feels “objective”, “universal”, or “neutral”—it may simply be reflecting a worldview you’re accustomed to. So, understanding the limitations of your own worldview can tell you about the limitations of “objective” data. What assumptions do you make about the world? What feels like common sense? What feels foreign?
-
-And do some sleuthing about your data! Who collected this data? Why was it collected? Who paid for it? Where did the “ground truth” come from?
-
-You might even find yourself questioning what kinds of assumptions underpin machine learning dataset development or even thinking more deeply about classification as a whole.
-
-If you find yourself with lots of questions, you're already off to a good start.
-
-
-
-
-### Credits
-
- Dylan Baker // January 2022
- Thanks to Adam Pearce, Alex Hanna, Emily Denton, Fernanda Viégas, Kevin Robinson, Nithum Thain, Razvan Amironesei, and Vinodkumar Prabhakaran for their help with this piece.
-
-
-
-
-
-
-### More Explorables
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/spaces/mmecheri/Rakuten_Streamlit/dataset_description.py b/spaces/mmecheri/Rakuten_Streamlit/dataset_description.py
deleted file mode 100644
index 873903d5a31d96761cdcdb4fb0a8f70a4a50c13a..0000000000000000000000000000000000000000
--- a/spaces/mmecheri/Rakuten_Streamlit/dataset_description.py
+++ /dev/null
@@ -1,52 +0,0 @@
-import streamlit as st
-import pandas as pd
-import numpy as np
-import data_exp_Viz, data_preprocessing
-from multiapp import MultiApp
-from PIL import Image
-
-def app():
-
- st.subheader('Description des données')
-
- read_page_text(text_page ='./page_descriptions/data_description_txt.md')
-
- load_samples()
-#
-
-def read_page_text(text_page):
- '''The text page. Read from .md file '''
- with open(text_page, 'r', encoding='utf-8') as txtpage:
- txtpage = txtpage.read().split('------')
- st.markdown(txtpage[0], unsafe_allow_html=True)
-
-
-def load_samples():
-
- col1, col2, col3 = st.columns([2.5,1,0.75])
-
- with col1:
- st.markdown('Extract du dataset d\'entrainement(X_train):', unsafe_allow_html=True)
- df = pd.read_pickle('./demo_Inputs/data/Extact_Xtain.pkl')
- df.index.name = 'Id'
- st.dataframe(df)
- agree1 = st.checkbox('Afficher la description des colonnes')
- if agree1:
- with open('./page_descriptions/data_col_description.md', 'r', encoding='utf-8') as subpage1:
- subpage1 = subpage1.read().split('------')
- st.markdown(subpage1[0], unsafe_allow_html=True)
-
- with col2:
- st.markdown('Les classes associées «**prdtypecode**»(y_train)', unsafe_allow_html=True)
- df = pd.read_pickle('./demo_Inputs/data/Extact_Ytain.pkl')
- st.dataframe(df)
-
-
- with col3:
- with open('./page_descriptions/data_img_description.md', 'r', encoding='utf-8') as subpage2:
- subpage2 = subpage2.read().split('------')
- st.markdown(subpage2[0], unsafe_allow_html=True)
-
- image = Image.open('./doc/exmpl_data_with_image.png')
- st.image(image,output_format="auto")
-
diff --git a/spaces/mmlab-ntu/relate-anything-model/ram_train_eval.py b/spaces/mmlab-ntu/relate-anything-model/ram_train_eval.py
deleted file mode 100644
index 8be894a4fbbf646b158147d262aa125cc1791c0b..0000000000000000000000000000000000000000
--- a/spaces/mmlab-ntu/relate-anything-model/ram_train_eval.py
+++ /dev/null
@@ -1,417 +0,0 @@
-import os
-import time
-from datetime import timedelta
-import numpy as np
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-from mmengine.config import Config
-from mmengine.utils import ProgressBar
-from transformers import AutoConfig, AutoModel
-
-
-class RamDataset(torch.utils.data.Dataset):
- def __init__(self, data_path, is_train=True, num_relation_classes=56):
- super().__init__()
- self.num_relation_classes = num_relation_classes
- data = np.load(data_path, allow_pickle=True)
- self.samples = data["arr_0"]
- sample_num = self.samples.size
- self.sample_idx_list = []
- for idx in range(sample_num):
- if self.samples[idx]["is_train"] == is_train:
- self.sample_idx_list.append(idx)
-
- def __getitem__(self, idx):
- sample = self.samples[self.sample_idx_list[idx]]
- object_num = sample["feat"].shape[0]
- embedding = torch.from_numpy(sample["feat"])
- gt_rels = sample["relations"]
- rel_target = self._get_target(object_num, gt_rels)
- return embedding, rel_target, gt_rels
-
- def __len__(self):
- return len(self.sample_idx_list)
-
- def _get_target(self, object_num, gt_rels):
- rel_target = torch.zeros([self.num_relation_classes, object_num, object_num])
- for ii, jj, cls_relationship in gt_rels:
- rel_target[cls_relationship, ii, jj] = 1
- return rel_target
-
-
-class RamModel(nn.Module):
- def __init__(
- self,
- pretrained_model_name_or_path,
- load_pretrained_weights=True,
- num_transformer_layer=2,
- input_feature_size=256,
- output_feature_size=768,
- cls_feature_size=512,
- num_relation_classes=56,
- pred_type="attention",
- loss_type="bce",
- ):
- super().__init__()
- # 0. config
- self.cls_feature_size = cls_feature_size
- self.num_relation_classes = num_relation_classes
- self.pred_type = pred_type
- self.loss_type = loss_type
-
- # 1. fc input and output
- self.fc_input = nn.Sequential(
- nn.Linear(input_feature_size, output_feature_size),
- nn.LayerNorm(output_feature_size),
- )
- self.fc_output = nn.Sequential(
- nn.Linear(output_feature_size, output_feature_size),
- nn.LayerNorm(output_feature_size),
- )
- # 2. transformer model
- if load_pretrained_weights:
- self.model = AutoModel.from_pretrained(pretrained_model_name_or_path)
- else:
- config = AutoConfig.from_pretrained(pretrained_model_name_or_path)
- self.model = AutoModel.from_config(config)
- if num_transformer_layer != "all" and isinstance(num_transformer_layer, int):
- self.model.encoder.layer = self.model.encoder.layer[:num_transformer_layer]
- # 3. predict head
- self.cls_sub = nn.Linear(output_feature_size, cls_feature_size * num_relation_classes)
- self.cls_obj = nn.Linear(output_feature_size, cls_feature_size * num_relation_classes)
- # 4. loss
- if self.loss_type == "bce":
- self.bce_loss = nn.BCEWithLogitsLoss()
- elif self.loss_type == "multi_label_ce":
- print("Use Multi Label Cross Entropy Loss.")
-
- def forward(self, embeds, attention_mask=None):
- """
- embeds: (batch_size, token_num, feature_size)
- attention_mask: (batch_size, token_num)
- """
- # 1. fc input
- embeds = self.fc_input(embeds)
- # 2. transformer model
- position_ids = torch.ones([1, embeds.shape[1]]).to(embeds.device).to(torch.long)
- outputs = self.model.forward(inputs_embeds=embeds, attention_mask=attention_mask, position_ids=position_ids)
- embeds = outputs["last_hidden_state"]
- # 3. fc output
- embeds = self.fc_output(embeds)
- # 4. predict head
- batch_size, token_num, feature_size = embeds.shape
- sub_embeds = self.cls_sub(embeds).reshape([batch_size, token_num, self.num_relation_classes, self.cls_feature_size]).permute([0, 2, 1, 3])
- obj_embeds = self.cls_obj(embeds).reshape([batch_size, token_num, self.num_relation_classes, self.cls_feature_size]).permute([0, 2, 1, 3])
- if self.pred_type == "attention":
- cls_pred = sub_embeds @ torch.transpose(obj_embeds, 2, 3) / self.cls_feature_size**0.5 # noqa
- elif self.pred_type == "einsum":
- cls_pred = torch.einsum("nrsc,nroc->nrso", sub_embeds, obj_embeds)
- return cls_pred
-
- def loss(self, pred, target, attention_mask):
- loss_dict = dict()
- batch_size, relation_num, _, _ = pred.shape
-
- mask = torch.zeros_like(pred).to(pred.device)
- for idx in range(batch_size):
- n = torch.sum(attention_mask[idx]).to(torch.int)
- mask[idx, :, :n, :n] = 1
- pred = pred * mask - 9999 * (1 - mask)
-
- if self.loss_type == "bce":
- loss = self.bce_loss(pred, target)
- elif self.loss_type == "multi_label_ce":
- input_tensor = torch.permute(pred, (1, 0, 2, 3))
- target_tensor = torch.permute(target, (1, 0, 2, 3))
- input_tensor = input_tensor.reshape([relation_num, -1])  # reshape the permuted tensors so each row is one relation class
- target_tensor = target_tensor.reshape([relation_num, -1])
- loss = self.multilabel_categorical_crossentropy(target_tensor, input_tensor)
- weight = loss / loss.max()
- loss = loss * weight
- loss = loss.mean()
- loss_dict["loss"] = loss
-
- # running metric
- recall_20 = get_recall_N(pred, target, object_num=20)
- loss_dict["recall@20"] = recall_20
- return loss_dict
-
- def multilabel_categorical_crossentropy(self, y_true, y_pred):
- """
- https://kexue.fm/archives/7359
- """
- y_pred = (1 - 2 * y_true) * y_pred
- y_pred_neg = y_pred - y_true * 9999
- y_pred_pos = y_pred - (1 - y_true) * 9999
- zeros = torch.zeros_like(y_pred[..., :1])
- y_pred_neg = torch.cat([y_pred_neg, zeros], dim=-1)
- y_pred_pos = torch.cat([y_pred_pos, zeros], dim=-1)
- neg_loss = torch.logsumexp(y_pred_neg, dim=-1)
- pos_loss = torch.logsumexp(y_pred_pos, dim=-1)
- return neg_loss + pos_loss
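For reference, the loss computed above (following the kexue.fm post linked in the docstring) is, for one relation class with positive pair set P, negative pair set N, and raw scores s:

```latex
\mathcal{L} \;=\; \log\Big(1 + \sum_{i \in P} e^{-s_i}\Big) \;+\; \log\Big(1 + \sum_{j \in N} e^{s_j}\Big)
```

The zero column concatenated in the code supplies the 1 inside each logarithm, and the plus/minus 9999 offsets drop the opposite set from each logsumexp.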
-
-
-def get_recall_N(y_pred, y_true, object_num=20):
- """
- y_pred: [batch_size, 56, object_num, object_num]
- y_true: [batch_size, 56, object_num, object_num]
- """
-
- device = y_pred.device
- recall_list = []
-
- for idx in range(len(y_true)):
- sample_y_true = []
- sample_y_pred = []
-
- # find topk
- _, topk_indices = torch.topk(
- y_true[idx : idx + 1].reshape(
- [
- -1,
- ]
- ),
- k=object_num,
- )
- for index in topk_indices:
- pred_cls = index // (y_true.shape[2] ** 2)
- index_subject_object = index % (y_true.shape[2] ** 2)
- pred_subject = index_subject_object // y_true.shape[2]
- pred_object = index_subject_object % y_true.shape[2]
- if y_true[idx, pred_cls, pred_subject, pred_object] == 0:
- continue
- sample_y_true.append([pred_subject, pred_object, pred_cls])
-
- # find topk
- _, topk_indices = torch.topk(
- y_pred[idx : idx + 1].reshape(
- [
- -1,
- ]
- ),
- k=object_num,
- )
- for index in topk_indices:
- pred_cls = index // (y_pred.shape[2] ** 2)
- index_subject_object = index % (y_pred.shape[2] ** 2)
- pred_subject = index_subject_object // y_pred.shape[2]
- pred_object = index_subject_object % y_pred.shape[2]
- sample_y_pred.append([pred_subject, pred_object, pred_cls])
-
- recall = len([x for x in sample_y_pred if x in sample_y_true]) / (len(sample_y_true) + 1e-8)
- recall_list.append(recall)
-
- recall = torch.tensor(recall_list).to(device).mean() * 100
- return recall
-
-
-class RamTrainer(object):
- def __init__(self, config):
- self.config = config
- self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
- self._build_dataset()
- self._build_dataloader()
- self._build_model()
- self._build_optimizer()
- self._build_lr_scheduler()
-
- def _build_dataset(self):
- self.dataset = RamDataset(**self.config.dataset)
-
- def _build_dataloader(self):
- self.dataloader = torch.utils.data.DataLoader(
- self.dataset,
- batch_size=self.config.dataloader.batch_size,
- shuffle=True if self.config.dataset.is_train else False,
- )
-
- def _build_model(self):
- self.model = RamModel(**self.config.model).to(self.device)
- if self.config.load_from is not None:
- self.model.load_state_dict(torch.load(self.config.load_from))
- self.model.train()
-
- def _build_optimizer(self):
- self.optimizer = torch.optim.AdamW(self.model.parameters(), lr=self.config.optim.lr, weight_decay=self.config.optim.weight_decay, eps=self.config.optim.eps, betas=self.config.optim.betas)
-
- def _build_lr_scheduler(self):
- self.lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(self.optimizer, milestones=self.config.optim.lr_scheduler.step, gamma=self.config.optim.lr_scheduler.gamma)
-
- def train(self):
- t_start = time.time()
- running_avg_loss = 0
- for epoch_idx in range(self.config.num_epoch):
- for batch_idx, batch_data in enumerate(self.dataloader):
- batch_embeds = batch_data[0].to(torch.float32).to(self.device)
- batch_target = batch_data[1].to(torch.float32).to(self.device)
- attention_mask = batch_embeds.new_ones((batch_embeds.shape[0], batch_embeds.shape[1]))
- batch_pred = self.model.forward(batch_embeds, attention_mask)
- loss_dict = self.model.loss(batch_pred, batch_target, attention_mask)
- loss = loss_dict["loss"]
- recall_20 = loss_dict["recall@20"]
- self.optimizer.zero_grad()
- loss.backward()
- torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.config.optim.max_norm, self.config.optim.norm_type)
- self.optimizer.step()
- running_avg_loss += loss.item()
-
- if batch_idx % 100 == 0:
- t_current = time.time()
- num_finished_step = epoch_idx * len(self.dataloader) + batch_idx + 1
- num_to_do_step = (self.config.num_epoch - epoch_idx - 1) * len(self.dataloader) + (len(self.dataloader) - batch_idx - 1)
- avg_speed = num_finished_step / (t_current - t_start)
- eta = num_to_do_step / avg_speed
- print(
- "ETA={:0>8}, Epoch={}, Batch={}/{}, LR={}, Loss={:.4f}, RunningAvgLoss={:.4f}, Recall@20={:.2f}%".format(
- str(timedelta(seconds=int(eta))), epoch_idx + 1, batch_idx, len(self.dataloader), self.lr_scheduler.get_last_lr()[0], loss.item(), running_avg_loss / num_finished_step, recall_20.item()
- )
- )
- self.lr_scheduler.step()
- if not os.path.exists(self.config.output_dir):
- os.makedirs(self.config.output_dir)
- save_path = os.path.join(self.config.output_dir, "epoch_{}.pth".format(epoch_idx + 1))
- print("Save epoch={} checkpoint to {}".format(epoch_idx + 1, save_path))
- torch.save(self.model.state_dict(), save_path)
- return save_path
-
-
-class RamPredictor(object):
- def __init__(self, config):
- self.config = config
- self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
- self._build_dataset()
- self._build_dataloader()
- self._build_model()
-
- def _build_dataset(self):
- self.dataset = RamDataset(**self.config.dataset)
-
- def _build_dataloader(self):
- self.dataloader = torch.utils.data.DataLoader(self.dataset, batch_size=self.config.dataloader.batch_size, shuffle=False)
-
- def _build_model(self):
- self.model = RamModel(**self.config.model).to(self.device)
- if self.config.load_from is not None:
- self.model.load_state_dict(torch.load(self.config.load_from))
- self.model.eval()
-
- def predict(self, batch_embeds, pred_keep_num=100):
- """
- Parameters
- ----------
- batch_embeds: (batch_size=1, token_num, feature_size)
- pred_keep_num: int
- Returns
- -------
- batch_pred: (batch_size, relation_num, object_num, object_num)
- pred_rels: [[sub_id, obj_id, rel_id], ...]
- """
- if not isinstance(batch_embeds, torch.Tensor):
- batch_embeds = torch.asarray(batch_embeds)
- batch_embeds = batch_embeds.to(torch.float32).to(self.device)
- attention_mask = batch_embeds.new_ones((batch_embeds.shape[0], batch_embeds.shape[1]))
- batch_pred = self.model.forward(batch_embeds, attention_mask)
- for idx_i in range(batch_pred.shape[2]):
- batch_pred[:, :, idx_i, idx_i] = -9999
- batch_pred = batch_pred.sigmoid()
-
- pred_rels = []
- _, topk_indices = torch.topk(
- batch_pred.reshape(
- [
- -1,
- ]
- ),
- k=pred_keep_num,
- )
-
- # subject, object, relation
- for index in topk_indices:
- pred_relation = index // (batch_pred.shape[2] ** 2)
- index_subject_object = index % (batch_pred.shape[2] ** 2)
- pred_subject = index_subject_object // batch_pred.shape[2]
- pred_object = index_subject_object % batch_pred.shape[2]
- pred = [pred_subject.item(), pred_object.item(), pred_relation.item()]
- pred_rels.append(pred)
- return batch_pred, pred_rels
-
- def eval(self):
- sum_recall_20 = 0.0
- sum_recall_50 = 0.0
- sum_recall_100 = 0.0
- prog_bar = ProgressBar(len(self.dataloader))
- for batch_idx, batch_data in enumerate(self.dataloader):
- batch_embeds = batch_data[0]
- batch_target = batch_data[1]
- gt_rels = batch_data[2]
- batch_pred, pred_rels = self.predict(batch_embeds)
- this_recall_20 = get_recall_N(batch_pred, batch_target, object_num=20)
- this_recall_50 = get_recall_N(batch_pred, batch_target, object_num=50)
- this_recall_100 = get_recall_N(batch_pred, batch_target, object_num=100)
- sum_recall_20 += this_recall_20.item()
- sum_recall_50 += this_recall_50.item()
- sum_recall_100 += this_recall_100.item()
- prog_bar.update()
- recall_20 = sum_recall_20 / len(self.dataloader)
- recall_50 = sum_recall_50 / len(self.dataloader)
- recall_100 = sum_recall_100 / len(self.dataloader)
- metric = {
- "recall_20": recall_20,
- "recall_50": recall_50,
- "recall_100": recall_100,
- }
- return metric
-
-
-if __name__ == "__main__":
- # Config
- config = dict(
- dataset=dict(
- data_path="./data/feat_0420.npz",
- is_train=True,
- num_relation_classes=56,
- ),
- dataloader=dict(
- batch_size=4,
- ),
- model=dict(
- pretrained_model_name_or_path="bert-base-uncased",
- load_pretrained_weights=True,
- num_transformer_layer=2,
- input_feature_size=256,
- output_feature_size=768,
- cls_feature_size=512,
- num_relation_classes=56,
- pred_type="attention",
- loss_type="multi_label_ce",
- ),
- optim=dict(
- lr=1e-4,
- weight_decay=0.05,
- eps=1e-8,
- betas=(0.9, 0.999),
- max_norm=0.01,
- norm_type=2,
- lr_scheduler=dict(
- step=[6, 10],
- gamma=0.1,
- ),
- ),
- num_epoch=12,
- output_dir="./work_dirs",
- load_from=None,
- )
-
- # Train
- config = Config(config)
- trainer = RamTrainer(config)
- last_model_path = trainer.train()
-
- # Test/Eval
- config.dataset.is_train = False
- config.load_from = last_model_path
- predictor = RamPredictor(config)
- metric = predictor.eval()
- print(metric)
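
A minimal sketch (not from the deleted file) of the flat-index decoding that RamPredictor.predict() performs when it converts top-k scores back into [subject, object, relation] triples; shapes and values below are illustrative only:

    import torch

    relation_num, object_num = 56, 4              # illustrative sizes
    scores = torch.rand(relation_num, object_num, object_num)
    _, topk_indices = torch.topk(scores.reshape(-1), k=3)

    for index in topk_indices:
        rel = index // (object_num ** 2)          # which relation plane
        sub_obj = index % (object_num ** 2)
        sub, obj = sub_obj // object_num, sub_obj % object_num
        assert scores[rel, sub, obj] == scores.reshape(-1)[index]
        print([sub.item(), obj.item(), rel.item()])
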
diff --git a/spaces/mrneuralnet/P-DFD/dataset/__init__.py b/spaces/mrneuralnet/P-DFD/dataset/__init__.py
deleted file mode 100644
index 27584c89c9379621bffb513aafafe2cc1bd41b8c..0000000000000000000000000000000000000000
--- a/spaces/mrneuralnet/P-DFD/dataset/__init__.py
+++ /dev/null
@@ -1,17 +0,0 @@
-from .abstract_dataset import AbstractDataset
-from .faceforensics import FaceForensics
-from .wild_deepfake import WildDeepfake
-from .celeb_df import CelebDF
-from .dfdc import DFDC
-
-LOADERS = {
- "FaceForensics": FaceForensics,
- "WildDeepfake": WildDeepfake,
- "CelebDF": CelebDF,
- "DFDC": DFDC,
-}
-
-
-def load_dataset(name="FaceForensics"):
- print(f"Loading dataset: '{name}'...")
- return LOADERS[name]
diff --git a/spaces/mrneuralnet/P-DFD/layers/functions/prior_box.py b/spaces/mrneuralnet/P-DFD/layers/functions/prior_box.py
deleted file mode 100644
index 80c7f858371ed71f39ed609eb44b423d8693bf61..0000000000000000000000000000000000000000
--- a/spaces/mrneuralnet/P-DFD/layers/functions/prior_box.py
+++ /dev/null
@@ -1,34 +0,0 @@
-import torch
-from itertools import product as product
-import numpy as np
-from math import ceil
-
-
-class PriorBox(object):
- def __init__(self, cfg, image_size=None, phase='train'):
- super(PriorBox, self).__init__()
- self.min_sizes = cfg['min_sizes']
- self.steps = cfg['steps']
- self.clip = cfg['clip']
- self.image_size = image_size
- self.feature_maps = [[ceil(self.image_size[0]/step), ceil(self.image_size[1]/step)] for step in self.steps]
- self.name = "s"
-
- def forward(self):
- anchors = []
- for k, f in enumerate(self.feature_maps):
- min_sizes = self.min_sizes[k]
- for i, j in product(range(f[0]), range(f[1])):
- for min_size in min_sizes:
- s_kx = min_size / self.image_size[1]
- s_ky = min_size / self.image_size[0]
- dense_cx = [x * self.steps[k] / self.image_size[1] for x in [j + 0.5]]
- dense_cy = [y * self.steps[k] / self.image_size[0] for y in [i + 0.5]]
- for cy, cx in product(dense_cy, dense_cx):
- anchors += [cx, cy, s_kx, s_ky]
-
- # back to torch land
- output = torch.Tensor(anchors).view(-1, 4)
- if self.clip:
- output.clamp_(max=1, min=0)
- return output
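
A hypothetical usage sketch for PriorBox above (assumes the class definition is in scope); the cfg values are placeholders shaped like the keys the constructor reads, not the detector's real configuration:

    cfg = {
        'min_sizes': [[16, 32], [64, 128], [256, 512]],  # placeholder values
        'steps': [8, 16, 32],
        'clip': False,
    }
    priors = PriorBox(cfg, image_size=(640, 640)).forward()
    # one row per (cell, min_size): [cx, cy, s_kx, s_ky], normalized by the image size
    print(priors.shape)  # torch.Size([16800, 4]) for this placeholder config
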
diff --git a/spaces/mrneuralnet/P-PD/networks/drn_seg.py b/spaces/mrneuralnet/P-PD/networks/drn_seg.py
deleted file mode 100644
index 084a39bc0ee42a533d6151508ec93fc3680753fd..0000000000000000000000000000000000000000
--- a/spaces/mrneuralnet/P-PD/networks/drn_seg.py
+++ /dev/null
@@ -1,95 +0,0 @@
-import math
-import torch
-import torch.nn as nn
-from networks.drn import drn_c_26
-
-
-def fill_up_weights(up):
- w = up.weight.data
- f = math.ceil(w.size(2) / 2)
- c = (2 * f - 1 - f % 2) / (2. * f)
- for i in range(w.size(2)):
- for j in range(w.size(3)):
- w[0, 0, i, j] = \
- (1 - math.fabs(i / f - c)) * (1 - math.fabs(j / f - c))
- for c in range(1, w.size(0)):
- w[c, 0, :, :] = w[0, 0, :, :]
-
-
-class DRNSeg(nn.Module):
- def __init__(self, classes, pretrained_drn=False,
- pretrained_model=None, use_torch_up=False):
- super(DRNSeg, self).__init__()
-
- model = drn_c_26(pretrained=pretrained_drn)
- self.base = nn.Sequential(*list(model.children())[:-2])
- if pretrained_model:
- self.load_pretrained(pretrained_model)
-
- self.seg = nn.Conv2d(model.out_dim, classes,
- kernel_size=1, bias=True)
-
- m = self.seg
- n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
- m.weight.data.normal_(0, math.sqrt(2. / n))
- m.bias.data.zero_()
- if use_torch_up:
- self.up = nn.UpsamplingBilinear2d(scale_factor=8)
- else:
- up = nn.ConvTranspose2d(classes, classes, 16, stride=8, padding=4,
- output_padding=0, groups=classes,
- bias=False)
- fill_up_weights(up)
- up.weight.requires_grad = False
- self.up = up
-
- def forward(self, x):
- x = self.base(x)
- x = self.seg(x)
- y = self.up(x)
- return y
-
- def optim_parameters(self, memo=None):
- for param in self.base.parameters():
- yield param
- for param in self.seg.parameters():
- yield param
-
- def load_pretrained(self, pretrained_model):
- print("loading the pretrained drn model from %s" % pretrained_model)
- state_dict = torch.load(pretrained_model, map_location='cpu')
- if hasattr(state_dict, '_metadata'):
- del state_dict._metadata
-
- # filter out unnecessary keys
- pretrained_dict = state_dict['model']
- pretrained_dict = {k[5:]: v for k, v in pretrained_dict.items() if k.split('.')[0] == 'base'}
-
- # load the pretrained state dict
- self.base.load_state_dict(pretrained_dict)
-
-
-class DRNSub(nn.Module):
- def __init__(self, num_classes, pretrained_model=None, fix_base=False):
- super(DRNSub, self).__init__()
-
- drnseg = DRNSeg(2)
- if pretrained_model:
- print("loading the pretrained drn model from %s" % pretrained_model)
- state_dict = torch.load(pretrained_model, map_location='cpu')
- drnseg.load_state_dict(state_dict['model'])
-
- self.base = drnseg.base
- if fix_base:
- for param in self.base.parameters():
- param.requires_grad = False
-
- self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
- self.fc = nn.Linear(512, num_classes)
-
- def forward(self, x):
- x = self.base(x)
- x = self.avgpool(x)
- x = x.view(x.size(0), -1)
- x = self.fc(x)
- return x
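
A small check (assuming fill_up_weights above is in scope) of the idea behind it: the deconvolution it initializes behaves like bilinear ×8 upsampling, so a constant input stays constant away from the borders:

    import torch
    import torch.nn as nn

    up = nn.ConvTranspose2d(1, 1, 16, stride=8, padding=4,
                            output_padding=0, groups=1, bias=False)
    fill_up_weights(up)

    x = torch.ones(1, 1, 8, 8)
    y = up(x)
    print(y.shape)                      # torch.Size([1, 1, 64, 64])
    print(y[0, 0, 24:40, 24:40].std())  # ~0: the interior stays flat, as bilinear upsampling should
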
diff --git a/spaces/mshukor/UnIVAL/fairseq/examples/textless_nlp/gslm/speech2unit/pretrained/utils.py b/spaces/mshukor/UnIVAL/fairseq/examples/textless_nlp/gslm/speech2unit/pretrained/utils.py
deleted file mode 100644
index 5aaddf6421ab7fa417af508005671a0ed821c701..0000000000000000000000000000000000000000
--- a/spaces/mshukor/UnIVAL/fairseq/examples/textless_nlp/gslm/speech2unit/pretrained/utils.py
+++ /dev/null
@@ -1,126 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import gc
-import os
-import random
-import shutil
-import numpy as np
-
-import torch
-import tqdm
-from examples.textless_nlp.gslm.speech2unit.pretrained.cpc_feature_reader import (
- CpcFeatureReader,
-)
-from examples.textless_nlp.gslm.speech2unit.pretrained.hubert_feature_reader import (
- HubertFeatureReader,
-)
-from examples.textless_nlp.gslm.speech2unit.pretrained.logmel_feature_reader import (
- LogMelFeatureReader,
-)
-from examples.textless_nlp.gslm.speech2unit.pretrained.w2v2_feature_reader import (
- Wav2VecFeatureReader,
-)
-
-
-def get_feature_reader(feature_type):
- if feature_type == "logmel":
- return LogMelFeatureReader
- elif feature_type == "hubert":
- return HubertFeatureReader
- elif feature_type == "w2v2":
- return Wav2VecFeatureReader
- elif feature_type == "cpc":
- return CpcFeatureReader
- else:
- raise NotImplementedError(f"{feature_type} is not supported.")
-
-
-def get_feature_iterator(
- feature_type, checkpoint_path, layer, manifest_path, sample_pct
-):
- feature_reader_cls = get_feature_reader(feature_type)
- with open(manifest_path, "r") as fp:
- lines = fp.read().split("\n")
- root = lines.pop(0).strip()
- file_path_list = [
- os.path.join(root, line.split("\t")[0])
- for line in lines
- if len(line) > 0
- ]
- if sample_pct < 1.0:
- file_path_list = random.sample(
- file_path_list, int(sample_pct * len(file_path_list))
- )
- num_files = len(file_path_list)
- reader = feature_reader_cls(
- checkpoint_path=checkpoint_path, layer=layer
- )
-
- def iterate():
- for file_path in file_path_list:
- feats = reader.get_feats(file_path)
- yield feats.cpu().numpy()
-
- return iterate, num_files
-
-
-def get_features(
- feature_type, checkpoint_path, layer, manifest_path, sample_pct, flatten
-):
- generator, num_files = get_feature_iterator(
- feature_type=feature_type,
- checkpoint_path=checkpoint_path,
- layer=layer,
- manifest_path=manifest_path,
- sample_pct=sample_pct,
- )
- iterator = generator()
-
- features_list = []
- for features in tqdm.tqdm(iterator, total=num_files):
- features_list.append(features)
-
- # Explicit clean up
- del iterator
- del generator
- gc.collect()
- torch.cuda.empty_cache()
-
- if flatten:
- return np.concatenate(features_list)
-
- return features_list
-
-
-def get_and_dump_features(
- feature_type,
- checkpoint_path,
- layer,
- manifest_path,
- sample_pct,
- flatten,
- out_features_path,
-):
- # Feature extraction
- features_batch = get_features(
- feature_type=feature_type,
- checkpoint_path=checkpoint_path,
- layer=layer,
- manifest_path=manifest_path,
- sample_pct=sample_pct,
- flatten=flatten,
- )
-
- # Save features
- out_dir_path = os.path.dirname(out_features_path)
- os.makedirs(out_dir_path, exist_ok=True)
- shutil.copyfile(
- manifest_path,
- os.path.join(out_dir_path, os.path.basename(manifest_path)),
- )
- np.save(out_features_path, features_batch)
-
- return features_batch
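
A hypothetical invocation of get_and_dump_features(); every path and the layer index are placeholders, and the manifest is the root-line-plus-tab-separated-entries file that get_feature_iterator() above expects:

    features = get_and_dump_features(
        feature_type="hubert",
        checkpoint_path="/path/to/hubert_checkpoint.pt",   # placeholder
        layer=6,                                           # placeholder layer index
        manifest_path="/path/to/train.tsv",                # first line: root dir; then "<relpath>\t..."
        sample_pct=0.1,
        flatten=True,
        out_features_path="/path/to/features.npy",
    )
    print(features.shape)   # (total_frames, feature_dim) when flatten=True
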
diff --git a/spaces/mshukor/UnIVAL/fairseq/fairseq/scoring/__init__.py b/spaces/mshukor/UnIVAL/fairseq/fairseq/scoring/__init__.py
deleted file mode 100644
index 58f2f563e493327394dff1265030d18f0814b5a2..0000000000000000000000000000000000000000
--- a/spaces/mshukor/UnIVAL/fairseq/fairseq/scoring/__init__.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-
-import importlib
-import os
-from abc import ABC, abstractmethod
-
-from fairseq import registry
-from omegaconf import DictConfig
-
-
-class BaseScorer(ABC):
- def __init__(self, cfg):
- self.cfg = cfg
- self.ref = []
- self.pred = []
-
- def add_string(self, ref, pred):
- self.ref.append(ref)
- self.pred.append(pred)
-
- @abstractmethod
- def score(self) -> float:
- pass
-
- @abstractmethod
- def result_string(self) -> str:
- pass
-
-
-_build_scorer, register_scorer, SCORER_REGISTRY, _ = registry.setup_registry(
- "--scoring", default="bleu"
-)
-
-
-def build_scorer(choice, tgt_dict):
- _choice = choice._name if isinstance(choice, DictConfig) else choice
-
- if _choice == "bleu":
- from fairseq.scoring import bleu
-
- return bleu.Scorer(
- bleu.BleuConfig(pad=tgt_dict.pad(), eos=tgt_dict.eos(), unk=tgt_dict.unk())
- )
- return _build_scorer(choice)
-
-
-# automatically import any Python files in the current directory
-for file in sorted(os.listdir(os.path.dirname(__file__))):
- if file.endswith(".py") and not file.startswith("_"):
- module = file[: file.find(".py")]
- importlib.import_module("fairseq.scoring." + module)
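
A hedged sketch of how a new scorer would plug into the registry set up above, following the @register_scorer pattern used by the modules this loop imports; the scorer and its name are hypothetical:

    @register_scorer("char_count")           # hypothetical scorer name
    class CharCountScorer(BaseScorer):
        def score(self) -> float:
            return float(sum(len(p) for p in self.pred))

        def result_string(self) -> str:
            return f"char_count: {self.score():.1f}"
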
diff --git a/spaces/mshukor/UnIVAL/fairseq/fairseq/tasks/sentence_ranking.py b/spaces/mshukor/UnIVAL/fairseq/fairseq/tasks/sentence_ranking.py
deleted file mode 100644
index bed44f34e5f8e506b6ae7ba30ddaa661bf4a7522..0000000000000000000000000000000000000000
--- a/spaces/mshukor/UnIVAL/fairseq/fairseq/tasks/sentence_ranking.py
+++ /dev/null
@@ -1,219 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import logging
-import os
-
-import numpy as np
-from fairseq import utils
-from fairseq.data import (
- ConcatSentencesDataset,
- Dictionary,
- IdDataset,
- NestedDictionaryDataset,
- NumelDataset,
- NumSamplesDataset,
- PrependTokenDataset,
- RawLabelDataset,
- RightPadDataset,
- SortDataset,
- TruncateDataset,
- data_utils,
-)
-from fairseq.data.shorten_dataset import maybe_shorten_dataset
-from fairseq.tasks import LegacyFairseqTask, register_task
-
-
-logger = logging.getLogger(__name__)
-
-
-@register_task("sentence_ranking")
-class SentenceRankingTask(LegacyFairseqTask):
- """
- Ranking task on multiple sentences.
-
- Args:
- dictionary (Dictionary): the dictionary for the input of the task
- """
-
- @staticmethod
- def add_args(parser):
- """Add task-specific arguments to the parser."""
- parser.add_argument("data", metavar="FILE", help="file prefix for data")
- parser.add_argument(
- "--num-classes", type=int, help="number of sentences to be ranked"
- )
- parser.add_argument(
- "--init-token",
- type=int,
- help="add token at the beginning of each batch item",
- )
- parser.add_argument(
- "--separator-token", type=int, help="add separator token between inputs"
- )
- parser.add_argument("--no-shuffle", action="store_true")
- parser.add_argument(
- "--shorten-method",
- default="none",
- choices=["none", "truncate", "random_crop"],
- help="if not none, shorten sequences that exceed --tokens-per-sample",
- )
- parser.add_argument(
- "--shorten-data-split-list",
- default="",
- help="comma-separated list of dataset splits to apply shortening to, "
- 'e.g., "train,valid" (default: all dataset splits)',
- )
- parser.add_argument(
- "--max-option-length", type=int, help="max length for each option"
- )
-
- def __init__(self, args, dictionary):
- super().__init__(args)
- self.dictionary = dictionary
-
- @classmethod
- def load_dictionary(cls, args, filename, source=True):
- """Load the dictionary from the filename
-
- Args:
- filename (str): the filename
- """
- dictionary = Dictionary.load(filename)
- dictionary.add_symbol("<mask>")
- return dictionary
-
- @classmethod
- def setup_task(cls, args, **kwargs):
- assert (
- args.criterion == "sentence_ranking"
- ), "Must set --criterion=sentence_ranking"
-
- # load data dictionary
- data_dict = cls.load_dictionary(
- args,
- os.path.join(args.data, "input0", "dict.txt"),
- source=True,
- )
- logger.info("[input] dictionary: {} types".format(len(data_dict)))
- return SentenceRankingTask(args, data_dict)
-
- def load_dataset(self, split, combine=False, **kwargs):
- """Load a given dataset split (e.g., train, valid, test)."""
-
- def get_path(type, split):
- return os.path.join(self.args.data, type, split)
-
- def make_dataset(type, dictionary):
- split_path = get_path(type, split)
-
- dataset = data_utils.load_indexed_dataset(
- split_path,
- self.source_dictionary,
- self.args.dataset_impl,
- combine=combine,
- )
- return dataset
-
- input0 = make_dataset("input0", self.source_dictionary)
- input_options = [
- make_dataset("input{idx}".format(idx=idx + 1), self.source_dictionary)
- for idx in range(self.args.num_classes)
- ]
-
- if self.args.separator_token is not None:
- input0 = PrependTokenDataset(input0, self.args.separator_token)
-
- src_tokens = []
- for input_option in input_options:
- if self.args.init_token is not None:
- input_option = PrependTokenDataset(input_option, self.args.init_token)
- if self.args.max_option_length is not None:
- input_option = TruncateDataset(
- input_option, self.args.max_option_length
- )
- src_token = ConcatSentencesDataset(input_option, input0)
- src_token = maybe_shorten_dataset(
- src_token,
- split,
- self.args.shorten_data_split_list,
- self.args.shorten_method,
- self.args.max_positions,
- self.args.seed,
- )
- src_tokens.append(src_token)
-
- with data_utils.numpy_seed(self.args.seed):
- shuffle = np.random.permutation(len(src_tokens[0]))
-
- dataset = {
- "id": IdDataset(),
- "nsentences": NumSamplesDataset(),
- "ntokens": NumelDataset(src_tokens[0], reduce=True),
- }
-
- for src_token_idx in range(len(src_tokens)):
- dataset.update(
- {
- "net_input{idx}".format(idx=src_token_idx + 1): {
- "src_tokens": RightPadDataset(
- src_tokens[src_token_idx],
- pad_idx=self.source_dictionary.pad(),
- ),
- "src_lengths": NumelDataset(
- src_tokens[src_token_idx], reduce=False
- ),
- }
- }
- )
-
- label_path = "{}.label".format(get_path("label", split))
- if os.path.exists(label_path):
- with open(label_path) as h:
- dataset.update(
- target=RawLabelDataset([int(x.strip()) for x in h.readlines()])
- )
-
- nested_dataset = NestedDictionaryDataset(
- dataset,
- sizes=[np.maximum.reduce([src_token.sizes for src_token in src_tokens])],
- )
-
- if self.args.no_shuffle:
- dataset = nested_dataset
- else:
- dataset = SortDataset(
- nested_dataset,
- # shuffle
- sort_order=[shuffle],
- )
-
- logger.info("Loaded {0} with #samples: {1}".format(split, len(dataset)))
-
- self.datasets[split] = dataset
- return self.datasets[split]
-
- def build_model(self, args):
- from fairseq import models
-
- model = models.build_model(args, self)
-
- model.register_classification_head(
- getattr(args, "ranking_head_name", "sentence_classification_head"),
- num_classes=1,
- )
-
- return model
-
- def max_positions(self):
- return self.args.max_positions
-
- @property
- def source_dictionary(self):
- return self.dictionary
-
- @property
- def target_dictionary(self):
- return self.dictionary
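
The on-disk layout this task expects can be read off load_dataset() and setup_task() above; the sketch below restates it with placeholder names (split names and file extensions depend on --dataset-impl):

    # <data>/input0/dict.txt                 # dictionary loaded by setup_task()
    # <data>/input0/<split>.*                # shared context, one indexed dataset per split
    # <data>/input1/<split>.*                # candidate 1
    # ...
    # <data>/input<num_classes>/<split>.*    # candidate N (N = --num-classes)
    # <data>/label/<split>.label             # optional: one integer label per example
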
diff --git a/spaces/multimodalart/TAV-poli-2/app_inference.py b/spaces/multimodalart/TAV-poli-2/app_inference.py
deleted file mode 100644
index d705504e5bc7a8938e1b5fcfb207f4cb731c866b..0000000000000000000000000000000000000000
--- a/spaces/multimodalart/TAV-poli-2/app_inference.py
+++ /dev/null
@@ -1,170 +0,0 @@
-#!/usr/bin/env python
-
-from __future__ import annotations
-
-import enum
-
-import gradio as gr
-from huggingface_hub import HfApi
-
-from constants import MODEL_LIBRARY_ORG_NAME, UploadTarget
-from inference import InferencePipeline
-from utils import find_exp_dirs
-
-
-class ModelSource(enum.Enum):
- HUB_LIB = UploadTarget.MODEL_LIBRARY.value
- LOCAL = 'Local'
-
-
-class InferenceUtil:
- def __init__(self, hf_token: str | None):
- self.hf_token = hf_token
-
- def load_hub_model_list(self) -> dict:
- api = HfApi(token=self.hf_token)
- choices = [
- info.modelId
- for info in api.list_models(author=MODEL_LIBRARY_ORG_NAME)
- ]
- return gr.update(choices=choices,
- value=choices[0] if choices else None)
-
- @staticmethod
- def load_local_model_list() -> dict:
- choices = find_exp_dirs()
- return gr.update(choices=choices,
- value=choices[0] if choices else None)
-
- def reload_model_list(self, model_source: str) -> dict:
- if model_source == ModelSource.HUB_LIB.value:
- return self.load_hub_model_list()
- elif model_source == ModelSource.LOCAL.value:
- return self.load_local_model_list()
- else:
- raise ValueError
-
- def load_model_info(self, model_id: str) -> tuple[str, str]:
- try:
- card = InferencePipeline.get_model_card(model_id, self.hf_token)
- except Exception:
- return '', ''
- base_model = getattr(card.data, 'base_model', '')
- training_prompt = getattr(card.data, 'training_prompt', '')
- return base_model, training_prompt
-
- def reload_model_list_and_update_model_info(
- self, model_source: str) -> tuple[dict, str, str]:
- model_list_update = self.reload_model_list(model_source)
- model_list = model_list_update['choices']
- model_info = self.load_model_info(model_list[0] if model_list else '')
- return model_list_update, *model_info
-
-
-def create_inference_demo(pipe: InferencePipeline,
- hf_token: str | None = None) -> gr.Blocks:
- app = InferenceUtil(hf_token)
-
- with gr.Blocks() as demo:
- with gr.Row():
- with gr.Column():
- with gr.Box():
- model_source = gr.Radio(
- label='Model Source',
- choices=[_.value for _ in ModelSource],
- value=ModelSource.HUB_LIB.value)
- reload_button = gr.Button('Reload Model List')
- model_id = gr.Dropdown(label='Model ID',
- choices=None,
- value=None)
- with gr.Accordion(
- label=
- 'Model info (Base model and prompt used for training)',
- open=False):
- with gr.Row():
- base_model_used_for_training = gr.Text(
- label='Base model', interactive=False)
- prompt_used_for_training = gr.Text(
- label='Training prompt', interactive=False)
- prompt = gr.Textbox(
- label='Prompt',
- max_lines=1,
- placeholder='Example: "A panda is surfing"')
- video_length = gr.Slider(label='Video length',
- minimum=4,
- maximum=12,
- step=1,
- value=8)
- fps = gr.Slider(label='FPS',
- minimum=1,
- maximum=12,
- step=1,
- value=1)
- seed = gr.Slider(label='Seed',
- minimum=0,
- maximum=100000,
- step=1,
- value=0)
- with gr.Accordion('Other Parameters', open=False):
- num_steps = gr.Slider(label='Number of Steps',
- minimum=0,
- maximum=100,
- step=1,
- value=50)
- guidance_scale = gr.Slider(label='CFG Scale',
- minimum=0,
- maximum=50,
- step=0.1,
- value=7.5)
-
- run_button = gr.Button('Generate')
-
- gr.Markdown('''
- - After training, you can press the "Reload Model List" button to load your trained model names.
- - The first run takes a few minutes to download the model.
- - Expected time to generate an 8-frame video: 70 seconds with a T4, 24 seconds with an A10G, 10 seconds with an A100.
- ''')
- with gr.Column():
- result = gr.Video(label='Result')
-
- model_source.change(fn=app.reload_model_list_and_update_model_info,
- inputs=model_source,
- outputs=[
- model_id,
- base_model_used_for_training,
- prompt_used_for_training,
- ])
- reload_button.click(fn=app.reload_model_list_and_update_model_info,
- inputs=model_source,
- outputs=[
- model_id,
- base_model_used_for_training,
- prompt_used_for_training,
- ])
- model_id.change(fn=app.load_model_info,
- inputs=model_id,
- outputs=[
- base_model_used_for_training,
- prompt_used_for_training,
- ])
- inputs = [
- model_id,
- prompt,
- video_length,
- fps,
- seed,
- num_steps,
- guidance_scale,
- ]
- prompt.submit(fn=pipe.run, inputs=inputs, outputs=result)
- run_button.click(fn=pipe.run, inputs=inputs, outputs=result)
- return demo
-
-
-if __name__ == '__main__':
- import os
-
- hf_token = os.getenv('HF_TOKEN')
- pipe = InferencePipeline(hf_token)
- demo = create_inference_demo(pipe, hf_token)
- demo.queue(max_size=10).launch(share=False)
diff --git a/spaces/multimodalart/latentdiffusion/latent-diffusion/scripts/inpaint.py b/spaces/multimodalart/latentdiffusion/latent-diffusion/scripts/inpaint.py
deleted file mode 100644
index d6e6387a9a3b0afa73fae8af25f43a8ba856240e..0000000000000000000000000000000000000000
--- a/spaces/multimodalart/latentdiffusion/latent-diffusion/scripts/inpaint.py
+++ /dev/null
@@ -1,98 +0,0 @@
-import argparse, os, sys, glob
-from omegaconf import OmegaConf
-from PIL import Image
-from tqdm import tqdm
-import numpy as np
-import torch
-from main import instantiate_from_config
-from ldm.models.diffusion.ddim import DDIMSampler
-
-
-def make_batch(image, mask, device):
- image = np.array(Image.open(image).convert("RGB"))
- image = image.astype(np.float32)/255.0
- image = image[None].transpose(0,3,1,2)
- image = torch.from_numpy(image)
-
- mask = np.array(Image.open(mask).convert("L"))
- mask = mask.astype(np.float32)/255.0
- mask = mask[None,None]
- mask[mask < 0.5] = 0
- mask[mask >= 0.5] = 1
- mask = torch.from_numpy(mask)
-
- masked_image = (1-mask)*image
-
- batch = {"image": image, "mask": mask, "masked_image": masked_image}
- for k in batch:
- batch[k] = batch[k].to(device=device)
- batch[k] = batch[k]*2.0-1.0
- return batch
-
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser()
- parser.add_argument(
- "--indir",
- type=str,
- nargs="?",
- help="dir containing image-mask pairs (`example.png` and `example_mask.png`)",
- )
- parser.add_argument(
- "--outdir",
- type=str,
- nargs="?",
- help="dir to write results to",
- )
- parser.add_argument(
- "--steps",
- type=int,
- default=50,
- help="number of ddim sampling steps",
- )
- opt = parser.parse_args()
-
- masks = sorted(glob.glob(os.path.join(opt.indir, "*_mask.png")))
- images = [x.replace("_mask.png", ".png") for x in masks]
- print(f"Found {len(masks)} inputs.")
-
- config = OmegaConf.load("models/ldm/inpainting_big/config.yaml")
- model = instantiate_from_config(config.model)
- model.load_state_dict(torch.load("models/ldm/inpainting_big/last.ckpt")["state_dict"],
- strict=False)
-
- device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
- model = model.to(device)
- sampler = DDIMSampler(model)
-
- os.makedirs(opt.outdir, exist_ok=True)
- with torch.no_grad():
- with model.ema_scope():
- for image, mask in tqdm(zip(images, masks)):
- outpath = os.path.join(opt.outdir, os.path.split(image)[1])
- batch = make_batch(image, mask, device=device)
-
- # encode masked image and concat downsampled mask
- c = model.cond_stage_model.encode(batch["masked_image"])
- cc = torch.nn.functional.interpolate(batch["mask"],
- size=c.shape[-2:])
- c = torch.cat((c, cc), dim=1)
-
- shape = (c.shape[1]-1,)+c.shape[2:]
- samples_ddim, _ = sampler.sample(S=opt.steps,
- conditioning=c,
- batch_size=c.shape[0],
- shape=shape,
- verbose=False)
- x_samples_ddim = model.decode_first_stage(samples_ddim)
-
- image = torch.clamp((batch["image"]+1.0)/2.0,
- min=0.0, max=1.0)
- mask = torch.clamp((batch["mask"]+1.0)/2.0,
- min=0.0, max=1.0)
- predicted_image = torch.clamp((x_samples_ddim+1.0)/2.0,
- min=0.0, max=1.0)
-
- inpainted = (1-mask)*image+mask*predicted_image
- inpainted = inpainted.cpu().numpy().transpose(0,2,3,1)[0]*255
- Image.fromarray(inpainted.astype(np.uint8)).save(outpath)
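
A minimal, CPU-only sanity check of make_batch()'s conventions (synthetic images written to disk; assumes make_batch above is importable):

    import numpy as np
    import torch
    from PIL import Image

    Image.fromarray(np.full((8, 8, 3), 128, dtype=np.uint8)).save("example.png")
    m = np.zeros((8, 8), dtype=np.uint8)
    m[2:6, 2:6] = 255
    Image.fromarray(m).save("example_mask.png")

    batch = make_batch("example.png", "example_mask.png", device=torch.device("cpu"))
    assert batch["image"].shape == (1, 3, 8, 8)                       # NCHW, rescaled to [-1, 1]
    assert set(batch["mask"].unique().tolist()) <= {-1.0, 1.0}
    assert torch.all(batch["masked_image"][:, :, 2:6, 2:6] == -1.0)   # masked pixels erased to -1
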
diff --git a/spaces/mygyasir/genious_bgremover/carvekit/pipelines/preprocessing.py b/spaces/mygyasir/genious_bgremover/carvekit/pipelines/preprocessing.py
deleted file mode 100644
index 3d1e848d10bb99ddc06b84fbc52c9d0f36ebe1c0..0000000000000000000000000000000000000000
--- a/spaces/mygyasir/genious_bgremover/carvekit/pipelines/preprocessing.py
+++ /dev/null
@@ -1,28 +0,0 @@
-"""
-Source url: https://github.com/OPHoperHPO/image-background-remove-tool
-Author: Nikita Selin (OPHoperHPO)[https://github.com/OPHoperHPO].
-License: Apache License 2.0
-"""
-from pathlib import Path
-from typing import Union, List
-
-from PIL import Image
-
-__all__ = ["PreprocessingStub"]
-
-
-class PreprocessingStub:
- """Stub for future preprocessing methods"""
-
- def __call__(self, interface, images: List[Union[str, Path, Image.Image]]):
- """
- Passes data though interface.segmentation_pipeline() method
-
- Args:
- interface: Interface instance
- images: list of images
-
- Returns:
- the result of passing data through segmentation_pipeline method of interface
- """
- return interface.segmentation_pipeline(images=images)
diff --git a/spaces/myscale/visual-dataset-explorer/app.py b/spaces/myscale/visual-dataset-explorer/app.py
deleted file mode 100644
index 01a08e9aa8aeb18c94def9b97817527471db597e..0000000000000000000000000000000000000000
--- a/spaces/myscale/visual-dataset-explorer/app.py
+++ /dev/null
@@ -1,519 +0,0 @@
-import streamlit as st
-import numpy as np
-import base64
-from io import BytesIO
-from multilingual_clip import pt_multilingual_clip
-from transformers import CLIPTokenizerFast, AutoTokenizer, CLIPModel
-import torch
-import logging
-from os import environ
-from parse import parse
-from clickhouse_connect import get_client
-environ['TOKENIZERS_PARALLELISM'] = 'true'
-
-
-db_name_map = {
- "Unsplash Photos 25K": lambda feat: f"mqdb_demo.unsplash_25k_{feat}_indexer",
- "RSICD: Remote Sensing Images 11K": lambda feat: f"mqdb_demo.rsicd_{feat}_b_32",
-}
-feat_name_map = {
- 'Vanilla CLIP': "clip",
- 'CLIP finetuned on RSICD': "cliprsicd"
-}
-
-
-DB_NAME = "mqdb_demo.unsplash_25k_clip_indexer"
-DIMS = 512
-# Ignore some bad links (broken in the dataset already)
-BAD_IDS = {'9_9hzZVjV8s', 'RDs0THr4lGs', 'vigsqYux_-8',
- 'rsJtMXn3p_c', 'AcG-unN00gw', 'r1R_0ZNUcx0'}
-
-
-@st.experimental_singleton(show_spinner=False)
-def init_db():
- """ Initialize the Database Connection
-
- Returns:
- meta_field: Meta field that records if an image is viewed or not
- client: Database connection object
- """
- r = parse("{http_pre}://{host}:{port}", st.secrets["DB_URL"])
- client = get_client(
- host=r['host'], port=r['port'], user=st.secrets["USER"], password=st.secrets["PASSWD"],
- interface=r['http_pre'],
- )
- meta_field = {}
- return meta_field, client
-
-
-@st.experimental_singleton(show_spinner=False)
-def init_query_num():
- print("init query_num")
- return 0
-
-
-def query(xq, top_k=10):
- """ Query TopK matched w.r.t a given vector
-
- Args:
- xq (numpy.ndarray or list of floats): Query vector
- top_k (int, optional): Number of matched vectors. Defaults to 10.
-
- Returns:
- matches: list of result records; keys refer to the selected columns
- """
- attempt = 0
- xq = xq / np.linalg.norm(xq)
- while attempt < 3:
- try:
- xq_s = f"[{', '.join([str(float(fnum)) for fnum in list(xq)])}]"
-
- print('Excluded pre:', st.session_state.meta)
- if len(st.session_state.meta) > 0:
- exclude_list = ','.join(
- [f'\'{i}\'' for i, v in st.session_state.meta.items() if v >= 1])
- print("Excluded:", exclude_list)
- # Using PREWHERE allows you to do column filter before vector search
- xc = st.session_state.index.query(f"SELECT id, url, vector,\
- distance(vector, {xq_s}) AS dist\
- FROM {db_name_map[st.session_state.db_name_ref](feat_name_map[st.session_state.feat_name])} \
- WHERE id NOT IN ({exclude_list}) ORDER BY dist LIMIT {top_k}").named_results()
- else:
- xc = st.session_state.index.query(f"SELECT id, url, vector,\
- distance(vector, {xq_s}) AS dist\
- FROM {db_name_map[st.session_state.db_name_ref](feat_name_map[st.session_state.feat_name])} \
- ORDER BY dist LIMIT {top_k}").named_results()
- real_xc = st.session_state.index.query(f"SELECT id, url, vector,\
- distance(vector, {xq_s}) AS dist \
- FROM {db_name_map[st.session_state.db_name_ref](feat_name_map[st.session_state.feat_name])} \
- ORDER BY dist LIMIT {top_k}").named_results()
- top_k = [{k: v for k, v in r.items()} for r in real_xc]
- xc = [xi for xi in xc if xi['id'] not in st.session_state.meta or
- st.session_state.meta[xi['id']] < 1]
- logging.info(
- f'{len(xc)} records returned, {[_i["id"] for _i in xc]}')
- matches = xc
- break
- except Exception as e:
- # force reload if we have trouble on connections or something else
- logging.warning(str(e))
- _, st.session_state.index = init_db()
- attempt += 1
- matches = []
- if len(matches) == 0:
- logging.error(f"No matches found for '{DB_NAME}'")
- return matches, top_k
-
-
-@st.experimental_singleton(show_spinner=False)
-def init_random_query():
- xq = np.random.rand(DIMS).tolist()
- return xq, xq.copy()
-
-
-class Classifier:
- """ Zero-shot Classifier
- This classifier acts as a proxy for the user's reactions to the probed images.
- Its weight vector replaces the original query vector (initialized from the prompt)
- and steers retrieval toward results that satisfy the user.
-
- This is a common pattern in recommendation systems: the classifier returns more
- precise results as it accumulates user feedback.
- """
-
- def __init__(self, xq: list):
- # initialize model with DIMS input size and 1 output
- # note that the bias is ignored, as we only focus on the inner product result
- self.model = torch.nn.Linear(DIMS, 1, bias=False)
- # convert initial query `xq` to tensor parameter to init weights
- init_weight = torch.Tensor(xq).reshape(1, -1)
- self.model.weight = torch.nn.Parameter(init_weight)
- # init loss and optimizer
- self.loss = torch.nn.BCEWithLogitsLoss()
- self.optimizer = torch.optim.SGD(self.model.parameters(), lr=0.1)
-
- def fit(self, X: list, y: list, iters: int = 5):
- # convert X and y to tensor
- X = torch.Tensor(X)
- y = torch.Tensor(y).reshape(-1, 1)
- for i in range(iters):
- # zero gradients
- self.optimizer.zero_grad()
- # Normalize the weight before inference
- # This will constrain the gradient or you will have an explosion on query vector
- self.model.weight.data = self.model.weight.data / \
- torch.norm(self.model.weight.data, p=2, dim=-1)
- # forward pass
- out = self.model(X)
- # compute loss
- loss = self.loss(out, y)
- # backward pass
- loss.backward()
- # update weights
- self.optimizer.step()
-
- def get_weights(self):
- xq = self.model.weight.detach().numpy()[0].tolist()
- return xq
-
-
-class NormalizingLayer(torch.nn.Module):
- def forward(self, x):
- return x / torch.norm(x, dim=-1, keepdim=True)
-
-
-def card(i, url):
- # NOTE: the original HTML markup inside these f-strings was stripped in transit;
- # a minimal image-card layout is assumed below (only the "Relevance" caption survived).
- return f'<div class="img-card"><img id="img{i}" src="{url}" width="200px;"></div>'
-
-
-def card_with_conf(i, conf, url):
- conf = "%.4f" % (conf)
- return f'<div class="img-card"><img id="img{i}" src="{url}" width="200px;"><div>Relevance: {conf}</div></div>'
-
-
-def get_top_k(xq, top_k=9):
- """ wrapper function for query
-
- Args:
- xq (numpy.ndarray or list of floats): Query vector
- top_k (int, optional): Number of returned vectors. Defaults to 9.
-
- Returns:
- matches: See `query()`
- """
- matches = query(
- xq, top_k=top_k
- )
- return matches
-
-
-def tune(X, y, iters=2):
- """ Train the Zero-shot Classifier
-
- Args:
- X (numpy.ndarray): Input vectors (retrieved vectors)
- y (list of floats or numpy.ndarray): Scores given by user
- iters (int, optional): iterations of updates to be run
- """
- assert len(X) == len(y)
- # train the classifier
- st.session_state.clf.fit(X, y, iters=iters)
- # extract new vector
- st.session_state.xq = st.session_state.clf.get_weights()
-
-
-def refresh_index():
- """ Clean the session
- """
- del st.session_state["meta"]
- st.session_state.meta = {}
- st.session_state.query_num = 0
- logging.info(f"Refresh for '{st.session_state.meta}'")
- init_db.clear()
- # refresh session states
- st.session_state.meta, st.session_state.index = init_db()
- del st.session_state.clf, st.session_state.xq
-
-
-def calc_dist():
- xq = np.array(st.session_state.xq)
- orig_xq = np.array(st.session_state.orig_xq)
- return np.linalg.norm(xq - orig_xq)
-
-
-def submit():
- """ Tune the model w.r.t given score from user.
- """
- st.session_state.query_num += 1
- matches = st.session_state.matches
- velocity = 1 # st.session_state.velocity
- scores = {}
- states = [
- st.session_state[f"input{i}"] for i in range(len(matches))
- ]
- for i, match in enumerate(matches):
- scores[match['id']] = float(states[i])
- # reset states to 1.0
- for i in range(len(matches)):
- st.session_state[f"input{i}"] = 1.0
- # get training data and labels
- X = list([match['vector'] for match in matches])
- y = [v for v in list(scores.values())]
- tune(X, y, iters=int(st.session_state.iters))
- # update record metadata after training
- for match in matches:
- st.session_state.meta[match['id']] = 1
- logging.info(f"Exclude List: {st.session_state.meta}")
-
-
-def delete_element(element):
- del element
-
-
-@st.experimental_singleton(show_spinner=False)
-def init_clip_mlang():
- """ Initialize CLIP Model
-
- Returns:
- Tokenizer: CLIPTokenizerFast (which converts words into embeddings)
- """
- MODEL_ID = 'M-CLIP/XLM-Roberta-Large-Vit-B-32'
- clip = pt_multilingual_clip.MultilingualCLIP.from_pretrained(MODEL_ID)
- tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
- return tokenizer, clip
-
-
-@st.experimental_singleton(show_spinner=False)
-def init_clip_vanilla():
- """ Initialize CLIP Model
-
- Returns:
- Tokenizer: CLIPTokenizerFast (which converts words into embeddings)
- """
- MODEL_ID = "openai/clip-vit-base-patch32"
- tokenizer = CLIPTokenizerFast.from_pretrained(MODEL_ID)
- clip = CLIPModel.from_pretrained(MODEL_ID)
- return tokenizer, clip
-
-
-@st.experimental_singleton(show_spinner=False)
-def init_clip_rsicd():
- """ Initialize CLIP Model
-
- Returns:
- Tokenizer: CLIPTokenizerFast (which converts words into embeddings)
- """
- MODEL_ID = "flax-community/clip-rsicd"
- tokenizer = CLIPTokenizerFast.from_pretrained(MODEL_ID)
- clip = CLIPModel.from_pretrained(MODEL_ID)
- return tokenizer, clip
-
-
-def prompt2vec_mlang(prompt: str, tokenizer, clip):
- """ Convert prompt into a computational vector
-
- Args:
- prompt (str): Text to be tokenized
-
- Returns:
- xq: vector from the tokenizer, representing the original prompt
- """
- out = clip.forward(prompt, tokenizer)
- xq = out.squeeze(0).cpu().detach().numpy().tolist()
- return xq
-
-
-def prompt2vec_vanilla(prompt: str, tokenizer, clip):
- inputs = tokenizer(prompt, return_tensors='pt')
- out = clip.get_text_features(**inputs)
- xq = out.squeeze(0).cpu().detach().numpy().tolist()
- return xq
-
-
-st.markdown("""
-
-""", unsafe_allow_html=True)
-
-messages = [
- f"""
- Find the most relevant examples in a large visual dataset by combining a text query with few-shot learning.
- """,
- f"""
- Then you can adjust the weight on each image. Those weights should **represent how well the
- image matches your preference**. You can either choose the images that match your prompt or change
- your mind.
-
- You might also notice an iteration slider above the retrieved images. It controls
- how fast the query vector changes: more **iterations** update the vector faster, while
- fewer **iterations** make the retrieval smoother.
- """,
- f"""
- This example trains a classifier to distinguish between samples you want and samples
- you don't want. By initializing its weights from the prompt, the classifier is already good enough
- to cluster the images you are searching for. If the result is not as good as you expected, you can
- supervise the classifier with the **Relevance** annotation. If you cannot see any difference in the Top-K
- retrieved results, try increasing the **Number of Iterations**.
- """,
- # TODO @ fangruil: fill the link with our tech blog
- f"""
- The app uses [MyScale](http://mqdb.page.moqi.ai/mqdb-docs/) to store and query images
- using vector search. All images are sourced from the
- [Unsplash Lite dataset](https://unsplash-datasets.s3.amazonaws.com/lite/latest/unsplash-research-dataset-lite-latest.zip)
- and encoded using [OpenAI's CLIP](https://huggingface.co/openai/clip-vit-base-patch32). We explain how
- it all works [here]().
- """
-]
-
-text_model_map = {
- 'Multi Lingual': {'Vanilla CLIP': [prompt2vec_mlang, ]},
- 'English': {'Vanilla CLIP': [prompt2vec_vanilla, ],
- 'CLIP finetuned on RSICD': [prompt2vec_vanilla, ],
- }
-}
-
-
-with st.spinner("Connecting DB..."):
- st.session_state.meta, st.session_state.index = init_db()
-
-with st.spinner("Loading Models..."):
- # Initialize CLIP model
- if 'xq' not in st.session_state:
- text_model_map['Multi Lingual']['Vanilla CLIP'].append(
- init_clip_mlang())
- text_model_map['English']['Vanilla CLIP'].append(init_clip_vanilla())
- text_model_map['English']['CLIP finetuned on RSICD'].append(
- init_clip_rsicd())
- st.session_state.query_num = 0
-
-if 'xq' not in st.session_state:
- # If it's a fresh start
- if st.session_state.query_num < len(messages):
- msg = messages[st.session_state.query_num]
- else:
- msg = messages[-1]
- prompt = ''
- # Basic Layout
- with st.container():
- if 'prompt' in st.session_state:
- del st.session_state.prompt
- st.title("Visual Dataset Explorer")
- start = [st.empty(), st.empty(), st.empty(), st.empty(),
- st.empty(), st.empty(), st.empty(), st.empty()]
- start[0].info(msg)
- start_col = start[1].columns(3)
- st.session_state.db_name_ref = start_col[0].selectbox(
- "Select Database:", list(db_name_map.keys()))
- st.session_state.lang = start_col[1].selectbox(
- "Select Language:", list(text_model_map.keys()))
- st.session_state.feat_name = start_col[2].selectbox("Select Image Feature:",
- list(text_model_map[st.session_state.lang].keys()))
- if st.session_state.db_name_ref == "RSICD: Remote Sensing Images 11K":
- start[2].warning('If you are searching for remote sensing images, \
- try the prompt "An aerial photograph of " \
- to get the best search experience!')
- if len(prompt) > 0:
- st.session_state.prompt = prompt.replace(' ', '_')
- # NOTE: the HTML markup originally inside this hint string was stripped in transit;
- # rebuilt here as a minimal two-line hint (the <br> is an assumption).
- start[4].markdown(
- 'Don\'t know what to search? Try Random!<br>'
- '🌟 We also support multi-language search. Type any language you know to search! ⌨️',
- unsafe_allow_html=True)
- upld_model = start[6].file_uploader(
- "Or you can upload your previous run!", type='onnx')
- upld_btn = start[7].button(
- "Use Loaded Weights", disabled=upld_model is None)
- prompt = start[3].text_input(
- "Prompt:",
- value="An aerial photograph of "if st.session_state.db_name_ref == "RSICD: Remote Sensing Images 11K" else "",
- placeholder="Examples: playing corgi, 女人举着雨伞, mouette volant au-dessus de la mer, ガラスの花瓶の花 ...",)
- with start[5]:
- col = st.columns(8)
- has_no_prompt = (len(prompt) == 0 and upld_model is None)
- prompt_xq = col[6].button("Prompt", disabled=len(prompt) == 0)
- random_xq = col[7].button("Random", disabled=not (
- len(prompt) == 0 and upld_model is None))
-
- if random_xq:
- # Randomly pick a vector to query
- xq, orig_xq = init_random_query()
- st.session_state.xq = xq
- st.session_state.orig_xq = orig_xq
- _ = [elem.empty() for elem in start]
- elif prompt_xq or upld_btn:
- if upld_model is not None:
- # Import vector from a file
- import onnx
- from onnx import numpy_helper
- _model = onnx.load(upld_model)
- weights = _model.graph.initializer
- assert len(weights) == 1
- xq = numpy_helper.to_array(weights[0]).tolist()
- assert len(xq) == DIMS
- st.session_state.prompt = upld_model.name.split(".onnx")[
- 0].replace(' ', '_')
- else:
- print(f"Input prompt is {prompt}")
- # Tokenize the vectors
- p2v_func, args = text_model_map[st.session_state.lang][st.session_state.feat_name]
- xq = p2v_func(prompt, *args)
- st.session_state.xq = xq
- st.session_state.orig_xq = xq
- _ = [elem.empty() for elem in start]
-
-if 'xq' in st.session_state:
- # If it is not a fresh start
- if st.session_state.query_num+1 < len(messages):
- msg = messages[st.session_state.query_num+1]
- else:
- msg = messages[-1]
- # initialize classifier
- if 'clf' not in st.session_state:
- st.session_state.clf = Classifier(st.session_state.xq)
-
- # if we want to display images we end up here
- st.info(msg)
- # first retrieve images from the vector database
- st.session_state.matches, st.session_state.top_k = get_top_k(
- st.session_state.clf.get_weights(), top_k=9)
-
- # export the model into executable ONNX
- st.session_state.dnld_model = BytesIO()
- torch.onnx.export(torch.nn.Sequential(NormalizingLayer(), st.session_state.clf.model),
- torch.as_tensor(st.session_state.xq).reshape(1, -1),
- st.session_state.dnld_model,
- input_names=['input'],
- output_names=['output'])
-
- with st.container():
- with st.sidebar:
- with st.container():
- st.header("Top K Nearest in Database")
- for i, k in enumerate(st.session_state.top_k):
- url = k["url"]
- url += "?q=75&fm=jpg&w=200&fit=max"
- if k["id"] not in BAD_IDS:
- disabled = False
- else:
- disabled = True
- dist = np.matmul(st.session_state.clf.get_weights() / np.linalg.norm(st.session_state.clf.get_weights()),
- np.array(k["vector"]).T)
- st.markdown(card_with_conf(i, dist, url),
- unsafe_allow_html=True)
- dnld_nam = st.text_input('Download Name:',
- f'{(st.session_state.prompt if "prompt" in st.session_state else "model")}.onnx',
- max_chars=50)
- dnld_btn = st.download_button('Download your classifier!',
- st.session_state.dnld_model,
- dnld_nam,)
- # once retrieved, display them alongside relevance sliders in a form
- with st.form("batch", clear_on_submit=False):
- st.session_state.iters = st.slider(
- "Number of Iterations to Update", min_value=0, max_value=10, step=1, value=2)
- col = st.columns([1, 9])
- col[0].form_submit_button("Train!", on_click=submit)
- col[1].form_submit_button(
- "Choose a new prompt", on_click=refresh_index)
- # we have three columns in the form
- cols = st.columns(3)
- for i, match in enumerate(st.session_state.matches):
- # find good url
- url = match["url"]
- url += "?q=75&fm=jpg&w=200&fit=max"
- if match["id"] not in BAD_IDS:
- disabled = False
- else:
- disabled = True
- # the card shows an image and a relevance slider
- cols[i % 3].markdown(card(i, url), unsafe_allow_html=True)
- # we access the slider values via st.session_state[f"input{i}"]
- cols[i % 3].slider(
- "Relevance",
- min_value=0.0,
- max_value=1.0,
- value=1.0,
- step=0.05,
- key=f"input{i}",
- disabled=disabled
- )
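
A minimal sketch of the feedback loop the Classifier above implements (sizes and scores are made up; assumes the class definition and DIMS are in scope): the weights start at the prompt vector and move toward positively scored image vectors after fit():

    import numpy as np

    xq = np.random.rand(DIMS).tolist()       # stands in for the prompt embedding
    clf = Classifier(xq)

    X = np.random.rand(9, DIMS).tolist()     # stands in for the 9 retrieved image vectors
    y = [1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]   # user relevance scores

    clf.fit(X, y, iters=2)
    new_xq = clf.get_weights()               # updated query vector for the next search
    assert len(new_xq) == DIMS
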
diff --git a/spaces/nasa-cisto-data-science-group/satvision-base-demo/pytorch-caney/pytorch_caney/models/__init__.py b/spaces/nasa-cisto-data-science-group/satvision-base-demo/pytorch-caney/pytorch_caney/models/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/nateraw/yolov6/yolov6/utils/general.py b/spaces/nateraw/yolov6/yolov6/utils/general.py
deleted file mode 100644
index 10a590580ba155071685f1ea558df995b105c706..0000000000000000000000000000000000000000
--- a/spaces/nateraw/yolov6/yolov6/utils/general.py
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding:utf-8 -*-
-import os
-import glob
-from pathlib import Path
-
-def increment_name(path):
- "increase save directory's id"
- path = Path(path)
- sep = ''
- if path.exists():
- path, suffix = (path.with_suffix(''), path.suffix) if path.is_file() else (path, '')
- for n in range(1, 9999):
- p = f'{path}{sep}{n}{suffix}'
- if not os.path.exists(p):
- break
- path = Path(p)
- return path
-
-
-def find_latest_checkpoint(search_dir='.'):
- # Find the most recent saved checkpoint in search_dir
- checkpoint_list = glob.glob(f'{search_dir}/**/last*.pt', recursive=True)
- return max(checkpoint_list, key=os.path.getctime) if checkpoint_list else ''
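
Illustrative behaviour of increment_name() with hypothetical paths (the file case mirrors the suffix branch above):

    # increment_name('runs/exp')      -> Path('runs/exp')      if 'runs/exp' does not exist yet
    # increment_name('runs/exp')      -> Path('runs/exp1')     if 'runs/exp' exists
    # increment_name('runs/exp')      -> Path('runs/exp2')     if 'runs/exp' and 'runs/exp1' exist
    # increment_name('runs/last.pt')  -> Path('runs/last1.pt') if the file 'runs/last.pt' exists
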
diff --git a/spaces/naver/PUMP/post_filter.py b/spaces/naver/PUMP/post_filter.py
deleted file mode 100644
index 851a3403498eb7499b848cc32dfc6878f52aca48..0000000000000000000000000000000000000000
--- a/spaces/naver/PUMP/post_filter.py
+++ /dev/null
@@ -1,235 +0,0 @@
-# Copyright 2022-present NAVER Corp.
-# CC BY-NC-SA 4.0
-# Available only for non-commercial use
-
-import pdb, sys, os
-import argparse
-import numpy as np
-from scipy.sparse import coo_matrix, csr_matrix, triu, csgraph
-
-import core.functional as myF
-from tools.common import image, image_with_trf
-from tools.viz import dbgfig, show_correspondences
-
-
-def arg_parser():
- parser = argparse.ArgumentParser("Post-filtering of Deep matching correspondences")
-
- parser.add_argument("--img1", required=True, help="path to first image")
- parser.add_argument("--img2", required=True, help="path to second image")
- parser.add_argument("--resize", default=0, type=int, help="prior image downsize (0 if recursive)")
- parser.add_argument("--corres", required=True, help="input path")
- parser.add_argument("--output", default="", help="filtered corres output")
-
- parser.add_argument("--locality", type=float, default=2, help="tolerance to deformation")
- parser.add_argument("--min-cc-size", type=int, default=50, help="min connex-component size")
- parser.add_argument("--densify", default='no', choices=['no','full','cc','convex'], help="output pixel-dense corres field")
- parser.add_argument("--dense-side", default='left', choices=['left','right'], help="img to densify")
-
- parser.add_argument("--verbose", "-v", type=int, default=0, help="verbosity level")
- parser.add_argument("--dbg", type=str, nargs='+', default=(), help="debug options")
- return parser
-
-
-def main(args):
- import test_singlescale as pump
- corres = np.load(args.corres)['corres']
- imgs = tuple(map(image, pump.Main.load_images(args)))
-
- if dbgfig('raw',args.dbg):
- show_correspondences(*imgs, corres)
-
- corres = filter_corres( *imgs, corres,
- locality=args.locality, min_cc_size=args.min_cc_size,
- densify=args.densify, dense_side=args.dense_side,
- verbose=args.verbose, dbg=args.dbg)
-
- if dbgfig('viz',args.dbg):
- show_correspondences(*imgs, corres)
-
- return pump.save_output( args, corres )
-
-
-def filter_corres( img0, img1, corres,
- locality = None, # graph edge locality
- min_cc_size = None, # min CC size
- densify = None,
- dense_side = None,
- verbose = 0, dbg=()):
-
- if None in (locality, min_cc_size, densify, dense_side):
- default_params = arg_parser()
- locality = locality or default_params.get_default('locality')
- min_cc_size = min_cc_size or default_params.get_default('min_cc_size')
- densify = densify or default_params.get_default('densify')
- dense_side = dense_side or default_params.get_default('dense_side')
-
- img0, trf0 = img0 if isinstance(img0,tuple) else (img0, np.eye(3))
- img1, trf1 = img1 if isinstance(img1,tuple) else (img1, np.eye(3))
- assert isinstance(img0, np.ndarray) and isinstance(img1, np.ndarray)
-
- corres = myF.affmul((np.linalg.inv(trf0),np.linalg.inv(trf1)), corres)
- n_corres = len(corres)
- if verbose: print(f'>> input: {len(corres)} correspondences')
-
- graph = compute_graph(corres, max_dis=locality*4)
- if verbose: print(f'>> {locality=}: {graph.nnz} nodes in graph')
-
- cc_sizes = measure_connected_components(graph)
- corres[:,4] += np.log2(cc_sizes)
- corres = corres[cc_sizes > min_cc_size]
- if verbose: print(f'>> {min_cc_size=}: remaining {len(corres)} correspondences')
-
- final = myF.affmul((trf0,trf1), corres)
-
- if densify != 'no':
- # densify correspondences
- if dense_side == 'right': # temporary swap
- final = final[:,[2,3,0,1]]
- H = round(img1.shape[0] / trf1[1,1])
- W = round(img1.shape[1] / trf1[0,0])
- else:
- H = round(img0.shape[0] / trf0[1,1])
- W = round(img0.shape[1] / trf0[0,0])
-
- if densify == 'cc':
- assert False, 'todo'
- elif densify in (True, 'full', 'convex'):
- # recover true image0's shape
- final = densify_corres( final, (H, W), full=(densify!='convex') )
- else:
- raise ValueError(f'Bad mode for {densify=}')
-
- if dense_side == 'right': # undo temporary swap
- final = final[:,[2,3,0,1]]
-
- return final
-
-
-def compute_graph(corres, max_dis=10, min_ang=90):
- """ 4D distances (corres can only be connected to same scale)
- using sparse matrices for efficiency
-
- step1: build horizontal and vertical binning, binsize = max_dis
- add in each bin all neighbor bins
- step2: for each corres, we can intersect 2 bins to get a short list of candidates
- step3: verify euclidean distance < maxdis (optional?)
- """
- def bin_positions(pos):
- # every corres goes into a single bin
- bin_indices = np.int32(pos.clip(min=0) // max_dis) + 1
- cols = np.arange(len(pos))
-
- # add the cell before and the cell after, to handle border effects
- res = csr_matrix((np.ones(len(bin_indices)*3,dtype=np.float32),
- (np.r_[bin_indices-1, bin_indices, bin_indices+1], np.r_[cols,cols,cols])),
- shape=(bin_indices.max()+2 if bin_indices.size else 1, len(pos)))
-
- return res, bin_indices
-
- # 1-hot matrices of shape = nbins x n_corres
- x1_bins = bin_positions(corres[:,0])
- y1_bins = bin_positions(corres[:,1])
- x2_bins = bin_positions(corres[:,2])
- y2_bins = bin_positions(corres[:,3])
-
- def row_indices(ngh):
- res = np.bincount(ngh.indptr[1:-1], minlength=ngh.indptr[-1])[:-1]
- return res.cumsum()
-
- def compute_dist( ngh, pts, scale=None ):
- # pos from the second point
- x_pos = pts[ngh.indices,0]
- y_pos = pts[ngh.indices,1]
-
- # subtract pos from the 1st point
- rows = row_indices(ngh)
- x_pos -= pts[rows, 0]
- y_pos -= pts[rows, 1]
- dis = np.sqrt(np.square(x_pos) + np.square(y_pos))
- if scale is not None:
- # there is a scale for each of the 2 pts; we lean toward the worst one,
- dis *= (scale[rows] + scale[ngh.indices]) / 2 # so we use the arithmetic instead of the geometric mean
-
- return normed(np.c_[x_pos, y_pos]), dis
-
- def Rot( ngh, degrees ):
- rows = row_indices(ngh)
- rad = degrees * np.pi / 180
- rad = (rad[rows] + rad[ngh.indices]) / 2 # average angle between 2 corres
- cos, sin = np.cos(rad), np.sin(rad)
- return np.float32(((cos, -sin), (sin,cos))).transpose(2,0,1)
-
- def match(xbins, ybins, pt1, pt2, way):
- xb, ixb = xbins
- yb, iyb = ybins
-
- # gets for each corres a list of potential matches
- ngh = xb[ixb].multiply( yb[iyb] ) # shape = n_corres x n_corres
- ngh = triu(ngh, k=1).tocsr() # remove mirrored matches
- # ngh = matches of matches, shape = n_corres x n_corres
-
- # verify locality and flow
- vec1, d1 = compute_dist(ngh, pt1) # for each match, distance and orientation in img1
- # assert d1.max()**0.5 < 2*max_dis*1.415, 'cannot be larger than 2 cells in diagonals, or there is a bug'+bb()
- scale, rot = myF.decode_scale_rot(corres[:,5])
- vec2, d2 = compute_dist(ngh, pt2, scale=scale**(-way))
- ang = np.einsum('ik,ik->i', (vec1[:,None] @ Rot(ngh,way*rot))[:,0], vec2)
-
- valid = (d1 <= max_dis) & (d2 <= max_dis) & (ang >= np.cos(min_ang*np.pi/180))
- res = csr_matrix((valid, ngh.indices, ngh.indptr), shape=ngh.shape)
- res.eliminate_zeros()
- return res
-
- # find all neighbors within each xy bin
- ngh1 = match(x1_bins, y1_bins, corres[:,0:2], corres[:,2:4], way=+1)
- ngh2 = match(x2_bins, y2_bins, corres[:,2:4], corres[:,0:2], way=-1).T
-
- return ngh1 + ngh2 # union
-
-
-def measure_connected_components(graph, dbg=()):
- # compute connected components
- nc, labels = csgraph.connected_components(graph, directed=False)
-
- # filter and remove all small components
- count = np.bincount(labels)
-
- return count[labels]
-
-def normed( mat ):
- return mat / np.linalg.norm(mat, axis=-1, keepdims=True).clip(min=1e-16)
-
-
-def densify_corres( corres, shape, full=True ):
- from scipy.interpolate import LinearNDInterpolator
- from scipy.spatial import cKDTree as KDTree
-
- assert len(corres) > 3, 'Not enough corres for densification'
- H, W = shape
-
- interp = LinearNDInterpolator(corres[:,0:2], corres[:,2:4])
- X, Y = np.mgrid[0:H, 0:W][::-1] # H x W, H x W
- p1 = np.c_[X.ravel(), Y.ravel()]
- p2 = interp(X, Y) # H x W x 2
-
- p2 = p2.reshape(-1,2)
- invalid = np.isnan(p2).any(axis=1)
-
- if full:
- # interpolate pixels outside of the convex hull
- badp = p1[invalid]
- tree = KDTree(corres[:,0:2])
- _, nn = tree.query(badp, 3) # find 3 closest neighbors
- corflow = corres[:,2:4] - corres[:,0:2]
- p2.reshape(-1,2)[invalid] = corflow[nn].mean(axis=1) + p1[invalid]
- else:
- # remove nans, i.e. remove points outside of convex hull
- p1, p2 = p1[~invalid], p2[~invalid]
-
- # return correspondence field
- return np.c_[p1, p2]
-
-
-if __name__ == '__main__':
- main(arg_parser().parse_args())
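
A small, self-contained illustration of the connected-component size filter used in filter_corres() above (assumes measure_connected_components is in scope; the graph is toy data):

    import numpy as np
    from scipy.sparse import csr_matrix

    # 5 correspondences; edges 0-1-2 form one component, 3-4 another
    rows = np.array([0, 1, 3])
    cols = np.array([1, 2, 4])
    graph = csr_matrix((np.ones(3), (rows, cols)), shape=(5, 5))

    cc_sizes = measure_connected_components(graph)
    print(cc_sizes)          # [3 3 3 2 2]
    print(cc_sizes > 2)      # [ True  True  True False False] -> keep only the big component
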
diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Mafia Queens Of Mumbai Pdf Free Download.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Mafia Queens Of Mumbai Pdf Free Download.md
deleted file mode 100644
index 880c7a962bb4073481d3648f4c5e504a02e72418..0000000000000000000000000000000000000000
--- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Mafia Queens Of Mumbai Pdf Free Download.md
+++ /dev/null
@@ -1,22 +0,0 @@
-
-How to Download Mafia Queens of Mumbai PDF for Free
-If you are looking for a thrilling and captivating read about the lives of some of the most notorious women in Mumbai's underworld, then you should check out Mafia Queens of Mumbai by S. Hussain Zaidi and Jane Borges. This book tells the true stories of 13 women who were involved in criminal activities in Mumbai, from smuggling and gun-running to terrorism and murder. You will learn about their motivations, their struggles, their achievements and their downfall.
-mafia queens of mumbai pdf free download
Download File ⇒⇒⇒ https://urlcod.com/2uIbgb
-Mafia Queens of Mumbai is a non-fiction crime novel that was published in 2011 and has received rave reviews from critics and readers alike. It has also been adapted into a web series by Netflix, starring Alia Bhatt as Gangubai Kathiawadi, one of the most powerful and influential mafia queens in Mumbai's history.
-But how can you get your hands on this book without spending a dime? Well, there are some ways to download Mafia Queens of Mumbai PDF for free online, but you have to be careful and cautious. Some of the websites that claim to offer free downloads may be scams, viruses or malware that can harm your device or steal your personal information. Therefore, you should always use a reliable and reputable source to download any PDF file online.
-One of the best and safest ways to download Mafia Queens of Mumbai PDF for free is to use OceanofPDF.com. This is a website that offers free download links for thousands of books in various genres and formats. You can easily find Mafia Queens of Mumbai by S. Hussain Zaidi on this website by using the search bar or browsing through the categories. Once you find the book, you can click on the download link and choose the PDF format. The file will be downloaded to your device in a matter of seconds.
-OceanofPDF.com is a legal and legitimate website that respects the rights of authors and publishers. It only provides download links for books that are in the public domain or have been shared by the authors themselves. It does not host any pirated or copyrighted content on its servers. Therefore, you can use this website without any fear or guilt.
-So, what are you waiting for? Download Mafia Queens of Mumbai PDF for free from OceanofPDF.com and enjoy reading this fascinating and gripping book about the women who ruled Mumbai's underworld.
-
-But why should you read Mafia Queens of Mumbai? What makes this book so special and different from other crime novels? Well, there are many reasons to read this book, but here are some of the most compelling ones:
-
-
-- It gives you a rare and fascinating glimpse into the lives of women who defied the norms and expectations of society and carved their own niche in the male-dominated underworld of Mumbai.
-- It shows you the human side of these women, their emotions, their relationships, their dreams and their regrets. You will see them not as mere criminals, but as complex and multidimensional characters who had their own reasons and motivations for their actions.
-- It exposes you to the dark and gritty reality of Mumbai's underworld, the violence, the corruption, the power struggles, the betrayals and the intrigues that shaped the history and culture of the city.
-- It entertains you with its thrilling and captivating narration, its vivid descriptions, its crisp dialogues and its dramatic twists and turns. You will be hooked from the first page to the last.
-
-Mafia Queens of Mumbai is a book that will make you think, feel and wonder. It is a book that will challenge your perceptions and assumptions. It is a book that will stay with you long after you finish reading it.
-So, don't wait any longer. Download Mafia Queens of Mumbai PDF for free from OceanofPDF.com and dive into this amazing and unforgettable book.
-
-
\ No newline at end of file
diff --git a/spaces/ngoctuanai/chatgptfree/README.md b/spaces/ngoctuanai/chatgptfree/README.md
deleted file mode 100644
index c64c4b1921d28fe9da99de9addb0319903c97bb8..0000000000000000000000000000000000000000
--- a/spaces/ngoctuanai/chatgptfree/README.md
+++ /dev/null
@@ -1,11 +0,0 @@
----
-title: ChatGPT Free
-emoji: 💻
-colorFrom: blue
-colorTo: yellow
-sdk: docker
-pinned: false
-disable_embedding: true
-license: mit
-app_port: 3000
----
\ No newline at end of file
diff --git a/spaces/nickil/weakly-supervised-parsing/weakly_supervised_parser/utils/populate_chart.py b/spaces/nickil/weakly-supervised-parsing/weakly_supervised_parser/utils/populate_chart.py
deleted file mode 100644
index 39bab191f3cbdf7841d004fb5a49af2e00f9bc36..0000000000000000000000000000000000000000
--- a/spaces/nickil/weakly-supervised-parsing/weakly_supervised_parser/utils/populate_chart.py
+++ /dev/null
@@ -1,110 +0,0 @@
-import pandas as pd
-import numpy as np
-import logging
-
-from datasets.utils import set_progress_bar_enabled
-
-from weakly_supervised_parser.utils.prepare_dataset import NGramify
-from weakly_supervised_parser.utils.create_inside_outside_strings import InsideOutside
-from weakly_supervised_parser.model.trainer import InsideOutsideStringPredictor
-from weakly_supervised_parser.utils.cky_algorithm import get_best_parse
-from weakly_supervised_parser.utils.distant_supervision import RuleBasedHeuristic
-from weakly_supervised_parser.utils.prepare_dataset import PTBDataset
-from weakly_supervised_parser.settings import PTB_TRAIN_SENTENCES_WITHOUT_PUNCTUATION_PATH
-
-from weakly_supervised_parser.model.data_module_loader import DataModule
-from weakly_supervised_parser.model.span_classifier import LightningModel
-
-# Disable Dataset.map progress bar
-set_progress_bar_enabled(False)
-logging.getLogger("pytorch_lightning").setLevel(logging.WARNING)
-
-
-# ptb = PTBDataset(data_path=PTB_TRAIN_SENTENCES_WITHOUT_PUNCTUATION_PATH)
-# ptb_top_100_common = [item.lower() for item in RuleBasedHeuristic(corpus=ptb.retrieve_all_sentences()).get_top_tokens(top_most_common_ptb=100)]
-ptb_top_100_common = ['this', 'myself', 'shouldn', 'not', 'analysts', 'same', 'mightn', 'we', 'american', 'the', 'another', 'until', "aren't", 'when', 'if', 'am', 'over', 'ma', 'as', 'of', 'with', 'even', 'couldn', 'not', "needn't", 'where', 'there', 'isn', 'however', 'my', 'sales', 'here', 'at', 'yours', 'into', 'wouldn', 'officials', 'no', "hasn't", 'to', 'wasn', 'any', 'ours', 'out', 'each', "wasn't", 'is', 'and', 'me', 'off', 'once', "it's", 'they', 'most', 'also', 'through', 'hasn', 'our', 'or', 'after', "weren't", 'about', 'mr.', 'first', 'haven', 'needn', 'have', "isn't", 'now', "didn't", 'on', 'theirs', 'these', 'before', 'there', 'was', 'which', 'those', 'having', 'do', 'most', 'own', 'among', 'because', 'for', "should've", "shan't", 'so', 'being', 'few', 'too', 'to', 'at', 'people', 'her', 'meanwhile', 'both', 'down', 'doesn', 'below', 'mustn', 'an', 'two', 'more', 'japanese', 'ford', "you'd", 'about', 'but', 'doing', 'itself', 've', 'under', 'what', 'again', 'then', 'your', 'himself', 'now', 'against', 'just', 'does', 'net', "couldn't", 'that', 'he', 'revenue', 'because', 'yesterday', 'them', 'i', 'their', 'all', 'under', 'up', "haven't", 'while', "won't", 'it', 'more', 'it', 'ain', 'him', 'still', 'a', 'he', 'despite', 'should', 'during', 'nor', "shouldn't", 'such', "doesn't", 'are', "that'll", 'since', 'yourselves', 'such', 'those', 'after', 'weren', "you're", 'd', 'like', 'did', 'hadn', 'themselves', 'its', 'but', 'been', 's', "don't", 'these', 'they', 'this', 'his', "mightn't", 'moreover', 'how', 'new', 'above', 'ourselves', 'so', 'why', 'between', 'their', 'general', "wouldn't", 'who', 'i', 'in', 'don', 'shan', 'u.s.', 'ibm', 'separately', 'had', 'you', 'federal', 'if', 'our', 'and', 'only', 'y', 'many', 'one', 'no', 'though', 'won', 'last', 'from', 'each', 'traders', 'john', 'further', 'hers', 'both', "you've", "you'll", 'that', 'all', 'its', 'only', 'here', 'according', "mustn't", 'while', 'in', 'what', 'didn', 'when', 'some', 'on', 'can', 'yourself', 'herself', 'than', 'with', 'has', 'she', 'during', 'will', 'of', 'thus', 'you', 'very', 'o', 'investors', 'a', 'ms.', 'japan', 'were', 'the', 'we', 'm', 'as', 'll', 'be', 'by', 'other', 'yet', 'whom', 'some', 'indeed', 'other', "she's", "hadn't", 'by', 'earlier', 'for', 'instead', 'she', 'an', 't', 're', 'his', 'then', 'aren', 'although']
-# ptb_most_common_first_token = RuleBasedHeuristic(corpus=ptb.retrieve_all_sentences()).augment_using_most_frequent_starting_token(N=1)[0][0].lower()
-ptb_most_common_first_token = "the"
-
-from pytorch_lightning import Trainer
-
-trainer = Trainer(accelerator="auto", enable_progress_bar=False, max_epochs=-1)
-
-
-class PopulateCKYChart:
- def __init__(self, sentence):
- self.sentence = sentence
- self.sentence_list = sentence.split()
- self.sentence_length = len(sentence.split())
- self.span_scores = np.zeros((self.sentence_length + 1, self.sentence_length + 1), dtype=float)
- self.all_spans = NGramify(self.sentence).generate_ngrams(single_span=True, whole_span=True)
-
- def compute_scores(self, model, predict_type, scale_axis, predict_batch_size, chunks=128):
- inside_strings = []
- outside_strings = []
- inside_scores = []
- outside_scores = []
-
- for span in self.all_spans:
- _, inside_string, outside_string = InsideOutside(sentence=self.sentence).create_inside_outside_matrix(span)
- inside_strings.append(inside_string)
- outside_strings.append(outside_string)
-
- data = pd.DataFrame({"inside_sentence": inside_strings, "outside_sentence": outside_strings, "span": [span[0] for span in self.all_spans]})
-
- if predict_type == "inside":
-
- # if data.shape[0] > chunks:
- # data_chunks = np.array_split(data, data.shape[0] // chunks)
- # for data_chunk in data_chunks:
- # inside_scores.extend(model.predict_proba(spans=data_chunk.rename(columns={"inside_sentence": "sentence"})[["sentence"]],
- # scale_axis=scale_axis,
- # predict_batch_size=predict_batch_size)[:, 1])
- # else:
- # inside_scores.extend(model.predict_proba(spans=data.rename(columns={"inside_sentence": "sentence"})[["sentence"]],
- # scale_axis=scale_axis,
- # predict_batch_size=predict_batch_size)[:, 1])
-
- test_dataloader = DataModule(model_name_or_path="roberta-base", train_df=None, eval_df=None,
- test_df=data.rename(columns={"inside_sentence": "sentence"})[["sentence"]])
- inside_scores.extend(trainer.predict(model, dataloaders=test_dataloader)[0])
-
- data["inside_scores"] = inside_scores
- data.loc[
- (data["inside_sentence"].str.lower().str.startswith(ptb_most_common_first_token))
- & (data["inside_sentence"].str.lower().str.split().str.len() == 2)
- & (~data["inside_sentence"].str.lower().str.split().str[-1].isin(RuleBasedHeuristic().get_top_tokens())),
- "inside_scores",
- ] = 1
-
- is_upper_or_title = all([item.istitle() or item.isupper() for item in self.sentence.split()])
- is_stop = any([item for item in self.sentence.split() if item.lower() in ptb_top_100_common])
-
- flags = is_upper_or_title and not is_stop
-
- data["scores"] = data["inside_scores"]
-
- elif predict_type == "outside":
- outside_scores.extend(model.predict_proba(spans=data.rename(columns={"outside_sentence": "sentence"})[["sentence"]],
- scale_axis=scale_axis,
- predict_batch_size=predict_batch_size)[:, 1])
- data["outside_scores"] = outside_scores
- flags = False
- data["scores"] = data["outside_scores"]
-
- return flags, data
-
- def fill_chart(self, model, predict_type, scale_axis, predict_batch_size, data=None):
- if data is None:
- flags, data = self.compute_scores(model, predict_type, scale_axis, predict_batch_size)
- for span in self.all_spans:
- for i in range(0, self.sentence_length):
- for j in range(i + 1, self.sentence_length + 1):
- if span[0] == (i, j):
- self.span_scores[i, j] = data.loc[data["span"] == span[0], "scores"].item()
- return flags, self.span_scores, data
-
- def best_parse_tree(self, span_scores):
- span_scores_cky_format = span_scores[:-1, 1:]
- best_parse = get_best_parse(sentence=[self.sentence_list], spans=span_scores_cky_format)
- return best_parse
diff --git a/spaces/nikitaPDL2023/assignment4/detectron2/dev/packaging/README.md b/spaces/nikitaPDL2023/assignment4/detectron2/dev/packaging/README.md
deleted file mode 100644
index 0174b7dd528efcaa0fe27d46f40a3866f03e7c41..0000000000000000000000000000000000000000
--- a/spaces/nikitaPDL2023/assignment4/detectron2/dev/packaging/README.md
+++ /dev/null
@@ -1,17 +0,0 @@
-
-## To build a cu101 wheel for release:
-
-```
-$ nvidia-docker run -it --storage-opt "size=20GB" --name pt pytorch/manylinux-cuda101
-# inside the container:
-# git clone https://github.com/facebookresearch/detectron2/
-# cd detectron2
-# export CU_VERSION=cu101 D2_VERSION_SUFFIX= PYTHON_VERSION=3.7 PYTORCH_VERSION=1.8
-# ./dev/packaging/build_wheel.sh
-```
-
-## To build all wheels for combinations of CUDA and Python
-```
-./dev/packaging/build_all_wheels.sh
-./dev/packaging/gen_wheel_index.sh /path/to/wheels
-```
diff --git a/spaces/nikitaPDL2023/assignment4/detectron2/projects/PointRend/point_rend/config.py b/spaces/nikitaPDL2023/assignment4/detectron2/projects/PointRend/point_rend/config.py
deleted file mode 100644
index a02c7829533545e81669785a53db90ef7e783156..0000000000000000000000000000000000000000
--- a/spaces/nikitaPDL2023/assignment4/detectron2/projects/PointRend/point_rend/config.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (c) Facebook, Inc. and its affiliates.
-
-from detectron2.config import CfgNode as CN
-
-
-def add_pointrend_config(cfg):
- """
- Add config for PointRend.
- """
- # We retry random cropping until no single category in semantic segmentation GT occupies more
- # than `SINGLE_CATEGORY_MAX_AREA` part of the crop.
- cfg.INPUT.CROP.SINGLE_CATEGORY_MAX_AREA = 1.0
-    # Color augmentation from SSD paper for semantic segmentation model during training.
- cfg.INPUT.COLOR_AUG_SSD = False
-
- # Names of the input feature maps to be used by a coarse mask head.
- cfg.MODEL.ROI_MASK_HEAD.IN_FEATURES = ("p2",)
- cfg.MODEL.ROI_MASK_HEAD.FC_DIM = 1024
- cfg.MODEL.ROI_MASK_HEAD.NUM_FC = 2
- # The side size of a coarse mask head prediction.
- cfg.MODEL.ROI_MASK_HEAD.OUTPUT_SIDE_RESOLUTION = 7
- # True if point head is used.
- cfg.MODEL.ROI_MASK_HEAD.POINT_HEAD_ON = False
-
- cfg.MODEL.POINT_HEAD = CN()
- cfg.MODEL.POINT_HEAD.NAME = "StandardPointHead"
- cfg.MODEL.POINT_HEAD.NUM_CLASSES = 80
- # Names of the input feature maps to be used by a mask point head.
- cfg.MODEL.POINT_HEAD.IN_FEATURES = ("p2",)
- # Number of points sampled during training for a mask point head.
- cfg.MODEL.POINT_HEAD.TRAIN_NUM_POINTS = 14 * 14
- # Oversampling parameter for PointRend point sampling during training. Parameter `k` in the
- # original paper.
- cfg.MODEL.POINT_HEAD.OVERSAMPLE_RATIO = 3
-    # Importance sampling parameter for PointRend point sampling during training. Parameter `beta` in
- # the original paper.
- cfg.MODEL.POINT_HEAD.IMPORTANCE_SAMPLE_RATIO = 0.75
- # Number of subdivision steps during inference.
- cfg.MODEL.POINT_HEAD.SUBDIVISION_STEPS = 5
- # Maximum number of points selected at each subdivision step (N).
- cfg.MODEL.POINT_HEAD.SUBDIVISION_NUM_POINTS = 28 * 28
- cfg.MODEL.POINT_HEAD.FC_DIM = 256
- cfg.MODEL.POINT_HEAD.NUM_FC = 3
- cfg.MODEL.POINT_HEAD.CLS_AGNOSTIC_MASK = False
-    # If True, then coarse prediction features are used as input for each layer in PointRend's MLP.
- cfg.MODEL.POINT_HEAD.COARSE_PRED_EACH_LAYER = True
- cfg.MODEL.POINT_HEAD.COARSE_SEM_SEG_HEAD_NAME = "SemSegFPNHead"
-
- """
- Add config for Implicit PointRend.
- """
- cfg.MODEL.IMPLICIT_POINTREND = CN()
-
- cfg.MODEL.IMPLICIT_POINTREND.IMAGE_FEATURE_ENABLED = True
- cfg.MODEL.IMPLICIT_POINTREND.POS_ENC_ENABLED = True
-
- cfg.MODEL.IMPLICIT_POINTREND.PARAMS_L2_REGULARIZER = 0.00001
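For orientation, a minimal sketch of how a config-adder like the `add_pointrend_config` deleted above is typically wired into detectron2. The import path for the deleted module is an assumption (it depends on how the project is installed), and the values shown are simply the defaults registered above.

```python
from detectron2.config import get_cfg

# Hypothetical import path; in the deleted Space the function lives in
# projects/PointRend/point_rend/config.py.
from point_rend.config import add_pointrend_config

cfg = get_cfg()               # stock detectron2 config
add_pointrend_config(cfg)     # register the PointRend / Implicit PointRend keys shown above

# The new keys can then be overridden like any other detectron2 option.
cfg.MODEL.ROI_MASK_HEAD.POINT_HEAD_ON = True
cfg.MODEL.POINT_HEAD.NUM_CLASSES = 80
print(cfg.MODEL.POINT_HEAD.SUBDIVISION_STEPS)  # 5, the default set above
```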
diff --git a/spaces/nomic-ai/google_MusicCaps/index.html b/spaces/nomic-ai/google_MusicCaps/index.html
deleted file mode 100644
index f85bfefb21b9625809fc5be878c734769cbe1819..0000000000000000000000000000000000000000
--- a/spaces/nomic-ai/google_MusicCaps/index.html
+++ /dev/null
@@ -1,42 +0,0 @@
-
-
-
- google/MusicCaps
-
-
-
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/spaces/nomic-ai/timdettmers_openassistant-guanaco/index.html b/spaces/nomic-ai/timdettmers_openassistant-guanaco/index.html
deleted file mode 100644
index a4041bb347b0f90604c7ef56dbf013f02237a8b9..0000000000000000000000000000000000000000
--- a/spaces/nomic-ai/timdettmers_openassistant-guanaco/index.html
+++ /dev/null
@@ -1,42 +0,0 @@
-
-
-
- timdettmers/openassistant-guanaco
-
-
-
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/spaces/nomnomnonono/Sound-Effect-Search/src/scrape.py b/spaces/nomnomnonono/Sound-Effect-Search/src/scrape.py
deleted file mode 100644
index bde4090cc003c6ed76bef113fbb602203da5f402..0000000000000000000000000000000000000000
--- a/spaces/nomnomnonono/Sound-Effect-Search/src/scrape.py
+++ /dev/null
@@ -1,138 +0,0 @@
-import argparse
-import glob
-import os
-import time
-import urllib.request
-
-import librosa
-import pandas as pd
-import requests
-import soundfile as sf
-from bs4 import BeautifulSoup
-from omegaconf import OmegaConf
-from pydub import AudioSegment
-from requests.exceptions import Timeout
-
-
-class Scraper:
- def __init__(self, config):
- self.base_url = "https://soundeffect-lab.info/"
- self.df = pd.DataFrame([], columns=["filename", "title", "category", "url"])
- self.idx = 0
- self.config = OmegaConf.load(config)
- self.setup()
- os.makedirs(self.config.path_data, exist_ok=True)
- self.history = []
-
- def run(self):
- self.all_get()
- self.preprocess()
-
- def setup(self):
- try:
- html = requests.get(self.base_url, timeout=5)
- except Timeout:
- raise ValueError("Time Out")
- soup = BeautifulSoup(html.content, "html.parser")
- tags = soup.select("a")
- self.urls = []
- self.categories = []
- for tag in tags:
- category = tag.text
- url = tag.get("href")
- if "/sound/" in url:
- self.urls.append(url)
- self.categories.append(category)
-
- def all_get(self):
- for i in range(len(self.urls)):
- now_url = self.base_url + self.urls[i][1:]
- self.download(now_url, self.categories[i])
- self.df.to_csv(self.config.path_csv)
-
- def download(self, now_url, category):
- try:
- html = requests.get(now_url, timeout=5)
- soup = BeautifulSoup(html.content, "html.parser")
- body = soup.find(id="wrap").find("main")
- tags = body.find(id="playarea").select("a")
- count = 0
- for tag in tags:
- name = tag.get("download")
- url = tag.get("href")
- filename = os.path.join(self.config.path_data, name)
- if os.path.exists(filename):
- continue
- try:
- urllib.request.urlretrieve(now_url + url, filename)
- title = name.replace(".mp3", "")
- self.df.loc[self.idx] = {
- "filename": filename,
- "title": title,
- "category": category,
- "url": f"https://soundeffect-lab.info/sound/search.php?s={title}",
- }
- self.idx += 1
- time.sleep(2)
- count += 1
- except Exception:
- continue
- self.history.append(category)
- print(now_url, category, len(tags), count)
- paths = glob.glob(os.path.join(self.config.path_data, "*"))
- assert len(paths) == len(self.df)
-
- others = body.find(id="pagemenu-top").select("a")
- other_urls, other_categories = [], []
- for other in others:
- other_url = other.get("href")
- other_name = other.find("img").get("alt")
- if other_name in self.history:
- continue
- other_urls.append(other_url)
- other_categories.append(other_name)
- for i in range(len(other_urls)):
- self.download(self.base_url + other_urls[i][1:], other_categories[i])
- except Timeout:
- print(f"Time Out: {now_url}")
-
- def preprocess(self):
- for i in range(len(self.df)):
- song = AudioSegment.from_mp3(
- os.path.join(self.config.path_data, self.df.iloc[i]["filename"])
- )
- song.export(
- os.path.join(
- self.config.path_data,
- self.df.iloc[i]["filename"].replace(".mp3", ".wav"),
- ),
- format="wav",
- )
-
- for i in range(len(self.df)):
- file = os.path.join(
- self.config.path_data,
- self.df.iloc[i]["filename"].replace(".mp3", ".wav"),
- )
- y, sr = librosa.core.load(file, sr=self.config.sample_rate, mono=True)
- dir, name = os.path.split(file)
- sf.write(os.path.join(dir, "new_" + name), y, sr, subtype="PCM_16")
-
-
-def argparser():
- parser = argparse.ArgumentParser()
- parser.add_argument(
- "-c",
- "--config",
- type=str,
- default="config.yaml",
- help="File path for config file.",
- )
- args = parser.parse_args()
- return args
-
-
-if __name__ == "__main__":
- args = argparser()
- scraper = Scraper(args.config)
- scraper.run()
diff --git a/spaces/ntt123/WaveGRU-Text-To-Speech/sparse_matmul/layers/errno_mapping.cc b/spaces/ntt123/WaveGRU-Text-To-Speech/sparse_matmul/layers/errno_mapping.cc
deleted file mode 100644
index 558abb33937619edc9bcc6a242e414d57bfcc11c..0000000000000000000000000000000000000000
--- a/spaces/ntt123/WaveGRU-Text-To-Speech/sparse_matmul/layers/errno_mapping.cc
+++ /dev/null
@@ -1,195 +0,0 @@
-// Copyright 2021 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "sparse_matmul/layers/errno_mapping.h"
-
-#include <cstring>
-
-#include "absl/strings/str_cat.h"
-
-namespace csrblocksparse {
-
-namespace {
-
-absl::StatusCode ErrnoToCode(int error_number) {
- switch (error_number) {
- case 0:
- return absl::StatusCode::kOk;
- case EINVAL: // Invalid argument
- case ENAMETOOLONG: // Filename too long
- case E2BIG: // Argument list too long
- case EDESTADDRREQ: // Destination address required
- case EDOM: // Mathematics argument out of domain of function
- case EFAULT: // Bad address
- case EILSEQ: // Illegal byte sequence
- case ENOPROTOOPT: // Protocol not available
- case ENOSTR: // Not a STREAM
- case ENOTSOCK: // Not a socket
- case ENOTTY: // Inappropriate I/O control operation
- case EPROTOTYPE: // Protocol wrong type for socket
- case ESPIPE: // Invalid seek
- return absl::StatusCode::kInvalidArgument;
- case ETIMEDOUT: // Connection timed out
- case ETIME: // Timer expired
- return absl::StatusCode::kDeadlineExceeded;
- case ENODEV: // No such device
- case ENOENT: // No such file or directory
-#ifdef ENOMEDIUM
- case ENOMEDIUM: // No medium found
-#endif
- case ENXIO: // No such device or address
- case ESRCH: // No such process
- return absl::StatusCode::kNotFound;
- case EEXIST: // File exists
- case EADDRNOTAVAIL: // Address not available
- case EALREADY: // Connection already in progress
-#ifdef ENOTUNIQ
- case ENOTUNIQ: // Name not unique on network
-#endif
- return absl::StatusCode::kAlreadyExists;
- case EPERM: // Operation not permitted
- case EACCES: // Permission denied
-#ifdef ENOKEY
- case ENOKEY: // Required key not available
-#endif
- case EROFS: // Read only file system
- return absl::StatusCode::kPermissionDenied;
- case ENOTEMPTY: // Directory not empty
- case EISDIR: // Is a directory
- case ENOTDIR: // Not a directory
- case EADDRINUSE: // Address already in use
- case EBADF: // Invalid file descriptor
-#ifdef EBADFD
- case EBADFD: // File descriptor in bad state
-#endif
- case EBUSY: // Device or resource busy
- case ECHILD: // No child processes
- case EISCONN: // Socket is connected
-#ifdef EISNAM
- case EISNAM: // Is a named type file
-#endif
-#ifdef ENOTBLK
- case ENOTBLK: // Block device required
-#endif
- case ENOTCONN: // The socket is not connected
- case EPIPE: // Broken pipe
-#ifdef ESHUTDOWN
- case ESHUTDOWN: // Cannot send after transport endpoint shutdown
-#endif
- case ETXTBSY: // Text file busy
-#ifdef EUNATCH
- case EUNATCH: // Protocol driver not attached
-#endif
- return absl::StatusCode::kFailedPrecondition;
- case ENOSPC: // No space left on device
-#ifdef EDQUOT
- case EDQUOT: // Disk quota exceeded
-#endif
- case EMFILE: // Too many open files
- case EMLINK: // Too many links
- case ENFILE: // Too many open files in system
- case ENOBUFS: // No buffer space available
- case ENODATA: // No message is available on the STREAM read queue
- case ENOMEM: // Not enough space
- case ENOSR: // No STREAM resources
-#ifdef EUSERS
- case EUSERS: // Too many users
-#endif
- return absl::StatusCode::kResourceExhausted;
-#ifdef ECHRNG
- case ECHRNG: // Channel number out of range
-#endif
- case EFBIG: // File too large
- case EOVERFLOW: // Value too large to be stored in data type
- case ERANGE: // Result too large
- return absl::StatusCode::kOutOfRange;
-#ifdef ENOPKG
- case ENOPKG: // Package not installed
-#endif
- case ENOSYS: // Function not implemented
- case ENOTSUP: // Operation not supported
- case EAFNOSUPPORT: // Address family not supported
-#ifdef EPFNOSUPPORT
- case EPFNOSUPPORT: // Protocol family not supported
-#endif
- case EPROTONOSUPPORT: // Protocol not supported
-#ifdef ESOCKTNOSUPPORT
- case ESOCKTNOSUPPORT: // Socket type not supported
-#endif
- case EXDEV: // Improper link
- return absl::StatusCode::kUnimplemented;
- case EAGAIN: // Resource temporarily unavailable
-#ifdef ECOMM
- case ECOMM: // Communication error on send
-#endif
- case ECONNREFUSED: // Connection refused
- case ECONNABORTED: // Connection aborted
- case ECONNRESET: // Connection reset
- case EINTR: // Interrupted function call
-#ifdef EHOSTDOWN
- case EHOSTDOWN: // Host is down
-#endif
- case EHOSTUNREACH: // Host is unreachable
- case ENETDOWN: // Network is down
- case ENETRESET: // Connection aborted by network
- case ENETUNREACH: // Network unreachable
- case ENOLCK: // No locks available
- case ENOLINK: // Link has been severed
-#ifdef ENONET
- case ENONET: // Machine is not on the network
-#endif
- return absl::StatusCode::kUnavailable;
- case EDEADLK: // Resource deadlock avoided
-#ifdef ESTALE
- case ESTALE: // Stale file handle
-#endif
- return absl::StatusCode::kAborted;
- case ECANCELED: // Operation cancelled
- return absl::StatusCode::kCancelled;
- default:
- return absl::StatusCode::kUnknown;
- }
-}
-
-// POSIX `strerror_r()` returns `int`.
-ABSL_ATTRIBUTE_UNUSED std::string StrErrorResult(int result, const char* buffer,
- int error_code) {
- if (ABSL_PREDICT_FALSE(result != 0)) {
- return absl::StrCat("Unknown error ", error_code);
- }
- return buffer;
-}
-
-// GNU `strerror_r()` returns `char*`.
-ABSL_ATTRIBUTE_UNUSED std::string StrErrorResult(char* result,
- const char* buffer,
- int error_code) {
- return result;
-}
-
-std::string StrError(int error_code) {
- char message[256];
- return StrErrorResult(strerror_r(error_code, message, sizeof(message)),
- message, error_code);
-}
-
-} // namespace
-
-absl::Status ErrnoToCanonicalStatus(int error_number,
- absl::string_view message) {
- return absl::Status(ErrnoToCode(error_number),
- absl::StrCat(message, ": ", StrError(error_number)));
-}
-
-} // namespace csrblocksparse
diff --git a/spaces/oliver2023/chatgpt-on-wechat/plugins/tool/README.md b/spaces/oliver2023/chatgpt-on-wechat/plugins/tool/README.md
deleted file mode 100644
index 311ea69d9c47eec73ee37a5d53cdeca1ca0bd850..0000000000000000000000000000000000000000
--- a/spaces/oliver2023/chatgpt-on-wechat/plugins/tool/README.md
+++ /dev/null
@@ -1,72 +0,0 @@
-## Plugin description
-A plugin that lets chatgpt access the internet, search, and do numeric calculation, giving it powerful and rich extension capabilities
-To use this plugin, prefix the message with $tool when the bot's reply trigger condition is met
-### All tools of this plugin are also kept in a dedicated repository: [chatgpt-tool-hub](https://github.com/goldfishh/chatgpt-tool-hub)
-
-
-## Usage
-After enabling this plugin, 4 tools are used by default; they need no extra configuration and stay enabled:
-### 1. python
-###### A python interpreter; it interprets and executes python instructions, and can be combined with code you have chatgpt generate to print results or carry out tasks
-
-### 2. requests
-###### Usually used to fetch the concrete content of a website; results may be affected by anti-scraping measures
-
-### 3. terminal
-###### Runs shell commands on the machine the bot runs on; combine it with code you have chatgpt generate for a natural-language control channel
-
-### 4. meteo-weather
-###### Answers your questions about the weather; it needs time and location context, and is built on the [meteo open api](https://open-meteo.com/)
-Note: this tool requires time and location information, and the returned data is not guaranteed to be accurate
-
-## Prompting tips when chatting with this plugin
-### 1. Ask with explicit guidance
-#### For example:
-- Summarize the content of this link https://github.com/goldfishh/chatgpt-tool-hub
-- Use Terminal to run curl cip.cc
-- Use python to look up today's date
-
-### 2. Use a search-engine tool
-- With a search tool, chatgpt can obtain context you did not state clearly; for example, chatgpt does not know your location or the current time, so without a search tool it cannot look up the weather
-
-## Other tools
-
-### 5. wikipedia
-###### Can answer questions about the specific people, things, and events you want exact information on
-
-### 6. news *
-###### Fetches current and historical news articles from more than 80,000 sources worldwide
-
-### 7. bing-search *
-###### The bing search engine, so you no longer have to worry about which keywords to use
-
-### 8. wolfram-alpha *
-###### A knowledge engine and scientific question-answering system, commonly used for calculations in specialised subjects
-
-###### Note 1: tools marked with * need an api-key before they can be used, and some tools require external network access
-#### [How to apply for keys](https://github.com/goldfishh/chatgpt-tool-hub/blob/master/docs/apply_optional_tool.md)
-
-## config.json configuration
-###### The default tools need no configuration; the other tools must be configured manually. An example:
-```json
-{
- "tools": ["wikipedia"],
- "kwargs": {
- "top_k_results": 2,
- "no_default": false,
- "model_name": "gpt-3.5-turbo"
- }
-}
-```
-Note: the config.json file is optional; the tool can still be used if it has not been created (a small Python sketch for writing this file follows this README section)
-- `tools`: the tools loaded when this plugin is initialized. Currently available: ["wikipedia", "wolfram-alpha", "bing-search", "google-search", "news"]; the last 4 require applying for a service api
-- `kwargs`: run-time configuration for the tools; api-keys and environment settings usually go here
-  - `no_default`: controls whether the 4 default tools are loaded; if true, only the tools in the `tools` list are used and the defaults are not loaded
-  - `top_k_results`: the number of entries returned by all search-related tools; a higher value gives more reference information, but irrelevant results may get in the way, 2 is a sensible default
-  - `model_name`: the llm model used under the hood by the tool plugin; models other than 3.5 have not been tested yet, so normally keep the default
-
-
-## Notes
-- It is strongly recommended to apply for a search tool and use it with the plugin; bing-search is recommended
-- Although I deliberately add some restrictions, please do not use this plugin to harm others; find out in advance whether the content involved violates the relevant rules, and it is advisable to add filtering beforehand
-- Over the coming period I will implement some interesting tools, such as a stable diffusion Chinese prompt translator and cv model inference; anyone with ideas is welcome to follow along and extend this project together
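A small, hedged sketch of writing the optional config.json described above from Python. The documented keys (`tools`, `kwargs`, `no_default`, `top_k_results`, `model_name`) come from this README; the bing-search key name and the output path are placeholders to be checked against the chatgpt-tool-hub docs and your deployment.

```python
import json

config = {
    "tools": ["wikipedia", "bing-search"],  # extra tools to load at init
    "kwargs": {
        "no_default": False,                # keep the 4 default tools loaded as well
        "top_k_results": 2,                 # entries returned by search-style tools
        "model_name": "gpt-3.5-turbo",      # underlying llm used by the tool plugin
        "bing_api_key": "<YOUR-KEY>",       # placeholder name -- see the tool-hub docs
    },
}

# Assumed location: the plugin reads plugins/tool/config.json if it exists.
with open("plugins/tool/config.json", "w", encoding="utf-8") as f:
    json.dump(config, f, ensure_ascii=False, indent=2)
```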
diff --git a/spaces/phenomenon1981/DreamlikeArt-PhotoReal-2.0/app.py b/spaces/phenomenon1981/DreamlikeArt-PhotoReal-2.0/app.py
deleted file mode 100644
index 463262c19b3b71be0d8eb107d15e42fe11698c16..0000000000000000000000000000000000000000
--- a/spaces/phenomenon1981/DreamlikeArt-PhotoReal-2.0/app.py
+++ /dev/null
@@ -1,158 +0,0 @@
-import gradio as gr
-import os
-import sys
-from pathlib import Path
-import random
-import string
-import time
-from queue import Queue
-from threading import Thread
-import emoji
-
-
-text_gen=gr.Interface.load("spaces/phenomenon1981/MagicPrompt-Stable-Diffusion")
-def get_prompts(prompt_text):
- if prompt_text:
- return text_gen("photo, " + prompt_text)
- else:
- return text_gen("")
-proc1=gr.Interface.load("models/dreamlike-art/dreamlike-photoreal-2.0")
-
-def restart_script_periodically():
- while True:
- random_time = random.randint(540, 600)
- time.sleep(random_time)
- os.execl(sys.executable, sys.executable, *sys.argv)
-
-
-restart_thread = Thread(target=restart_script_periodically, daemon=True)
-restart_thread.start()
-
-
-queue = Queue()
-queue_threshold = 100
-
-def add_random_noise(prompt, noise_level=0.00):
- if noise_level == 0:
- noise_level = 0.00
- percentage_noise = noise_level * 5
- num_noise_chars = int(len(prompt) * (percentage_noise/100))
- noise_indices = random.sample(range(len(prompt)), num_noise_chars)
- prompt_list = list(prompt)
- noise_chars = list(string.ascii_letters + string.punctuation + ' ' + string.digits)
- noise_chars.extend(['😍', '💩', '😂', '🤔', '😊', '🤗', '😭', '🙄', '😷', '🤯', '🤫', '🥴', '😴', '🤩', '🥳', '😔', '😩', '🤪', '😇', '🤢', '😈', '👹', '👻', '🤖', '👽', '💀', '🎃', '🎅', '🎄', '🎁', '🎂', '🎉', '🎈', '🎊', '🎮', '❤️', '💔', '💕', '💖', '💗', '🐶', '🐱', '🐭', '🐹', '🦊', '🐻', '🐨', '🐯', '🦁', '🐘', '🔥', '🌧️', '🌞', '🌈', '💥', '🌴', '🌊', '🌺', '🌻', '🌸', '🎨', '🌅', '🌌', '☁️', '⛈️', '❄️', '☀️', '🌤️', '⛅️', '🌥️', '🌦️', '🌧️', '🌩️', '🌨️', '🌫️', '☔️', '🌬️', '💨', '🌪️', '🌈'])
- for index in noise_indices:
- prompt_list[index] = random.choice(noise_chars)
- return "".join(prompt_list)
-
-
-def send_it1(inputs, noise_level, proc1=proc1):
- prompt_with_noise = add_random_noise(inputs, noise_level)
- while queue.qsize() >= queue_threshold:
- time.sleep(2)
- queue.put(prompt_with_noise)
- output1 = proc1(prompt_with_noise)
- return output1
-
-def send_it2(inputs, noise_level, proc1=proc1):
- prompt_with_noise = add_random_noise(inputs, noise_level)
- while queue.qsize() >= queue_threshold:
- time.sleep(2)
- queue.put(prompt_with_noise)
- output2 = proc1(prompt_with_noise)
- return output2
-
-#def send_it3(inputs, noise_level, proc1=proc1):
- #prompt_with_noise = add_random_noise(inputs, noise_level)
- #while queue.qsize() >= queue_threshold:
- #time.sleep(2)
- #queue.put(prompt_with_noise)
- #output3 = proc1(prompt_with_noise)
- #return output3
-
-#def send_it4(inputs, noise_level, proc1=proc1):
- #prompt_with_noise = add_random_noise(inputs, noise_level)
- #while queue.qsize() >= queue_threshold:
- #time.sleep(2)
- #queue.put(prompt_with_noise)
- #output4 = proc1(prompt_with_noise)
- #return output4
-
-
-
-with gr.Blocks(css='style.css') as demo:
- gr.HTML(
- """
-
-
-
- Dreamlike Photoreal 2.0
-
-
-
- Noise Level: Controls how much randomness is added to the input before it is sent to the model. Higher noise level produces more diverse outputs, while lower noise level produces similar outputs,
- created by Phenomenon1981.
-
-
- ❤️ Press the Like Button if you enjoy my space! ❤️
-
-
- """
- )
- with gr.Column(elem_id="col-container"):
- with gr.Row(variant="compact"):
- input_text = gr.Textbox(
- label="Short Prompt",
- show_label=False,
- max_lines=2,
- placeholder="Enter a basic idea and click 'Magic Prompt'. Got no ideas? No problem, Simply just hit the magic button!",
- ).style(
- container=False,
- )
- see_prompts = gr.Button("✨ Magic Prompt ✨").style(full_width=False)
-
-
- with gr.Row(variant="compact"):
- prompt = gr.Textbox(
- label="Enter your prompt",
- show_label=False,
- max_lines=2,
- placeholder="Full Prompt",
- ).style(
- container=False,
- )
- run = gr.Button("Generate Images").style(full_width=False)
-
- with gr.Row():
- with gr.Row():
- noise_level = gr.Slider(minimum=0.0, maximum=3, step=0.1, label="Noise Level")
- with gr.Row():
- with gr.Row():
- output1=gr.Image(label="Dreamlike-photoreal-2.0",show_label=False)
- output2=gr.Image(label="Dreamlike-photoreal-2.0",show_label=False)
-
- #with gr.Row():
- #output1=gr.Image()
-
- see_prompts.click(get_prompts, inputs=[input_text], outputs=[prompt], queue=False)
- run.click(send_it1, inputs=[prompt, noise_level], outputs=[output1])
- run.click(send_it2, inputs=[prompt, noise_level], outputs=[output2])
-
-
-
- with gr.Row():
- gr.HTML(
- """
-
-
- Unleash your creative side and generate mesmerizing images with just a few clicks! Enter a spark of inspiration in the "Basic Idea" text box and click the "Magic Prompt" button to elevate it to a polished masterpiece. Make any final tweaks in the "Full Prompt" box and hit the "Generate Images" button to watch your vision come to life. Experiment with the "Noise Level" for a diverse range of outputs, from similar to wildly unique. Let the fun begin!
-
-
- """
-)
-
-    demo.queue(concurrency_count=100)
-    demo.launch(enable_queue=True, inline=True)
\ No newline at end of file
diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Scripts/activate.ps1 b/spaces/pknez/face-swap-docker/mynewshinyroop/Scripts/activate.ps1
deleted file mode 100644
index bf4b0b7c1f746721fcdb55bb756788b46a7b4001..0000000000000000000000000000000000000000
--- a/spaces/pknez/face-swap-docker/mynewshinyroop/Scripts/activate.ps1
+++ /dev/null
@@ -1,61 +0,0 @@
-$script:THIS_PATH = $myinvocation.mycommand.path
-$script:BASE_DIR = Split-Path (Resolve-Path "$THIS_PATH/..") -Parent
-
-function global:deactivate([switch] $NonDestructive) {
- if (Test-Path variable:_OLD_VIRTUAL_PATH) {
- $env:PATH = $variable:_OLD_VIRTUAL_PATH
- Remove-Variable "_OLD_VIRTUAL_PATH" -Scope global
- }
-
- if (Test-Path function:_old_virtual_prompt) {
- $function:prompt = $function:_old_virtual_prompt
- Remove-Item function:\_old_virtual_prompt
- }
-
- if ($env:VIRTUAL_ENV) {
- Remove-Item env:VIRTUAL_ENV -ErrorAction SilentlyContinue
- }
-
- if ($env:VIRTUAL_ENV_PROMPT) {
- Remove-Item env:VIRTUAL_ENV_PROMPT -ErrorAction SilentlyContinue
- }
-
- if (!$NonDestructive) {
- # Self destruct!
- Remove-Item function:deactivate
- Remove-Item function:pydoc
- }
-}
-
-function global:pydoc {
- python -m pydoc $args
-}
-
-# unset irrelevant variables
-deactivate -nondestructive
-
-$VIRTUAL_ENV = $BASE_DIR
-$env:VIRTUAL_ENV = $VIRTUAL_ENV
-
-if ("" -ne "") {
- $env:VIRTUAL_ENV_PROMPT = ""
-}
-else {
- $env:VIRTUAL_ENV_PROMPT = $( Split-Path $env:VIRTUAL_ENV -Leaf )
-}
-
-New-Variable -Scope global -Name _OLD_VIRTUAL_PATH -Value $env:PATH
-
-$env:PATH = "$env:VIRTUAL_ENV/Scripts;" + $env:PATH
-if (!$env:VIRTUAL_ENV_DISABLE_PROMPT) {
- function global:_old_virtual_prompt {
- ""
- }
- $function:_old_virtual_prompt = $function:prompt
-
- function global:prompt {
- # Add the custom prefix to the existing prompt
- $previous_prompt_value = & $function:_old_virtual_prompt
- ("(" + $env:VIRTUAL_ENV_PROMPT + ") " + $previous_prompt_value)
- }
-}
diff --git a/spaces/pmuvval1/ChemistryMoleculeModelerTest/README.md b/spaces/pmuvval1/ChemistryMoleculeModelerTest/README.md
deleted file mode 100644
index 56ee38e33cee042e8ba868e7be639d2ad84cc57d..0000000000000000000000000000000000000000
--- a/spaces/pmuvval1/ChemistryMoleculeModelerTest/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: ChemistryMoleculeModelerTest
-emoji: 📊
-colorFrom: red
-colorTo: yellow
-sdk: streamlit
-sdk_version: 1.10.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/prerna9811/Chord/portaudio/test/patest_buffer.c b/spaces/prerna9811/Chord/portaudio/test/patest_buffer.c
deleted file mode 100644
index d19e121926a03207c305bcc5199a7de5cbac51b0..0000000000000000000000000000000000000000
--- a/spaces/prerna9811/Chord/portaudio/test/patest_buffer.c
+++ /dev/null
@@ -1,206 +0,0 @@
-/** @file patest_buffer.c
- @ingroup test_src
- @brief Test opening streams with different buffer sizes.
- @author Phil Burk http://www.softsynth.com
-*/
-/*
- * $Id$
- *
- * This program uses the PortAudio Portable Audio Library.
- * For more information see: http://www.portaudio.com
- * Copyright (c) 1999-2000 Ross Bencina and Phil Burk
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files
- * (the "Software"), to deal in the Software without restriction,
- * including without limitation the rights to use, copy, modify, merge,
- * publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so,
- * subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
- * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-
-/*
- * The text above constitutes the entire PortAudio license; however,
- * the PortAudio community also makes the following non-binding requests:
- *
- * Any person wishing to distribute modifications to the Software is
- * requested to send the modifications to the original developer so that
- * they can be incorporated into the canonical version. It is also
- * requested that these non-binding requests be included along with the
- * license above.
- */
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <math.h>
-#include "portaudio.h"
-#define NUM_SECONDS (3)
-#define SAMPLE_RATE (44100)
-#ifndef M_PI
-#define M_PI (3.14159265)
-#endif
-#define TABLE_SIZE (200)
-
-#define BUFFER_TABLE 14
-long buffer_table[] = {paFramesPerBufferUnspecified,16,32,64,128,200,256,500,512,600,723,1000,1024,2345};
-
-typedef struct
-{
- short sine[TABLE_SIZE];
- int left_phase;
- int right_phase;
- unsigned int sampsToGo;
-}
-paTestData;
-PaError TestOnce( int buffersize, PaDeviceIndex );
-
-/* This routine will be called by the PortAudio engine when audio is needed.
-** It may called at interrupt level on some machines so don't do anything
-** that could mess up the system like calling malloc() or free().
-*/
-static int patest1Callback( const void *inputBuffer, void *outputBuffer,
- unsigned long framesPerBuffer,
- const PaStreamCallbackTimeInfo* timeInfo,
- PaStreamCallbackFlags statusFlags,
- void *userData )
-{
- paTestData *data = (paTestData*)userData;
- short *out = (short*)outputBuffer;
- unsigned int i;
- int finished = 0;
- (void) inputBuffer; /* Prevent "unused variable" warnings. */
-
- if( data->sampsToGo < framesPerBuffer )
- {
- /* final buffer... */
-
-        for( i=0; i<data->sampsToGo; i++ )
- {
- *out++ = data->sine[data->left_phase]; /* left */
- *out++ = data->sine[data->right_phase]; /* right */
- data->left_phase += 1;
- if( data->left_phase >= TABLE_SIZE ) data->left_phase -= TABLE_SIZE;
- data->right_phase += 3; /* higher pitch so we can distinguish left and right. */
- if( data->right_phase >= TABLE_SIZE ) data->right_phase -= TABLE_SIZE;
- }
- /* zero remainder of final buffer */
-        for( ; i<framesPerBuffer; i++ )
-        {
-            *out++ = 0; /* left */
-            *out++ = 0; /* right */
-        }
-        finished = 1;
-    }
-    else
-    {
-        for( i=0; i<framesPerBuffer; i++ )
-        {
-            *out++ = data->sine[data->left_phase]; /* left */
- *out++ = data->sine[data->right_phase]; /* right */
- data->left_phase += 1;
- if( data->left_phase >= TABLE_SIZE ) data->left_phase -= TABLE_SIZE;
- data->right_phase += 3; /* higher pitch so we can distinguish left and right. */
- if( data->right_phase >= TABLE_SIZE ) data->right_phase -= TABLE_SIZE;
- }
- data->sampsToGo -= framesPerBuffer;
- }
- return finished;
-}
-
-/*******************************************************************/
-int main(int argc, char **args);
-int main(int argc, char **args)
-{
- int i;
- int device = -1;
- PaError err;
- printf("Test opening streams with different buffer sizes\n");
- if( argc > 1 ) {
- device=atoi( args[1] );
- printf("Using device number %d.\n\n", device );
- } else {
- printf("Using default device.\n\n" );
- }
-
- for (i = 0 ; i < BUFFER_TABLE; i++)
- {
- printf("Buffer size %ld\n", buffer_table[i]);
- err = TestOnce(buffer_table[i], device);
- if( err < 0 ) return 0;
-
- }
- return 0;
-}
-
-
-PaError TestOnce( int buffersize, PaDeviceIndex device )
-{
- PaStreamParameters outputParameters;
- PaStream *stream;
- PaError err;
- paTestData data;
- int i;
- int totalSamps;
- /* initialise sinusoidal wavetable */
-    for( i=0; i<TABLE_SIZE; i++ )
-        data.sine[i] = (short) (32767.0 * sin( ((double)i/(double)TABLE_SIZE) * M_PI * 2. ));
-    data.left_phase = data.right_phase = 0;
-    data.sampsToGo = totalSamps = NUM_SECONDS * SAMPLE_RATE;
-    err = Pa_Initialize();
-    if( err != paNoError ) goto error;
-    outputParameters.device = ( device < 0 ) ? Pa_GetDefaultOutputDevice() : device;
-    outputParameters.channelCount = 2;
-    outputParameters.sampleFormat = paInt16;
-    outputParameters.suggestedLatency = Pa_GetDeviceInfo( outputParameters.device )->defaultLowOutputLatency;
- outputParameters.hostApiSpecificStreamInfo = NULL;
- err = Pa_OpenStream(
- &stream,
- NULL, /* no input */
- &outputParameters,
- SAMPLE_RATE,
- buffersize, /* frames per buffer */
- (paClipOff | paDitherOff),
- patest1Callback,
- &data );
- if( err != paNoError ) goto error;
-
- err = Pa_StartStream( stream );
- if( err != paNoError ) goto error;
- printf("Waiting for sound to finish.\n");
- Pa_Sleep(1000*NUM_SECONDS);
- err = Pa_CloseStream( stream );
- if( err != paNoError ) goto error;
- Pa_Terminate();
- return paNoError;
-error:
- Pa_Terminate();
- fprintf( stderr, "An error occurred while using the portaudio stream\n" );
- fprintf( stderr, "Error number: %d\n", err );
- fprintf( stderr, "Error message: %s\n", Pa_GetErrorText( err ) );
- fprintf( stderr, "Host Error message: %s\n", Pa_GetLastHostErrorInfo()->errorText );
- return err;
-}
diff --git a/spaces/prerna9811/Chord/portaudio/test/patest_prime.c b/spaces/prerna9811/Chord/portaudio/test/patest_prime.c
deleted file mode 100644
index e94331049009f6179b0f4656f79ecaf4a832ac1b..0000000000000000000000000000000000000000
--- a/spaces/prerna9811/Chord/portaudio/test/patest_prime.c
+++ /dev/null
@@ -1,234 +0,0 @@
-/** @file patest_prime.c
- @ingroup test_src
- @brief Test stream priming mode.
- @author Ross Bencina http://www.audiomulch.com/~rossb
-*/
-
-/*
- * $Id$
- *
- * This program uses the PortAudio Portable Audio Library.
- * For more information see: http://www.portaudio.com
- * Copyright (c) 1999-2000 Ross Bencina and Phil Burk
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files
- * (the "Software"), to deal in the Software without restriction,
- * including without limitation the rights to use, copy, modify, merge,
- * publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so,
- * subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
- * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-
-/*
- * The text above constitutes the entire PortAudio license; however,
- * the PortAudio community also makes the following non-binding requests:
- *
- * Any person wishing to distribute modifications to the Software is
- * requested to send the modifications to the original developer so that
- * they can be incorporated into the canonical version. It is also
- * requested that these non-binding requests be included along with the
- * license above.
- */
-
-#include <stdio.h>
-#include <math.h>
-#include "portaudio.h"
-#include "pa_util.h"
-
-#define NUM_BEEPS (3)
-#define SAMPLE_RATE (44100)
-#define SAMPLE_PERIOD (1.0/44100.0)
-#define FRAMES_PER_BUFFER (256)
-#define BEEP_DURATION (400)
-#define IDLE_DURATION (SAMPLE_RATE*2) /* 2 seconds */
-#define SLEEP_MSEC (50)
-
-#define STATE_BKG_IDLE (0)
-#define STATE_BKG_BEEPING (1)
-
-typedef struct
-{
- float leftPhase;
- float rightPhase;
- int state;
- int beepCountdown;
- int idleCountdown;
-}
-paTestData;
-
-static void InitializeTestData( paTestData *testData )
-{
- testData->leftPhase = 0;
- testData->rightPhase = 0;
- testData->state = STATE_BKG_BEEPING;
- testData->beepCountdown = BEEP_DURATION;
- testData->idleCountdown = IDLE_DURATION;
-}
-
-/* This routine will be called by the PortAudio engine when audio is needed.
-** It may called at interrupt level on some machines so don't do anything
-** that could mess up the system like calling malloc() or free().
-*/
-static int patestCallback( const void *inputBuffer, void *outputBuffer,
- unsigned long framesPerBuffer,
- const PaStreamCallbackTimeInfo *timeInfo,
- PaStreamCallbackFlags statusFlags, void *userData )
-{
- /* Cast data passed through stream to our structure. */
- paTestData *data = (paTestData*)userData;
- float *out = (float*)outputBuffer;
- unsigned int i;
- int result = paContinue;
-
- /* suppress unused parameter warnings */
- (void) inputBuffer;
- (void) timeInfo;
- (void) statusFlags;
-
-    for( i=0; i<framesPerBuffer; i++ )
-    {
-        switch( data->state )
- {
- case STATE_BKG_IDLE:
- *out++ = 0.0; /* left */
- *out++ = 0.0; /* right */
- --data->idleCountdown;
-
- if( data->idleCountdown <= 0 ) result = paComplete;
- break;
-
- case STATE_BKG_BEEPING:
- if( data->beepCountdown <= 0 )
- {
- data->state = STATE_BKG_IDLE;
- *out++ = 0.0; /* left */
- *out++ = 0.0; /* right */
- }
- else
- {
- /* Play sawtooth wave. */
- *out++ = data->leftPhase; /* left */
- *out++ = data->rightPhase; /* right */
- /* Generate simple sawtooth phaser that ranges between -1.0 and 1.0. */
- data->leftPhase += 0.01f;
- /* When signal reaches top, drop back down. */
- if( data->leftPhase >= 1.0f ) data->leftPhase -= 2.0f;
- /* higher pitch so we can distinguish left and right. */
- data->rightPhase += 0.03f;
- if( data->rightPhase >= 1.0f ) data->rightPhase -= 2.0f;
- }
- --data->beepCountdown;
- break;
- }
- }
-
- return result;
-}
-
-/*******************************************************************/
-static PaError DoTest( int flags )
-{
- PaStream *stream;
- PaError err = paNoError;
- paTestData data;
- PaStreamParameters outputParameters;
-
- InitializeTestData( &data );
-
- outputParameters.device = Pa_GetDefaultOutputDevice();
- if (outputParameters.device == paNoDevice) {
- fprintf(stderr,"Error: No default output device.\n");
- goto error;
- }
- outputParameters.channelCount = 2;
- outputParameters.hostApiSpecificStreamInfo = NULL;
- outputParameters.sampleFormat = paFloat32;
- outputParameters.suggestedLatency = Pa_GetDeviceInfo( outputParameters.device )->defaultHighOutputLatency;
-
- /* Open an audio I/O stream. */
- err = Pa_OpenStream(
- &stream,
- NULL, /* no input */
- &outputParameters,
- SAMPLE_RATE,
- FRAMES_PER_BUFFER, /* frames per buffer */
- paClipOff | flags, /* we won't output out of range samples so don't bother clipping them */
- patestCallback,
- &data );
- if( err != paNoError ) goto error;
-
-
- err = Pa_StartStream( stream );
- if( err != paNoError ) goto error;
-
- printf("hear \"BEEP\"\n" );
- fflush(stdout);
-
- while( ( err = Pa_IsStreamActive( stream ) ) == 1 ) Pa_Sleep(SLEEP_MSEC);
- if( err < 0 ) goto error;
-
- err = Pa_StopStream( stream );
- if( err != paNoError ) goto error;
-
- err = Pa_CloseStream( stream );
- if( err != paNoError ) goto error;
-
- return err;
-error:
- return err;
-}
-
-/*******************************************************************/
-int main(void);
-int main(void)
-{
- PaError err = paNoError;
- int i;
-
- /* Initialize library before making any other calls. */
- err = Pa_Initialize();
- if( err != paNoError ) goto error;
-
- printf("PortAudio Test: Testing stream playback with no priming.\n");
- printf("PortAudio Test: you should see BEEP before you hear it.\n");
- printf("BEEP %d times.\n", NUM_BEEPS );
-
- for( i=0; i< NUM_BEEPS; ++i )
- {
- err = DoTest( 0 );
- if( err != paNoError )
- goto error;
- }
-
- printf("PortAudio Test: Testing stream playback with priming.\n");
- printf("PortAudio Test: you should see BEEP around the same time you hear it.\n");
- for( i=0; i< NUM_BEEPS; ++i )
- {
- err = DoTest( paPrimeOutputBuffersUsingStreamCallback );
- if( err != paNoError )
- goto error;
- }
-
- printf("Test finished.\n");
-
- Pa_Terminate();
- return err;
-error:
- Pa_Terminate();
- fprintf( stderr, "An error occurred while using the portaudio stream\n" );
- fprintf( stderr, "Error number: %d\n", err );
- fprintf( stderr, "Error message: %s\n", Pa_GetErrorText( err ) );
- return err;
-}
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/ttLib/tables/M_V_A_R_.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/ttLib/tables/M_V_A_R_.py
deleted file mode 100644
index 8371795eb2f2d2c233ec1725b8a2c21453170f23..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/ttLib/tables/M_V_A_R_.py
+++ /dev/null
@@ -1,5 +0,0 @@
-from .otBase import BaseTTXConverter
-
-
-class table_M_V_A_R_(BaseTTXConverter):
- pass
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/components/annotated_image.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/components/annotated_image.py
deleted file mode 100644
index b1a050c4bb7696175c49de5784976a764e3c04ba..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/components/annotated_image.py
+++ /dev/null
@@ -1,195 +0,0 @@
-"""gr.AnnotatedImage() component."""
-
-from __future__ import annotations
-
-from typing import Any, List
-
-import numpy as np
-from gradio_client.documentation import document, set_documentation_group
-from PIL import Image as _Image # using _ to minimize namespace pollution
-
-from gradio import processing_utils, utils
-from gradio.components.base import Component
-from gradio.data_classes import FileData, GradioModel
-from gradio.events import Events
-
-set_documentation_group("component")
-
-_Image.init() # fixes https://github.com/gradio-app/gradio/issues/2843
-
-
-class Annotation(GradioModel):
- image: FileData
- label: str
-
-
-class AnnotatedImageData(GradioModel):
- image: FileData
- annotations: List[Annotation]
-
-
-@document()
-class AnnotatedImage(Component):
- """
-    Displays a base image and colored subsections on top of that image. Subsections can take the form of rectangles (e.g. object detection) or masks (e.g. image segmentation).
- Preprocessing: this component does *not* accept input.
- Postprocessing: expects a {Tuple[numpy.ndarray | PIL.Image | str, List[Tuple[numpy.ndarray | Tuple[int, int, int, int], str]]]} consisting of a base image and a list of subsections, that are either (x1, y1, x2, y2) tuples identifying object boundaries, or 0-1 confidence masks of the same shape as the image. A label is provided for each subsection.
-
- Demos: image_segmentation
- """
-
- EVENTS = [Events.select]
-
- data_model = AnnotatedImageData
-
- def __init__(
- self,
- value: tuple[
- np.ndarray | _Image.Image | str,
- list[tuple[np.ndarray | tuple[int, int, int, int], str]],
- ]
- | None = None,
- *,
- show_legend: bool = True,
- height: int | None = None,
- width: int | None = None,
- color_map: dict[str, str] | None = None,
- label: str | None = None,
- every: float | None = None,
- show_label: bool | None = None,
- container: bool = True,
- scale: int | None = None,
- min_width: int = 160,
- visible: bool = True,
- elem_id: str | None = None,
- elem_classes: list[str] | str | None = None,
- render: bool = True,
- ):
- """
- Parameters:
- value: Tuple of base image and list of (subsection, label) pairs.
- show_legend: If True, will show a legend of the subsections.
- height: Height of the displayed image.
- width: Width of the displayed image.
- color_map: A dictionary mapping labels to colors. The colors must be specified as hex codes.
- label: The label for this component. Appears above the component and is also used as the header if there are a table of examples for this component. If None and used in a `gr.Interface`, the label will be the name of the parameter this component is assigned to.
- every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.
- show_label: if True, will display label.
- container: If True, will place the component in a container - providing some extra padding around the border.
- scale: relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.
- min_width: minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.
- visible: If False, component will be hidden.
- elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
- elem_classes: An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.
-            render: If False, component will not be rendered in the Blocks context. Should be used if the intention is to assign event listeners now but render the component later.
- """
- self.show_legend = show_legend
- self.height = height
- self.width = width
- self.color_map = color_map
- super().__init__(
- label=label,
- every=every,
- show_label=show_label,
- container=container,
- scale=scale,
- min_width=min_width,
- visible=visible,
- elem_id=elem_id,
- elem_classes=elem_classes,
- render=render,
- value=value,
- )
-
- def postprocess(
- self,
- value: tuple[
- np.ndarray | _Image.Image | str,
- list[tuple[np.ndarray | tuple[int, int, int, int], str]],
- ]
- | None,
- ) -> AnnotatedImageData | None:
- """
- Parameters:
- value: Tuple of base image and list of subsections, with each subsection a two-part tuple where the first element is a 4 element bounding box or a 0-1 confidence mask, and the second element is the label.
- Returns:
-            Tuple of base image file and list of subsections, with each subsection a two-part tuple where the first element is the image path of the mask, and the second element is the label.
- """
- if value is None:
- return None
- base_img = value[0]
- if isinstance(base_img, str):
- base_img_path = base_img
- base_img = np.array(_Image.open(base_img))
- elif isinstance(base_img, np.ndarray):
- base_file = processing_utils.save_img_array_to_cache(
- base_img, cache_dir=self.GRADIO_CACHE
- )
- base_img_path = str(utils.abspath(base_file))
- elif isinstance(base_img, _Image.Image):
- base_file = processing_utils.save_pil_to_cache(
- base_img, cache_dir=self.GRADIO_CACHE
- )
- base_img_path = str(utils.abspath(base_file))
- base_img = np.array(base_img)
- else:
- raise ValueError(
- "AnnotatedImage only accepts filepaths, PIL images or numpy arrays for the base image."
- )
-
- sections = []
- color_map = self.color_map or {}
-
- def hex_to_rgb(value):
- value = value.lstrip("#")
- lv = len(value)
- return [int(value[i : i + lv // 3], 16) for i in range(0, lv, lv // 3)]
-
- for mask, label in value[1]:
- mask_array = np.zeros((base_img.shape[0], base_img.shape[1]))
- if isinstance(mask, np.ndarray):
- mask_array = mask
- else:
- x1, y1, x2, y2 = mask
- border_width = 3
- mask_array[y1:y2, x1:x2] = 0.5
- mask_array[y1:y2, x1 : x1 + border_width] = 1
- mask_array[y1:y2, x2 - border_width : x2] = 1
- mask_array[y1 : y1 + border_width, x1:x2] = 1
- mask_array[y2 - border_width : y2, x1:x2] = 1
-
- if label in color_map:
- rgb_color = hex_to_rgb(color_map[label])
- else:
- rgb_color = [255, 0, 0]
- colored_mask = np.zeros((base_img.shape[0], base_img.shape[1], 4))
- solid_mask = np.copy(mask_array)
- solid_mask[solid_mask > 0] = 1
-
- colored_mask[:, :, 0] = rgb_color[0] * solid_mask
- colored_mask[:, :, 1] = rgb_color[1] * solid_mask
- colored_mask[:, :, 2] = rgb_color[2] * solid_mask
- colored_mask[:, :, 3] = mask_array * 255
-
- colored_mask_img = _Image.fromarray((colored_mask).astype(np.uint8))
-
- mask_file = processing_utils.save_pil_to_cache(
- colored_mask_img, cache_dir=self.GRADIO_CACHE
- )
- mask_file_path = str(utils.abspath(mask_file))
- sections.append(
- Annotation(image=FileData(path=mask_file_path), label=label)
- )
-
- return AnnotatedImageData(
- image=FileData(path=base_img_path),
- annotations=sections,
- )
-
- def example_inputs(self) -> Any:
- return {}
-
- def preprocess(
- self, payload: AnnotatedImageData | None
- ) -> AnnotatedImageData | None:
- return payload
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/markdown_it/rules_inline/newline.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/markdown_it/rules_inline/newline.py
deleted file mode 100644
index ca8f1db02da07b023aa9fdb08ee7af326f773da8..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/markdown_it/rules_inline/newline.py
+++ /dev/null
@@ -1,43 +0,0 @@
-"""Proceess '\n'."""
-from ..common.utils import charStrAt, isStrSpace
-from .state_inline import StateInline
-
-
-def newline(state: StateInline, silent: bool) -> bool:
- pos = state.pos
-
- if state.src[pos] != "\n":
- return False
-
- pmax = len(state.pending) - 1
- maximum = state.posMax
-
- # ' \n' -> hardbreak
- # Lookup in pending chars is bad practice! Don't copy to other rules!
- # Pending string is stored in concat mode, indexed lookups will cause
- # conversion to flat mode.
- if not silent:
- if pmax >= 0 and charStrAt(state.pending, pmax) == " ":
- if pmax >= 1 and charStrAt(state.pending, pmax - 1) == " ":
- # Find whitespaces tail of pending chars.
- ws = pmax - 1
- while ws >= 1 and charStrAt(state.pending, ws - 1) == " ":
- ws -= 1
- state.pending = state.pending[:ws]
-
- state.push("hardbreak", "br", 0)
- else:
- state.pending = state.pending[:-1]
- state.push("softbreak", "br", 0)
-
- else:
- state.push("softbreak", "br", 0)
-
- pos += 1
-
- # skip heading spaces for next line
- while pos < maximum and isStrSpace(state.src[pos]):
- pos += 1
-
- state.pos = pos
- return True
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/core/include/numpy/numpyconfig.h b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/core/include/numpy/numpyconfig.h
deleted file mode 100644
index 1c25aa5fc4d1b2a9e0bbc71ab182744a5cf669c3..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/core/include/numpy/numpyconfig.h
+++ /dev/null
@@ -1,138 +0,0 @@
-#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_NUMPYCONFIG_H_
-#define NUMPY_CORE_INCLUDE_NUMPY_NPY_NUMPYCONFIG_H_
-
-#include "_numpyconfig.h"
-
-/*
- * On Mac OS X, because there is only one configuration stage for all the archs
- * in universal builds, any macro which depends on the arch needs to be
- * hardcoded.
- *
- * Note that distutils/pip will attempt a universal2 build when Python itself
- * is built as universal2, hence this hardcoding is needed even if we do not
- * support universal2 wheels anymore (see gh-22796).
- * This code block can be removed after we have dropped the setup.py based
- * build completely.
- */
-#ifdef __APPLE__
- #undef NPY_SIZEOF_LONG
- #undef NPY_SIZEOF_PY_INTPTR_T
-
- #ifdef __LP64__
- #define NPY_SIZEOF_LONG 8
- #define NPY_SIZEOF_PY_INTPTR_T 8
- #else
- #define NPY_SIZEOF_LONG 4
- #define NPY_SIZEOF_PY_INTPTR_T 4
- #endif
-
- #undef NPY_SIZEOF_LONGDOUBLE
- #undef NPY_SIZEOF_COMPLEX_LONGDOUBLE
- #ifdef HAVE_LDOUBLE_IEEE_DOUBLE_LE
- #undef HAVE_LDOUBLE_IEEE_DOUBLE_LE
- #endif
- #ifdef HAVE_LDOUBLE_INTEL_EXTENDED_16_BYTES_LE
- #undef HAVE_LDOUBLE_INTEL_EXTENDED_16_BYTES_LE
- #endif
-
- #if defined(__arm64__)
- #define NPY_SIZEOF_LONGDOUBLE 8
- #define NPY_SIZEOF_COMPLEX_LONGDOUBLE 16
- #define HAVE_LDOUBLE_IEEE_DOUBLE_LE 1
- #elif defined(__x86_64)
- #define NPY_SIZEOF_LONGDOUBLE 16
- #define NPY_SIZEOF_COMPLEX_LONGDOUBLE 32
- #define HAVE_LDOUBLE_INTEL_EXTENDED_16_BYTES_LE 1
- #elif defined (__i386)
- #define NPY_SIZEOF_LONGDOUBLE 12
- #define NPY_SIZEOF_COMPLEX_LONGDOUBLE 24
- #elif defined(__ppc__) || defined (__ppc64__)
- #define NPY_SIZEOF_LONGDOUBLE 16
- #define NPY_SIZEOF_COMPLEX_LONGDOUBLE 32
- #else
- #error "unknown architecture"
- #endif
-#endif
-
-
-/**
- * To help with both NPY_TARGET_VERSION and the NPY_NO_DEPRECATED_API macro,
- * we include API version numbers for specific versions of NumPy.
- * To exclude all API that was deprecated as of 1.7, add the following before
- * #including any NumPy headers:
- * #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
- * The same is true for NPY_TARGET_VERSION, although NumPy will default to
- * a backwards compatible build anyway.
- */
-#define NPY_1_7_API_VERSION 0x00000007
-#define NPY_1_8_API_VERSION 0x00000008
-#define NPY_1_9_API_VERSION 0x00000009
-#define NPY_1_10_API_VERSION 0x0000000a
-#define NPY_1_11_API_VERSION 0x0000000a
-#define NPY_1_12_API_VERSION 0x0000000a
-#define NPY_1_13_API_VERSION 0x0000000b
-#define NPY_1_14_API_VERSION 0x0000000c
-#define NPY_1_15_API_VERSION 0x0000000c
-#define NPY_1_16_API_VERSION 0x0000000d
-#define NPY_1_17_API_VERSION 0x0000000d
-#define NPY_1_18_API_VERSION 0x0000000d
-#define NPY_1_19_API_VERSION 0x0000000d
-#define NPY_1_20_API_VERSION 0x0000000e
-#define NPY_1_21_API_VERSION 0x0000000e
-#define NPY_1_22_API_VERSION 0x0000000f
-#define NPY_1_23_API_VERSION 0x00000010
-#define NPY_1_24_API_VERSION 0x00000010
-#define NPY_1_25_API_VERSION 0x00000011
-
-
-/*
- * Binary compatibility version number. This number is increased
- * whenever the C-API is changed such that binary compatibility is
- * broken, i.e. whenever a recompile of extension modules is needed.
- */
-#define NPY_VERSION NPY_ABI_VERSION
-
-/*
- * Minor API version we are compiling to be compatible with. The version
- * number is always increased when the API changes via `NPY_API_VERSION`
- * (and should maybe just track the NumPy version).
- *
- * If we have an internal build, we always target the current version of
- * course.
- *
- * For downstream users, we default to an older version to provide them with
- * maximum compatibility by default. Downstream can choose to extend that
- * default, or narrow it down if they wish to use newer API. If you adjust
- * this, consider the Python version support (example for 1.25.x):
- *
- * NumPy 1.25.x supports Python: 3.9 3.10 3.11 (3.12)
- * NumPy 1.19.x supports Python: 3.6 3.7 3.8 3.9
- * NumPy 1.17.x supports Python: 3.5 3.6 3.7 3.8
- * NumPy 1.15.x supports Python: ... 3.6 3.7
- *
- * Users of the stable ABI may wish to target the last Python that is not
- * end of life. This would be 3.8 at NumPy 1.25 release time.
- * 1.17 as default was the choice of oldest-support-numpy at the time and
- * has in practice no limit (compared to 1.19). Even earlier becomes legacy.
- */
-#if defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD
- /* NumPy internal build, always use current version. */
- #define NPY_FEATURE_VERSION NPY_API_VERSION
-#elif defined(NPY_TARGET_VERSION) && NPY_TARGET_VERSION
- /* user provided a target version, use it */
- #define NPY_FEATURE_VERSION NPY_TARGET_VERSION
-#else
- /* Use the default (increase when dropping Python 3.9 support) */
- #define NPY_FEATURE_VERSION NPY_1_19_API_VERSION
-#endif
-
-/* Sanity check the (requested) feature version */
-#if NPY_FEATURE_VERSION > NPY_API_VERSION
- #error "NPY_TARGET_VERSION higher than NumPy headers!"
-#elif NPY_FEATURE_VERSION < NPY_1_15_API_VERSION
- /* No support for irrelevant old targets, no need for error, but warn. */
- #warning "Requested NumPy target lower than supported NumPy 1.15."
-#endif
-
-
-#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_NUMPYCONFIG_H_ */
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/f2py/tests/src/assumed_shape/foo_free.f90 b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/f2py/tests/src/assumed_shape/foo_free.f90
deleted file mode 100644
index b301710f5dda005e67e40cc21a5e0d62d0ec116a..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/f2py/tests/src/assumed_shape/foo_free.f90
+++ /dev/null
@@ -1,34 +0,0 @@
-
-subroutine sum(x, res)
- implicit none
- real, intent(in) :: x(:)
- real, intent(out) :: res
-
- integer :: i
-
- !print *, "sum: size(x) = ", size(x)
-
- res = 0.0
-
- do i = 1, size(x)
- res = res + x(i)
- enddo
-
-end subroutine sum
-
-function fsum(x) result (res)
- implicit none
- real, intent(in) :: x(:)
- real :: res
-
- integer :: i
-
- !print *, "fsum: size(x) = ", size(x)
-
- res = 0.0
-
- do i = 1, size(x)
- res = res + x(i)
- enddo
-
-end function fsum
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/f2py/tests/src/kind/foo.f90 b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/f2py/tests/src/kind/foo.f90
deleted file mode 100644
index d3d15cfb20a15004ed86e45dc91792d1c089033a..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/f2py/tests/src/kind/foo.f90
+++ /dev/null
@@ -1,20 +0,0 @@
-
-
-subroutine selectedrealkind(p, r, res)
- implicit none
-
- integer, intent(in) :: p, r
- !f2py integer :: r=0
- integer, intent(out) :: res
- res = selected_real_kind(p, r)
-
-end subroutine
-
-subroutine selectedintkind(p, res)
- implicit none
-
- integer, intent(in) :: p
- integer, intent(out) :: res
- res = selected_int_kind(p)
-
-end subroutine
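The two subroutines above are thin wrappers around Fortran's kind-selection intrinsics and are simple enough to build directly with f2py. A hedged sketch, assuming `numpy.f2py.compile` is available and a Fortran compiler is on the path; the module name and printed values are illustrative only:

```python
# Sketch (assumptions: working Fortran toolchain, numpy.f2py.compile available).
import numpy.f2py

with open("foo.f90") as fh:
    numpy.f2py.compile(fh.read(), modulename="fkind", extension=".f90", verbose=False)

import fkind  # extension module written to the current directory
print(fkind.selectedintkind(9))         # smallest integer kind holding 10**9, typically 4
print(fkind.selectedrealkind(15, 300))  # double-precision kind, typically 8
```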
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/lib/recfunctions.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/lib/recfunctions.py
deleted file mode 100644
index 83ae413c6032bceec05c7e4dce17e16113f7625c..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/lib/recfunctions.py
+++ /dev/null
@@ -1,1673 +0,0 @@
-"""
-Collection of utilities to manipulate structured arrays.
-
-Most of these functions were initially implemented by John Hunter for
-matplotlib. They have been rewritten and extended for convenience.
-
-"""
-import itertools
-import numpy as np
-import numpy.ma as ma
-from numpy import ndarray, recarray
-from numpy.ma import MaskedArray
-from numpy.ma.mrecords import MaskedRecords
-from numpy.core.overrides import array_function_dispatch
-from numpy.lib._iotools import _is_string_like
-
-_check_fill_value = np.ma.core._check_fill_value
-
-
-__all__ = [
- 'append_fields', 'apply_along_fields', 'assign_fields_by_name',
- 'drop_fields', 'find_duplicates', 'flatten_descr',
- 'get_fieldstructure', 'get_names', 'get_names_flat',
- 'join_by', 'merge_arrays', 'rec_append_fields',
- 'rec_drop_fields', 'rec_join', 'recursive_fill_fields',
- 'rename_fields', 'repack_fields', 'require_fields',
- 'stack_arrays', 'structured_to_unstructured', 'unstructured_to_structured',
- ]
-
-
-def _recursive_fill_fields_dispatcher(input, output):
- return (input, output)
-
-
-@array_function_dispatch(_recursive_fill_fields_dispatcher)
-def recursive_fill_fields(input, output):
- """
- Fills fields from output with fields from input,
- with support for nested structures.
-
- Parameters
- ----------
- input : ndarray
- Input array.
- output : ndarray
- Output array.
-
- Notes
- -----
- * `output` should be at least the same size as `input`
-
- Examples
- --------
- >>> from numpy.lib import recfunctions as rfn
- >>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', np.int64), ('B', np.float64)])
- >>> b = np.zeros((3,), dtype=a.dtype)
- >>> rfn.recursive_fill_fields(a, b)
- array([(1, 10.), (2, 20.), (0, 0.)], dtype=[('A', '>> dt = np.dtype([(('a', 'A'), np.int64), ('b', np.double, 3)])
- >>> dt.descr
- [(('a', 'A'), '>> _get_fieldspec(dt)
- [(('a', 'A'), dtype('int64')), ('b', dtype(('>> from numpy.lib import recfunctions as rfn
- >>> rfn.get_names(np.empty((1,), dtype=[('A', int)]).dtype)
- ('A',)
- >>> rfn.get_names(np.empty((1,), dtype=[('A',int), ('B', float)]).dtype)
- ('A', 'B')
- >>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
- >>> rfn.get_names(adtype)
- ('a', ('b', ('ba', 'bb')))
- """
- listnames = []
- names = adtype.names
- for name in names:
- current = adtype[name]
- if current.names is not None:
- listnames.append((name, tuple(get_names(current))))
- else:
- listnames.append(name)
- return tuple(listnames)
-
-
-def get_names_flat(adtype):
- """
- Returns the field names of the input datatype as a tuple. Input datatype
- must have fields, otherwise an error is raised.
- Nested structures are flattened beforehand.
-
- Parameters
- ----------
- adtype : dtype
- Input datatype
-
- Examples
- --------
- >>> from numpy.lib import recfunctions as rfn
- >>> rfn.get_names_flat(np.empty((1,), dtype=[('A', int)]).dtype) is None
- False
- >>> rfn.get_names_flat(np.empty((1,), dtype=[('A',int), ('B', str)]).dtype)
- ('A', 'B')
- >>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
- >>> rfn.get_names_flat(adtype)
- ('a', 'b', 'ba', 'bb')
- """
- listnames = []
- names = adtype.names
- for name in names:
- listnames.append(name)
- current = adtype[name]
- if current.names is not None:
- listnames.extend(get_names_flat(current))
- return tuple(listnames)
-
-
-def flatten_descr(ndtype):
- """
- Flatten a structured data-type description.
-
- Examples
- --------
- >>> from numpy.lib import recfunctions as rfn
- >>> ndtype = np.dtype([('a', '<i4'), ('b', [('ba', '<f8'), ('bb', '<i4')])])
- >>> rfn.flatten_descr(ndtype)
- (('a', dtype('int32')), ('ba', dtype('float64')), ('bb', dtype('int32')))
-
- """
- names = ndtype.names
- if names is None:
- return (('', ndtype),)
- else:
- descr = []
- for field in names:
- (typ, _) = ndtype.fields[field]
- if typ.names is not None:
- descr.extend(flatten_descr(typ))
- else:
- descr.append((field, typ))
- return tuple(descr)
-
-
-def _zip_dtype(seqarrays, flatten=False):
- newdtype = []
- if flatten:
- for a in seqarrays:
- newdtype.extend(flatten_descr(a.dtype))
- else:
- for a in seqarrays:
- current = a.dtype
- if current.names is not None and len(current.names) == 1:
- # special case - dtypes of 1 field are flattened
- newdtype.extend(_get_fieldspec(current))
- else:
- newdtype.append(('', current))
- return np.dtype(newdtype)
-
-
-def _zip_descr(seqarrays, flatten=False):
- """
- Combine the dtype description of a series of arrays.
-
- Parameters
- ----------
- seqarrays : sequence of arrays
- Sequence of arrays
- flatten : {boolean}, optional
- Whether to collapse nested descriptions.
- """
- return _zip_dtype(seqarrays, flatten=flatten).descr
-
-
-def get_fieldstructure(adtype, lastname=None, parents=None,):
- """
- Returns a dictionary with fields indexing lists of their parent fields.
-
- This function is used to simplify access to fields nested in other fields.
-
- Parameters
- ----------
- adtype : np.dtype
- Input datatype
- lastname : optional
- Last processed field name (used internally during recursion).
- parents : dictionary
- Dictionary of parent fields (used internally during recursion).
-
- Examples
- --------
- >>> from numpy.lib import recfunctions as rfn
- >>> ndtype = np.dtype([('A', int),
- ... ('B', [('BA', int),
- ... ('BB', [('BBA', int), ('BBB', int)])])])
- >>> rfn.get_fieldstructure(ndtype)
- ... # XXX: possible regression, order of BBA and BBB is swapped
- {'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'], 'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']}
-
- """
- if parents is None:
- parents = {}
- names = adtype.names
- for name in names:
- current = adtype[name]
- if current.names is not None:
- if lastname:
- parents[name] = [lastname, ]
- else:
- parents[name] = []
- parents.update(get_fieldstructure(current, name, parents))
- else:
- lastparent = [_ for _ in (parents.get(lastname, []) or [])]
- if lastparent:
- lastparent.append(lastname)
- elif lastname:
- lastparent = [lastname, ]
- parents[name] = lastparent or []
- return parents
-
-
-def _izip_fields_flat(iterable):
- """
- Returns an iterator of concatenated fields from a sequence of arrays,
- collapsing any nested structure.
-
- """
- for element in iterable:
- if isinstance(element, np.void):
- yield from _izip_fields_flat(tuple(element))
- else:
- yield element
-
-
-def _izip_fields(iterable):
- """
- Returns an iterator of concatenated fields from a sequence of arrays.
-
- """
- for element in iterable:
- if (hasattr(element, '__iter__') and
- not isinstance(element, str)):
- yield from _izip_fields(element)
- elif isinstance(element, np.void) and len(tuple(element)) == 1:
- # this branch does the same as the previous one
- yield from _izip_fields(element)
- else:
- yield element
-
-
-def _izip_records(seqarrays, fill_value=None, flatten=True):
- """
- Returns an iterator of concatenated items from a sequence of arrays.
-
- Parameters
- ----------
- seqarrays : sequence of arrays
- Sequence of arrays.
- fill_value : {None, integer}
- Value used to pad shorter iterables.
- flatten : {True, False},
- Whether to collapse nested fields.
- """
-
- # Should we flatten the items, or just use a nested approach
- if flatten:
- zipfunc = _izip_fields_flat
- else:
- zipfunc = _izip_fields
-
- for tup in itertools.zip_longest(*seqarrays, fillvalue=fill_value):
- yield tuple(zipfunc(tup))
-
-
-def _fix_output(output, usemask=True, asrecarray=False):
- """
- Private function: return a recarray, a ndarray, a MaskedArray
- or a MaskedRecords depending on the input parameters
- """
- if not isinstance(output, MaskedArray):
- usemask = False
- if usemask:
- if asrecarray:
- output = output.view(MaskedRecords)
- else:
- output = ma.filled(output)
- if asrecarray:
- output = output.view(recarray)
- return output
-
-
-def _fix_defaults(output, defaults=None):
- """
- Update the fill_value and masked data of `output`
- from the default given in a dictionary defaults.
- """
- names = output.dtype.names
- (data, mask, fill_value) = (output.data, output.mask, output.fill_value)
- for (k, v) in (defaults or {}).items():
- if k in names:
- fill_value[k] = v
- data[k][mask[k]] = v
- return output
-
-
-def _merge_arrays_dispatcher(seqarrays, fill_value=None, flatten=None,
- usemask=None, asrecarray=None):
- return seqarrays
-
-
-@array_function_dispatch(_merge_arrays_dispatcher)
-def merge_arrays(seqarrays, fill_value=-1, flatten=False,
- usemask=False, asrecarray=False):
- """
- Merge arrays field by field.
-
- Parameters
- ----------
- seqarrays : sequence of ndarrays
- Sequence of arrays
- fill_value : {float}, optional
- Filling value used to pad missing data on the shorter arrays.
- flatten : {False, True}, optional
- Whether to collapse nested fields.
- usemask : {False, True}, optional
- Whether to return a masked array or not.
- asrecarray : {False, True}, optional
- Whether to return a recarray (MaskedRecords) or not.
-
- Examples
- --------
- >>> from numpy.lib import recfunctions as rfn
- >>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])))
- array([( 1, 10.), ( 2, 20.), (-1, 30.)],
- dtype=[('f0', '>> rfn.merge_arrays((np.array([1, 2], dtype=np.int64),
- ... np.array([10., 20., 30.])), usemask=False)
- array([(1, 10.0), (2, 20.0), (-1, 30.0)],
- dtype=[('f0', '>> rfn.merge_arrays((np.array([1, 2]).view([('a', np.int64)]),
- ... np.array([10., 20., 30.])),
- ... usemask=False, asrecarray=True)
- rec.array([( 1, 10.), ( 2, 20.), (-1, 30.)],
- dtype=[('a', '>> from numpy.lib import recfunctions as rfn
- >>> a = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
- ... dtype=[('a', np.int64), ('b', [('ba', np.double), ('bb', np.int64)])])
- >>> rfn.drop_fields(a, 'a')
- array([((2., 3),), ((5., 6),)],
- dtype=[('b', [('ba', '>> rfn.drop_fields(a, 'ba')
- array([(1, (3,)), (4, (6,))], dtype=[('a', '>> rfn.drop_fields(a, ['ba', 'bb'])
- array([(1,), (4,)], dtype=[('a', '>> from numpy.lib import recfunctions as rfn
- >>> a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))],
- ... dtype=[('a', int),('b', [('ba', float), ('bb', (float, 2))])])
- >>> rfn.rename_fields(a, {'a':'A', 'bb':'BB'})
- array([(1, (2., [ 3., 30.])), (4, (5., [ 6., 60.]))],
- dtype=[('A', ' 1:
- data = merge_arrays(data, flatten=True, usemask=usemask,
- fill_value=fill_value)
- else:
- data = data.pop()
- #
- output = ma.masked_all(
- max(len(base), len(data)),
- dtype=_get_fieldspec(base.dtype) + _get_fieldspec(data.dtype))
- output = recursive_fill_fields(base, output)
- output = recursive_fill_fields(data, output)
- #
- return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
-
-
-def _rec_append_fields_dispatcher(base, names, data, dtypes=None):
- yield base
- yield from data
-
-
-@array_function_dispatch(_rec_append_fields_dispatcher)
-def rec_append_fields(base, names, data, dtypes=None):
- """
- Add new fields to an existing array.
-
- The names of the fields are given with the `names` arguments,
- the corresponding values with the `data` arguments.
- If a single field is appended, `names`, `data` and `dtypes` do not have
- to be lists but just values.
-
- Parameters
- ----------
- base : array
- Input array to extend.
- names : string, sequence
- String or sequence of strings corresponding to the names
- of the new fields.
- data : array or sequence of arrays
- Array or sequence of arrays storing the fields to add to the base.
- dtypes : sequence of datatypes, optional
- Datatype or sequence of datatypes.
- If None, the datatypes are estimated from the `data`.
-
- See Also
- --------
- append_fields
-
- Returns
- -------
- appended_array : np.recarray
- """
- return append_fields(base, names, data=data, dtypes=dtypes,
- asrecarray=True, usemask=False)
-
-
-def _repack_fields_dispatcher(a, align=None, recurse=None):
- return (a,)
-
-
-@array_function_dispatch(_repack_fields_dispatcher)
-def repack_fields(a, align=False, recurse=False):
- """
- Re-pack the fields of a structured array or dtype in memory.
-
- The memory layout of structured datatypes allows fields at arbitrary
- byte offsets. This means the fields can be separated by padding bytes,
- their offsets can be non-monotonically increasing, and they can overlap.
-
- This method removes any overlaps and reorders the fields in memory so they
- have increasing byte offsets, and adds or removes padding bytes depending
- on the `align` option, which behaves like the `align` option to
- `numpy.dtype`.
-
- If `align=False`, this method produces a "packed" memory layout in which
- each field starts at the byte the previous field ended, and any padding
- bytes are removed.
-
- If `align=True`, this method produces an "aligned" memory layout in which
- each field's offset is a multiple of its alignment, and the total itemsize
- is a multiple of the largest alignment, by adding padding bytes as needed.
-
- Parameters
- ----------
- a : ndarray or dtype
- array or dtype for which to repack the fields.
- align : boolean
- If true, use an "aligned" memory layout, otherwise use a "packed" layout.
- recurse : boolean
- If True, also repack nested structures.
-
- Returns
- -------
- repacked : ndarray or dtype
- Copy of `a` with fields repacked, or `a` itself if no repacking was
- needed.
-
- Examples
- --------
-
- >>> from numpy.lib import recfunctions as rfn
- >>> def print_offsets(d):
- ... print("offsets:", [d.fields[name][1] for name in d.names])
- ... print("itemsize:", d.itemsize)
- ...
- >>> dt = np.dtype('u1, >> dt
- dtype({'names': ['f0', 'f1', 'f2'], 'formats': ['u1', '>> print_offsets(dt)
- offsets: [0, 8, 16]
- itemsize: 24
- >>> packed_dt = rfn.repack_fields(dt)
- >>> packed_dt
- dtype([('f0', 'u1'), ('f1', '>> print_offsets(packed_dt)
- offsets: [0, 1, 9]
- itemsize: 17
-
- """
- if not isinstance(a, np.dtype):
- dt = repack_fields(a.dtype, align=align, recurse=recurse)
- return a.astype(dt, copy=False)
-
- if a.names is None:
- return a
-
- fieldinfo = []
- for name in a.names:
- tup = a.fields[name]
- if recurse:
- fmt = repack_fields(tup[0], align=align, recurse=True)
- else:
- fmt = tup[0]
-
- if len(tup) == 3:
- name = (tup[2], name)
-
- fieldinfo.append((name, fmt))
-
- dt = np.dtype(fieldinfo, align=align)
- return np.dtype((a.type, dt))
-
-def _get_fields_and_offsets(dt, offset=0):
- """
- Returns a flat list of (dtype, count, offset) tuples of all the
- scalar fields in the dtype "dt", including nested fields, in left
- to right order.
- """
-
- # counts up elements in subarrays, including nested subarrays, and returns
- # base dtype and count
- def count_elem(dt):
- count = 1
- while dt.shape != ():
- for size in dt.shape:
- count *= size
- dt = dt.base
- return dt, count
-
- fields = []
- for name in dt.names:
- field = dt.fields[name]
- f_dt, f_offset = field[0], field[1]
- f_dt, n = count_elem(f_dt)
-
- if f_dt.names is None:
- fields.append((np.dtype((f_dt, (n,))), n, f_offset + offset))
- else:
- subfields = _get_fields_and_offsets(f_dt, f_offset + offset)
- size = f_dt.itemsize
-
- for i in range(n):
- if i == 0:
- # optimization: avoid list comprehension if no subarray
- fields.extend(subfields)
- else:
- fields.extend([(d, c, o + i*size) for d, c, o in subfields])
- return fields
-
-def _common_stride(offsets, counts, itemsize):
- """
- Returns the stride between the fields, or None if the stride is not
- constant. The values in "counts" designate the lengths of
- subarrays. Subarrays are treated as many contiguous fields, always
- with a positive stride.
- """
- if len(offsets) <= 1:
- return itemsize
-
- negative = offsets[1] < offsets[0] # negative stride
- if negative:
- # reverse, so offsets will be ascending
- it = zip(reversed(offsets), reversed(counts))
- else:
- it = zip(offsets, counts)
-
- prev_offset = None
- stride = None
- for offset, count in it:
- if count != 1: # subarray: always c-contiguous
- if negative:
- return None # subarrays can never have a negative stride
- if stride is None:
- stride = itemsize
- if stride != itemsize:
- return None
- end_offset = offset + (count - 1) * itemsize
- else:
- end_offset = offset
-
- if prev_offset is not None:
- new_stride = offset - prev_offset
- if stride is None:
- stride = new_stride
- if stride != new_stride:
- return None
-
- prev_offset = end_offset
-
- if negative:
- return -stride
- return stride
-
-
-def _structured_to_unstructured_dispatcher(arr, dtype=None, copy=None,
- casting=None):
- return (arr,)
-
-@array_function_dispatch(_structured_to_unstructured_dispatcher)
-def structured_to_unstructured(arr, dtype=None, copy=False, casting='unsafe'):
- """
- Converts an n-D structured array into an (n+1)-D unstructured array.
-
- The new array will have a new last dimension equal in size to the
- number of field-elements of the input array. If not supplied, the output
- datatype is determined from the numpy type promotion rules applied to all
- the field datatypes.
-
- Nested fields, as well as each element of any subarray fields, all count
- as single field-elements.
-
- Parameters
- ----------
- arr : ndarray
- Structured array or dtype to convert. Cannot contain object datatype.
- dtype : dtype, optional
- The dtype of the output unstructured array.
- copy : bool, optional
- If true, always return a copy. If false, a view is returned if
- possible, such as when the `dtype` and strides of the fields are
- suitable and the array subtype is one of `np.ndarray`, `np.recarray`
- or `np.memmap`.
-
- .. versionchanged:: 1.25.0
- A view can now be returned if the fields are separated by a
- uniform stride.
-
- casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
- See casting argument of `numpy.ndarray.astype`. Controls what kind of
- data casting may occur.
-
- Returns
- -------
- unstructured : ndarray
- Unstructured array with one more dimension.
-
- Examples
- --------
-
- >>> from numpy.lib import recfunctions as rfn
- >>> a = np.zeros(4, dtype=[('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)])
- >>> a
- array([(0, (0., 0), [0., 0.]), (0, (0., 0), [0., 0.]),
- (0, (0., 0), [0., 0.]), (0, (0., 0), [0., 0.])],
- dtype=[('a', '<i4'), ('b', [('f0', '<f4'), ('f1', '<u2')]), ('c', '<f4', (2,))])
- >>> rfn.structured_to_unstructured(a)
- array([[0., 0., 0., 0., 0.],
- [0., 0., 0., 0., 0.],
- [0., 0., 0., 0., 0.],
- [0., 0., 0., 0., 0.]])
-
- >>> b = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)],
- ... dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])
- >>> np.mean(rfn.structured_to_unstructured(b[['x', 'z']]), axis=-1)
- array([ 3. , 5.5, 9. , 11. ])
-
- """
- if arr.dtype.names is None:
- raise ValueError('arr must be a structured array')
-
- fields = _get_fields_and_offsets(arr.dtype)
- n_fields = len(fields)
- if n_fields == 0 and dtype is None:
- raise ValueError("arr has no fields. Unable to guess dtype")
- elif n_fields == 0:
- # too many bugs elsewhere for this to work now
- raise NotImplementedError("arr with no fields is not supported")
-
- dts, counts, offsets = zip(*fields)
- names = ['f{}'.format(n) for n in range(n_fields)]
-
- if dtype is None:
- out_dtype = np.result_type(*[dt.base for dt in dts])
- else:
- out_dtype = np.dtype(dtype)
-
- # Use a series of views and casts to convert to an unstructured array:
-
- # first view using flattened fields (doesn't work for object arrays)
- # Note: dts may include a shape for subarrays
- flattened_fields = np.dtype({'names': names,
- 'formats': dts,
- 'offsets': offsets,
- 'itemsize': arr.dtype.itemsize})
- arr = arr.view(flattened_fields)
-
- # we only allow a few types to be unstructured by manipulating the
- # strides, because we know it won't work with, for example, np.matrix nor
- # np.ma.MaskedArray.
- can_view = type(arr) in (np.ndarray, np.recarray, np.memmap)
- if (not copy) and can_view and all(dt.base == out_dtype for dt in dts):
- # all elements have the right dtype already; if they have a common
- # stride, we can just return a view
- common_stride = _common_stride(offsets, counts, out_dtype.itemsize)
- if common_stride is not None:
- wrap = arr.__array_wrap__
-
- new_shape = arr.shape + (sum(counts), out_dtype.itemsize)
- new_strides = arr.strides + (abs(common_stride), 1)
-
- arr = arr[..., np.newaxis].view(np.uint8) # view as bytes
- arr = arr[..., min(offsets):] # remove the leading unused data
- arr = np.lib.stride_tricks.as_strided(arr,
- new_shape,
- new_strides,
- subok=True)
-
- # cast and drop the last dimension again
- arr = arr.view(out_dtype)[..., 0]
-
- if common_stride < 0:
- arr = arr[..., ::-1] # reverse, if the stride was negative
- if type(arr) is not type(wrap.__self__):
- # Some types (e.g. recarray) turn into an ndarray along the
- # way, so we have to wrap it again in order to match the
- # behavior with copy=True.
- arr = wrap(arr)
- return arr
-
- # next cast to a packed format with all fields converted to new dtype
- packed_fields = np.dtype({'names': names,
- 'formats': [(out_dtype, dt.shape) for dt in dts]})
- arr = arr.astype(packed_fields, copy=copy, casting=casting)
-
- # finally is it safe to view the packed fields as the unstructured type
- return arr.view((out_dtype, (sum(counts),)))
-
-
-def _unstructured_to_structured_dispatcher(arr, dtype=None, names=None,
- align=None, copy=None, casting=None):
- return (arr,)
-
-@array_function_dispatch(_unstructured_to_structured_dispatcher)
-def unstructured_to_structured(arr, dtype=None, names=None, align=False,
- copy=False, casting='unsafe'):
- """
- Converts an n-D unstructured array into an (n-1)-D structured array.
-
- The last dimension of the input array is converted into a structure, with
- number of field-elements equal to the size of the last dimension of the
- input array. By default all output fields have the input array's dtype, but
- an output structured dtype with an equal number of field-elements can be
- supplied instead.
-
- Nested fields, as well as each element of any subarray fields, all count
- towards the number of field-elements.
-
- Parameters
- ----------
- arr : ndarray
- Unstructured array or dtype to convert.
- dtype : dtype, optional
- The structured dtype of the output array
- names : list of strings, optional
- If dtype is not supplied, this specifies the field names for the output
- dtype, in order. The field dtypes will be the same as the input array.
- align : boolean, optional
- Whether to create an aligned memory layout.
- copy : bool, optional
- See copy argument to `numpy.ndarray.astype`. If true, always return a
- copy. If false, and `dtype` requirements are satisfied, a view is
- returned.
- casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
- See casting argument of `numpy.ndarray.astype`. Controls what kind of
- data casting may occur.
-
- Returns
- -------
- structured : ndarray
- Structured array with fewer dimensions.
-
- Examples
- --------
-
- >>> from numpy.lib import recfunctions as rfn
- >>> dt = np.dtype([('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)])
- >>> a = np.arange(20).reshape((4,5))
- >>> a
- array([[ 0, 1, 2, 3, 4],
- [ 5, 6, 7, 8, 9],
- [10, 11, 12, 13, 14],
- [15, 16, 17, 18, 19]])
- >>> rfn.unstructured_to_structured(a, dt)
- array([( 0, ( 1., 2), [ 3., 4.]), ( 5, ( 6., 7), [ 8., 9.]),
- (10, (11., 12), [13., 14.]), (15, (16., 17), [18., 19.])],
- dtype=[('a', '>> from numpy.lib import recfunctions as rfn
- >>> b = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)],
- ... dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])
- >>> rfn.apply_along_fields(np.mean, b)
- array([ 2.66666667, 5.33333333, 8.66666667, 11. ])
- >>> rfn.apply_along_fields(np.mean, b[['x', 'z']])
- array([ 3. , 5.5, 9. , 11. ])
-
- """
- if arr.dtype.names is None:
- raise ValueError('arr must be a structured array')
-
- uarr = structured_to_unstructured(arr)
- return func(uarr, axis=-1)
- # works and avoids axis requirement, but very, very slow:
- #return np.apply_along_axis(func, -1, uarr)
-
-def _assign_fields_by_name_dispatcher(dst, src, zero_unassigned=None):
- return dst, src
-
-@array_function_dispatch(_assign_fields_by_name_dispatcher)
-def assign_fields_by_name(dst, src, zero_unassigned=True):
- """
- Assigns values from one structured array to another by field name.
-
- Normally in numpy >= 1.14, assignment of one structured array to another
- copies fields "by position", meaning that the first field from the src is
- copied to the first field of the dst, and so on, regardless of field name.
-
- This function instead copies "by field name", such that fields in the dst
- are assigned from the identically named field in the src. This applies
- recursively for nested structures. This is how structure assignment worked
- in numpy >= 1.6 to <= 1.13.
-
- Parameters
- ----------
- dst : ndarray
- src : ndarray
- The source and destination arrays during assignment.
- zero_unassigned : bool, optional
- If True, fields in the dst for which there was no matching
- field in the src are filled with the value 0 (zero). This
- was the behavior of numpy <= 1.13. If False, those fields
- are not modified.
- """
-
- if dst.dtype.names is None:
- dst[...] = src
- return
-
- for name in dst.dtype.names:
- if name not in src.dtype.names:
- if zero_unassigned:
- dst[name] = 0
- else:
- assign_fields_by_name(dst[name], src[name],
- zero_unassigned)
-
-def _require_fields_dispatcher(array, required_dtype):
- return (array,)
-
-@array_function_dispatch(_require_fields_dispatcher)
-def require_fields(array, required_dtype):
- """
- Casts a structured array to a new dtype using assignment by field-name.
-
- This function assigns from the old to the new array by name, so the
- value of a field in the output array is the value of the field with the
- same name in the source array. This has the effect of creating a new
- ndarray containing only the fields "required" by the required_dtype.
-
- If a field name in the required_dtype does not exist in the
- input array, that field is created and set to 0 in the output array.
-
- Parameters
- ----------
- a : ndarray
- array to cast
- required_dtype : dtype
- datatype for output array
-
- Returns
- -------
- out : ndarray
- array with the new dtype, with field values copied from the fields in
- the input array with the same name
-
- Examples
- --------
-
- >>> from numpy.lib import recfunctions as rfn
- >>> a = np.ones(4, dtype=[('a', 'i4'), ('b', 'f8'), ('c', 'u1')])
- >>> rfn.require_fields(a, [('b', 'f4'), ('c', 'u1')])
- array([(1., 1), (1., 1), (1., 1), (1., 1)],
- dtype=[('b', '>> rfn.require_fields(a, [('b', 'f4'), ('newf', 'u1')])
- array([(1., 0), (1., 0), (1., 0), (1., 0)],
- dtype=[('b', '>> from numpy.lib import recfunctions as rfn
- >>> x = np.array([1, 2,])
- >>> rfn.stack_arrays(x) is x
- True
- >>> z = np.array([('A', 1), ('B', 2)], dtype=[('A', '|S3'), ('B', float)])
- >>> zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
- ... dtype=[('A', '|S3'), ('B', np.double), ('C', np.double)])
- >>> test = rfn.stack_arrays((z,zz))
- >>> test
- masked_array(data=[(b'A', 1.0, --), (b'B', 2.0, --), (b'a', 10.0, 100.0),
- (b'b', 20.0, 200.0), (b'c', 30.0, 300.0)],
- mask=[(False, False, True), (False, False, True),
- (False, False, False), (False, False, False),
- (False, False, False)],
- fill_value=(b'N/A', 1.e+20, 1.e+20),
- dtype=[('A', 'S3'), ('B', ' '%s'" %
- (cdtype, fdtype))
- # Only one field: use concatenate
- if len(newdescr) == 1:
- output = ma.concatenate(seqarrays)
- else:
- #
- output = ma.masked_all((np.sum(nrecords),), newdescr)
- offset = np.cumsum(np.r_[0, nrecords])
- seen = []
- for (a, n, i, j) in zip(seqarrays, fldnames, offset[:-1], offset[1:]):
- names = a.dtype.names
- if names is None:
- output['f%i' % len(seen)][i:j] = a
- else:
- for name in n:
- output[name][i:j] = a[name]
- if name not in seen:
- seen.append(name)
- #
- return _fix_output(_fix_defaults(output, defaults),
- usemask=usemask, asrecarray=asrecarray)
-
-
-def _find_duplicates_dispatcher(
- a, key=None, ignoremask=None, return_index=None):
- return (a,)
-
-
-@array_function_dispatch(_find_duplicates_dispatcher)
-def find_duplicates(a, key=None, ignoremask=True, return_index=False):
- """
- Find the duplicates in a structured array along a given key
-
- Parameters
- ----------
- a : array-like
- Input array
- key : {string, None}, optional
- Name of the fields along which to check the duplicates.
- If None, the search is performed by records
- ignoremask : {True, False}, optional
- Whether masked data should be discarded or considered as duplicates.
- return_index : {False, True}, optional
- Whether to return the indices of the duplicated values.
-
- Examples
- --------
- >>> from numpy.lib import recfunctions as rfn
- >>> ndtype = [('a', int)]
- >>> a = np.ma.array([1, 1, 1, 2, 2, 3, 3],
- ... mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype)
- >>> rfn.find_duplicates(a, ignoremask=True, return_index=True)
- (masked_array(data=[(1,), (1,), (2,), (2,)],
- mask=[(False,), (False,), (False,), (False,)],
- fill_value=(999999,),
- dtype=[('a', '= nb1)] - nb1
- (r1cmn, r2cmn) = (len(idx_1), len(idx_2))
- if jointype == 'inner':
- (r1spc, r2spc) = (0, 0)
- elif jointype == 'outer':
- idx_out = idx_sort[~flag_in]
- idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
- idx_2 = np.concatenate((idx_2, idx_out[(idx_out >= nb1)] - nb1))
- (r1spc, r2spc) = (len(idx_1) - r1cmn, len(idx_2) - r2cmn)
- elif jointype == 'leftouter':
- idx_out = idx_sort[~flag_in]
- idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
- (r1spc, r2spc) = (len(idx_1) - r1cmn, 0)
- # Select the entries from each input
- (s1, s2) = (r1[idx_1], r2[idx_2])
- #
- # Build the new description of the output array .......
- # Start with the key fields
- ndtype = _get_fieldspec(r1k.dtype)
-
- # Add the fields from r1
- for fname, fdtype in _get_fieldspec(r1.dtype):
- if fname not in key:
- ndtype.append((fname, fdtype))
-
- # Add the fields from r2
- for fname, fdtype in _get_fieldspec(r2.dtype):
- # Have we seen the current name already ?
- # we need to rebuild this list every time
- names = list(name for name, dtype in ndtype)
- try:
- nameidx = names.index(fname)
- except ValueError:
- #... we haven't: just add the description to the current list
- ndtype.append((fname, fdtype))
- else:
- # collision
- _, cdtype = ndtype[nameidx]
- if fname in key:
- # The current field is part of the key: take the largest dtype
- ndtype[nameidx] = (fname, max(fdtype, cdtype))
- else:
- # The current field is not part of the key: add the suffixes,
- # and place the new field adjacent to the old one
- ndtype[nameidx:nameidx + 1] = [
- (fname + r1postfix, cdtype),
- (fname + r2postfix, fdtype)
- ]
- # Rebuild a dtype from the new fields
- ndtype = np.dtype(ndtype)
- # Find the largest nb of common fields :
- # r1cmn and r2cmn should be equal, but...
- cmn = max(r1cmn, r2cmn)
- # Construct an empty array
- output = ma.masked_all((cmn + r1spc + r2spc,), dtype=ndtype)
- names = output.dtype.names
- for f in r1names:
- selected = s1[f]
- if f not in names or (f in r2names and not r2postfix and f not in key):
- f += r1postfix
- current = output[f]
- current[:r1cmn] = selected[:r1cmn]
- if jointype in ('outer', 'leftouter'):
- current[cmn:cmn + r1spc] = selected[r1cmn:]
- for f in r2names:
- selected = s2[f]
- if f not in names or (f in r1names and not r1postfix and f not in key):
- f += r2postfix
- current = output[f]
- current[:r2cmn] = selected[:r2cmn]
- if (jointype == 'outer') and r2spc:
- current[-r2spc:] = selected[r2cmn:]
- # Sort and finalize the output
- output.sort(order=key)
- kwargs = dict(usemask=usemask, asrecarray=asrecarray)
- return _fix_output(_fix_defaults(output, defaults), **kwargs)
-
-
-def _rec_join_dispatcher(
- key, r1, r2, jointype=None, r1postfix=None, r2postfix=None,
- defaults=None):
- return (r1, r2)
-
-
-@array_function_dispatch(_rec_join_dispatcher)
-def rec_join(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
- defaults=None):
- """
- Join arrays `r1` and `r2` on keys.
- Alternative to join_by, that always returns a np.recarray.
-
- See Also
- --------
- join_by : equivalent function
- """
- kwargs = dict(jointype=jointype, r1postfix=r1postfix, r2postfix=r2postfix,
- defaults=defaults, usemask=False, asrecarray=True)
- return join_by(key, r1, r2, **kwargs)
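The module deleted above collects the structured-array helpers exposed as `numpy.lib.recfunctions`. A short, hedged sketch of the round-trip helpers whose docstrings appear above:

```python
# Sketch: typical use of the recfunctions helpers defined above.
import numpy as np
from numpy.lib import recfunctions as rfn

a = np.array([(1, 2.0), (3, 4.0)], dtype=[("x", "i4"), ("y", "f8")])

plain = rfn.structured_to_unstructured(a, dtype=np.float64)   # 2-D float view/copy
back = rfn.unstructured_to_structured(plain, names=["x", "y"])
print(plain)             # [[1. 2.] [3. 4.]]
print(back.dtype.names)  # ('x', 'y')

packed = rfn.repack_fields(np.dtype("u1,i8,f8", align=True))  # drop alignment padding
print(packed.itemsize)   # 17, as in the repack_fields docstring above
```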
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/_libs/tslibs/__init__.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/_libs/tslibs/__init__.py
deleted file mode 100644
index 2cabbe3ff07da11ae2886d3f8aadc80370692ba0..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/_libs/tslibs/__init__.py
+++ /dev/null
@@ -1,85 +0,0 @@
-__all__ = [
- "dtypes",
- "localize_pydatetime",
- "NaT",
- "NaTType",
- "iNaT",
- "nat_strings",
- "OutOfBoundsDatetime",
- "OutOfBoundsTimedelta",
- "IncompatibleFrequency",
- "Period",
- "Resolution",
- "Timedelta",
- "normalize_i8_timestamps",
- "is_date_array_normalized",
- "dt64arr_to_periodarr",
- "delta_to_nanoseconds",
- "ints_to_pydatetime",
- "ints_to_pytimedelta",
- "get_resolution",
- "Timestamp",
- "tz_convert_from_utc_single",
- "tz_convert_from_utc",
- "to_offset",
- "Tick",
- "BaseOffset",
- "tz_compare",
- "is_unitless",
- "astype_overflowsafe",
- "get_unit_from_dtype",
- "periods_per_day",
- "periods_per_second",
- "is_supported_unit",
- "npy_unit_to_abbrev",
- "get_supported_reso",
-]
-
-from pandas._libs.tslibs import dtypes # pylint: disable=import-self
-from pandas._libs.tslibs.conversion import localize_pydatetime
-from pandas._libs.tslibs.dtypes import (
- Resolution,
- get_supported_reso,
- is_supported_unit,
- npy_unit_to_abbrev,
- periods_per_day,
- periods_per_second,
-)
-from pandas._libs.tslibs.nattype import (
- NaT,
- NaTType,
- iNaT,
- nat_strings,
-)
-from pandas._libs.tslibs.np_datetime import (
- OutOfBoundsDatetime,
- OutOfBoundsTimedelta,
- astype_overflowsafe,
- is_unitless,
- py_get_unit_from_dtype as get_unit_from_dtype,
-)
-from pandas._libs.tslibs.offsets import (
- BaseOffset,
- Tick,
- to_offset,
-)
-from pandas._libs.tslibs.period import (
- IncompatibleFrequency,
- Period,
-)
-from pandas._libs.tslibs.timedeltas import (
- Timedelta,
- delta_to_nanoseconds,
- ints_to_pytimedelta,
-)
-from pandas._libs.tslibs.timestamps import Timestamp
-from pandas._libs.tslibs.timezones import tz_compare
-from pandas._libs.tslibs.tzconversion import tz_convert_from_utc_single
-from pandas._libs.tslibs.vectorized import (
- dt64arr_to_periodarr,
- get_resolution,
- ints_to_pydatetime,
- is_date_array_normalized,
- normalize_i8_timestamps,
- tz_convert_from_utc,
-)
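The deleted `__init__` simply re-exports the compiled timestamp/period machinery; the same names reach users through the top-level `pandas` namespace. A small, hedged sketch:

```python
# Sketch: a few of the re-exported tslibs names, used via the public namespace.
from pandas import Period, Timedelta, Timestamp

ts = Timestamp("2020-01-01 12:00", tz="UTC")
print(ts + Timedelta("36 hours"))              # 2020-01-03 00:00:00+00:00
print(Period("2020-01", freq="M").end_time)    # last nanosecond of January 2020
```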
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/window/test_numba.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/window/test_numba.py
deleted file mode 100644
index f5ef6a00e0b329eb8d31dfed73e4a3a132dd52bb..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/window/test_numba.py
+++ /dev/null
@@ -1,461 +0,0 @@
-import numpy as np
-import pytest
-
-from pandas.compat import (
- is_ci_environment,
- is_platform_mac,
- is_platform_windows,
-)
-from pandas.errors import NumbaUtilError
-import pandas.util._test_decorators as td
-
-from pandas import (
- DataFrame,
- Series,
- option_context,
- to_datetime,
-)
-import pandas._testing as tm
-
-pytestmark = [
- pytest.mark.single_cpu,
- pytest.mark.skipif(
- is_ci_environment() and (is_platform_windows() or is_platform_mac()),
- reason="On GHA CI, Windows can fail with "
- "'Windows fatal exception: stack overflow' "
- "and macOS can timeout",
- ),
-]
-
-
-@pytest.fixture(params=["single", "table"])
-def method(request):
- """method keyword in rolling/expanding/ewm constructor"""
- return request.param
-
-
-@pytest.fixture(
- params=[
- ["sum", {}],
- ["mean", {}],
- ["median", {}],
- ["max", {}],
- ["min", {}],
- ["var", {}],
- ["var", {"ddof": 0}],
- ["std", {}],
- ["std", {"ddof": 0}],
- ]
-)
-def arithmetic_numba_supported_operators(request):
- return request.param
-
-
-@td.skip_if_no("numba")
-@pytest.mark.filterwarnings("ignore")
-# Filter warnings when parallel=True and the function can't be parallelized by Numba
-class TestEngine:
- @pytest.mark.parametrize("jit", [True, False])
- def test_numba_vs_cython_apply(self, jit, nogil, parallel, nopython, center, step):
- def f(x, *args):
- arg_sum = 0
- for arg in args:
- arg_sum += arg
- return np.mean(x) + arg_sum
-
- if jit:
- import numba
-
- f = numba.jit(f)
-
- engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}
- args = (2,)
-
- s = Series(range(10))
- result = s.rolling(2, center=center, step=step).apply(
- f, args=args, engine="numba", engine_kwargs=engine_kwargs, raw=True
- )
- expected = s.rolling(2, center=center, step=step).apply(
- f, engine="cython", args=args, raw=True
- )
- tm.assert_series_equal(result, expected)
-
- @pytest.mark.parametrize(
- "data",
- [
- DataFrame(np.eye(5)),
- DataFrame(
- [
- [5, 7, 7, 7, np.nan, np.inf, 4, 3, 3, 3],
- [5, 7, 7, 7, np.nan, np.inf, 7, 3, 3, 3],
- [np.nan, np.nan, 5, 6, 7, 5, 5, 5, 5, 5],
- ]
- ).T,
- Series(range(5), name="foo"),
- Series([20, 10, 10, np.inf, 1, 1, 2, 3]),
- Series([20, 10, 10, np.nan, 10, 1, 2, 3]),
- ],
- )
- def test_numba_vs_cython_rolling_methods(
- self,
- data,
- nogil,
- parallel,
- nopython,
- arithmetic_numba_supported_operators,
- step,
- ):
- method, kwargs = arithmetic_numba_supported_operators
-
- engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}
-
- roll = data.rolling(3, step=step)
- result = getattr(roll, method)(
- engine="numba", engine_kwargs=engine_kwargs, **kwargs
- )
- expected = getattr(roll, method)(engine="cython", **kwargs)
- tm.assert_equal(result, expected)
-
- @pytest.mark.parametrize(
- "data", [DataFrame(np.eye(5)), Series(range(5), name="foo")]
- )
- def test_numba_vs_cython_expanding_methods(
- self, data, nogil, parallel, nopython, arithmetic_numba_supported_operators
- ):
- method, kwargs = arithmetic_numba_supported_operators
-
- engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}
-
- data = DataFrame(np.eye(5))
- expand = data.expanding()
- result = getattr(expand, method)(
- engine="numba", engine_kwargs=engine_kwargs, **kwargs
- )
- expected = getattr(expand, method)(engine="cython", **kwargs)
- tm.assert_equal(result, expected)
-
- @pytest.mark.parametrize("jit", [True, False])
- def test_cache_apply(self, jit, nogil, parallel, nopython, step):
- # Test that the functions are cached correctly if we switch functions
- def func_1(x):
- return np.mean(x) + 4
-
- def func_2(x):
- return np.std(x) * 5
-
- if jit:
- import numba
-
- func_1 = numba.jit(func_1)
- func_2 = numba.jit(func_2)
-
- engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}
-
- roll = Series(range(10)).rolling(2, step=step)
- result = roll.apply(
- func_1, engine="numba", engine_kwargs=engine_kwargs, raw=True
- )
- expected = roll.apply(func_1, engine="cython", raw=True)
- tm.assert_series_equal(result, expected)
-
- result = roll.apply(
- func_2, engine="numba", engine_kwargs=engine_kwargs, raw=True
- )
- expected = roll.apply(func_2, engine="cython", raw=True)
- tm.assert_series_equal(result, expected)
- # This run should use the cached func_1
- result = roll.apply(
- func_1, engine="numba", engine_kwargs=engine_kwargs, raw=True
- )
- expected = roll.apply(func_1, engine="cython", raw=True)
- tm.assert_series_equal(result, expected)
-
- @pytest.mark.parametrize(
- "window,window_kwargs",
- [
- ["rolling", {"window": 3, "min_periods": 0}],
- ["expanding", {}],
- ],
- )
- def test_dont_cache_args(
- self, window, window_kwargs, nogil, parallel, nopython, method
- ):
- # GH 42287
-
- def add(values, x):
- return np.sum(values) + x
-
- engine_kwargs = {"nopython": nopython, "nogil": nogil, "parallel": parallel}
- df = DataFrame({"value": [0, 0, 0]})
- result = getattr(df, window)(method=method, **window_kwargs).apply(
- add, raw=True, engine="numba", engine_kwargs=engine_kwargs, args=(1,)
- )
- expected = DataFrame({"value": [1.0, 1.0, 1.0]})
- tm.assert_frame_equal(result, expected)
-
- result = getattr(df, window)(method=method, **window_kwargs).apply(
- add, raw=True, engine="numba", engine_kwargs=engine_kwargs, args=(2,)
- )
- expected = DataFrame({"value": [2.0, 2.0, 2.0]})
- tm.assert_frame_equal(result, expected)
-
- def test_dont_cache_engine_kwargs(self):
- # If the user passes a different set of engine_kwargs don't return the same
- # jitted function
- nogil = False
- parallel = True
- nopython = True
-
- def func(x):
- return nogil + parallel + nopython
-
- engine_kwargs = {"nopython": nopython, "nogil": nogil, "parallel": parallel}
- df = DataFrame({"value": [0, 0, 0]})
- result = df.rolling(1).apply(
- func, raw=True, engine="numba", engine_kwargs=engine_kwargs
- )
- expected = DataFrame({"value": [2.0, 2.0, 2.0]})
- tm.assert_frame_equal(result, expected)
-
- parallel = False
- engine_kwargs = {"nopython": nopython, "nogil": nogil, "parallel": parallel}
- result = df.rolling(1).apply(
- func, raw=True, engine="numba", engine_kwargs=engine_kwargs
- )
- expected = DataFrame({"value": [1.0, 1.0, 1.0]})
- tm.assert_frame_equal(result, expected)
-
-
-@td.skip_if_no("numba")
-class TestEWM:
- @pytest.mark.parametrize(
- "grouper", [lambda x: x, lambda x: x.groupby("A")], ids=["None", "groupby"]
- )
- @pytest.mark.parametrize("method", ["mean", "sum"])
- def test_invalid_engine(self, grouper, method):
- df = DataFrame({"A": ["a", "b", "a", "b"], "B": range(4)})
- with pytest.raises(ValueError, match="engine must be either"):
- getattr(grouper(df).ewm(com=1.0), method)(engine="foo")
-
- @pytest.mark.parametrize(
- "grouper", [lambda x: x, lambda x: x.groupby("A")], ids=["None", "groupby"]
- )
- @pytest.mark.parametrize("method", ["mean", "sum"])
- def test_invalid_engine_kwargs(self, grouper, method):
- df = DataFrame({"A": ["a", "b", "a", "b"], "B": range(4)})
- with pytest.raises(ValueError, match="cython engine does not"):
- getattr(grouper(df).ewm(com=1.0), method)(
- engine="cython", engine_kwargs={"nopython": True}
- )
-
- @pytest.mark.parametrize("grouper", ["None", "groupby"])
- @pytest.mark.parametrize("method", ["mean", "sum"])
- def test_cython_vs_numba(
- self, grouper, method, nogil, parallel, nopython, ignore_na, adjust
- ):
- df = DataFrame({"B": range(4)})
- if grouper == "None":
- grouper = lambda x: x
- else:
- df["A"] = ["a", "b", "a", "b"]
- grouper = lambda x: x.groupby("A")
- if method == "sum":
- adjust = True
- ewm = grouper(df).ewm(com=1.0, adjust=adjust, ignore_na=ignore_na)
-
- engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}
- result = getattr(ewm, method)(engine="numba", engine_kwargs=engine_kwargs)
- expected = getattr(ewm, method)(engine="cython")
-
- tm.assert_frame_equal(result, expected)
-
- @pytest.mark.parametrize("grouper", ["None", "groupby"])
- def test_cython_vs_numba_times(self, grouper, nogil, parallel, nopython, ignore_na):
- # GH 40951
-
- df = DataFrame({"B": [0, 0, 1, 1, 2, 2]})
- if grouper == "None":
- grouper = lambda x: x
- else:
- grouper = lambda x: x.groupby("A")
- df["A"] = ["a", "b", "a", "b", "b", "a"]
-
- halflife = "23 days"
- times = to_datetime(
- [
- "2020-01-01",
- "2020-01-01",
- "2020-01-02",
- "2020-01-10",
- "2020-02-23",
- "2020-01-03",
- ]
- )
- ewm = grouper(df).ewm(
- halflife=halflife, adjust=True, ignore_na=ignore_na, times=times
- )
-
- engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}
-
- result = ewm.mean(engine="numba", engine_kwargs=engine_kwargs)
- expected = ewm.mean(engine="cython")
-
- tm.assert_frame_equal(result, expected)
-
-
-@td.skip_if_no("numba")
-def test_use_global_config():
- def f(x):
- return np.mean(x) + 2
-
- s = Series(range(10))
- with option_context("compute.use_numba", True):
- result = s.rolling(2).apply(f, engine=None, raw=True)
- expected = s.rolling(2).apply(f, engine="numba", raw=True)
- tm.assert_series_equal(expected, result)
-
-
-@td.skip_if_no("numba")
-def test_invalid_kwargs_nopython():
- with pytest.raises(NumbaUtilError, match="numba does not support kwargs with"):
- Series(range(1)).rolling(1).apply(
- lambda x: x, kwargs={"a": 1}, engine="numba", raw=True
- )
-
-
-@td.skip_if_no("numba")
-@pytest.mark.slow
-@pytest.mark.filterwarnings("ignore")
-# Filter warnings when parallel=True and the function can't be parallelized by Numba
-class TestTableMethod:
- def test_table_series_valueerror(self):
- def f(x):
- return np.sum(x, axis=0) + 1
-
- with pytest.raises(
- ValueError, match="method='table' not applicable for Series objects."
- ):
- Series(range(1)).rolling(1, method="table").apply(
- f, engine="numba", raw=True
- )
-
- def test_table_method_rolling_methods(
- self,
- axis,
- nogil,
- parallel,
- nopython,
- arithmetic_numba_supported_operators,
- step,
- ):
- method, kwargs = arithmetic_numba_supported_operators
-
- engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}
-
- df = DataFrame(np.eye(3))
- roll_table = df.rolling(2, method="table", axis=axis, min_periods=0, step=step)
- if method in ("var", "std"):
- with pytest.raises(NotImplementedError, match=f"{method} not supported"):
- getattr(roll_table, method)(
- engine_kwargs=engine_kwargs, engine="numba", **kwargs
- )
- else:
- roll_single = df.rolling(
- 2, method="single", axis=axis, min_periods=0, step=step
- )
- result = getattr(roll_table, method)(
- engine_kwargs=engine_kwargs, engine="numba", **kwargs
- )
- expected = getattr(roll_single, method)(
- engine_kwargs=engine_kwargs, engine="numba", **kwargs
- )
- tm.assert_frame_equal(result, expected)
-
- def test_table_method_rolling_apply(self, axis, nogil, parallel, nopython, step):
- engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}
-
- def f(x):
- return np.sum(x, axis=0) + 1
-
- df = DataFrame(np.eye(3))
- result = df.rolling(
- 2, method="table", axis=axis, min_periods=0, step=step
- ).apply(f, raw=True, engine_kwargs=engine_kwargs, engine="numba")
- expected = df.rolling(
- 2, method="single", axis=axis, min_periods=0, step=step
- ).apply(f, raw=True, engine_kwargs=engine_kwargs, engine="numba")
- tm.assert_frame_equal(result, expected)
-
- def test_table_method_rolling_weighted_mean(self, step):
- def weighted_mean(x):
- arr = np.ones((1, x.shape[1]))
- arr[:, :2] = (x[:, :2] * x[:, 2]).sum(axis=0) / x[:, 2].sum()
- return arr
-
- df = DataFrame([[1, 2, 0.6], [2, 3, 0.4], [3, 4, 0.2], [4, 5, 0.7]])
- result = df.rolling(2, method="table", min_periods=0, step=step).apply(
- weighted_mean, raw=True, engine="numba"
- )
- expected = DataFrame(
- [
- [1.0, 2.0, 1.0],
- [1.8, 2.0, 1.0],
- [3.333333, 2.333333, 1.0],
- [1.555556, 7, 1.0],
- ]
- )[::step]
- tm.assert_frame_equal(result, expected)
-
- def test_table_method_expanding_apply(self, axis, nogil, parallel, nopython):
- engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}
-
- def f(x):
- return np.sum(x, axis=0) + 1
-
- df = DataFrame(np.eye(3))
- result = df.expanding(method="table", axis=axis).apply(
- f, raw=True, engine_kwargs=engine_kwargs, engine="numba"
- )
- expected = df.expanding(method="single", axis=axis).apply(
- f, raw=True, engine_kwargs=engine_kwargs, engine="numba"
- )
- tm.assert_frame_equal(result, expected)
-
- def test_table_method_expanding_methods(
- self, axis, nogil, parallel, nopython, arithmetic_numba_supported_operators
- ):
- method, kwargs = arithmetic_numba_supported_operators
-
- engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}
-
- df = DataFrame(np.eye(3))
- expand_table = df.expanding(method="table", axis=axis)
- if method in ("var", "std"):
- with pytest.raises(NotImplementedError, match=f"{method} not supported"):
- getattr(expand_table, method)(
- engine_kwargs=engine_kwargs, engine="numba", **kwargs
- )
- else:
- expand_single = df.expanding(method="single", axis=axis)
- result = getattr(expand_table, method)(
- engine_kwargs=engine_kwargs, engine="numba", **kwargs
- )
- expected = getattr(expand_single, method)(
- engine_kwargs=engine_kwargs, engine="numba", **kwargs
- )
- tm.assert_frame_equal(result, expected)
-
- @pytest.mark.parametrize("data", [np.eye(3), np.ones((2, 3)), np.ones((3, 2))])
- @pytest.mark.parametrize("method", ["mean", "sum"])
- def test_table_method_ewm(self, data, method, axis, nogil, parallel, nopython):
- engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}
-
- df = DataFrame(data)
-
- result = getattr(df.ewm(com=1, method="table", axis=axis), method)(
- engine_kwargs=engine_kwargs, engine="numba"
- )
- expected = getattr(df.ewm(com=1, method="single", axis=axis), method)(
- engine_kwargs=engine_kwargs, engine="numba"
- )
- tm.assert_frame_equal(result, expected)
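The deleted tests repeatedly compare the `numba` and `cython` engines for rolling/expanding/ewm aggregations. The user-facing pattern they exercise looks roughly like this (requires numba to be installed; `engine_kwargs` takes the same nogil/parallel/nopython flags used throughout the tests above):

```python
# Sketch of the pattern the deleted tests assert on: both engines must agree.
import numpy as np
import pandas as pd
import pandas._testing as tm

s = pd.Series(np.arange(10.0))
roll = s.rolling(3)

numba_result = roll.mean(engine="numba", engine_kwargs={"parallel": False})
cython_result = roll.mean(engine="cython")
tm.assert_series_equal(numba_result, cython_result)
```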
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pydantic/deprecated/config.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pydantic/deprecated/config.py
deleted file mode 100644
index 7409847b087ef77f43da736cedffda926cf58d6d..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pydantic/deprecated/config.py
+++ /dev/null
@@ -1,71 +0,0 @@
-from __future__ import annotations as _annotations
-
-import warnings
-from typing import TYPE_CHECKING, Any
-
-from typing_extensions import Literal, deprecated
-
-from .._internal import _config
-from ..warnings import PydanticDeprecatedSince20
-
-if not TYPE_CHECKING:
- # See PyCharm issues https://youtrack.jetbrains.com/issue/PY-21915
- # and https://youtrack.jetbrains.com/issue/PY-51428
- DeprecationWarning = PydanticDeprecatedSince20
-
-__all__ = 'BaseConfig', 'Extra'
-
-
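-# Metaclass so attribute access on the deprecated BaseConfig class itself still resolves
-# to the new config defaults while emitting a deprecation warning.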
-class _ConfigMetaclass(type):
- def __getattr__(self, item: str) -> Any:
- warnings.warn(_config.DEPRECATION_MESSAGE, DeprecationWarning)
-
- try:
- return _config.config_defaults[item]
- except KeyError as exc:
- raise AttributeError(f"type object '{self.__name__}' has no attribute {exc}") from exc
-
-
-@deprecated('BaseConfig is deprecated. Use the `pydantic.ConfigDict` instead.', category=PydanticDeprecatedSince20)
-class BaseConfig(metaclass=_ConfigMetaclass):
- """This class is only retained for backwards compatibility.
-
- !!! Warning "Deprecated"
- BaseConfig is deprecated. Use the [`pydantic.ConfigDict`][pydantic.ConfigDict] instead.
- """
-
- def __getattr__(self, item: str) -> Any:
- warnings.warn(_config.DEPRECATION_MESSAGE, DeprecationWarning)
- try:
- return super().__getattribute__(item)
- except AttributeError as exc:
- try:
- return getattr(type(self), item)
- except AttributeError:
- # re-raising changes the displayed text to reflect that `self` is not a type
- raise AttributeError(str(exc)) from exc
-
- def __init_subclass__(cls, **kwargs: Any) -> None:
- warnings.warn(_config.DEPRECATION_MESSAGE, DeprecationWarning)
- return super().__init_subclass__(**kwargs)
-
-
-class _ExtraMeta(type):
- def __getattribute__(self, __name: str) -> Any:
- # The @deprecated decorator accesses other attributes, so we only emit a warning for the expected ones
- if __name in {'allow', 'ignore', 'forbid'}:
- warnings.warn(
- "`pydantic.config.Extra` is deprecated, use literal values instead (e.g. `extra='allow'`)",
- DeprecationWarning,
- stacklevel=2,
- )
- return super().__getattribute__(__name)
-
-
-@deprecated(
- "Extra is deprecated. Use literal values instead (e.g. `extra='allow'`)", category=PydanticDeprecatedSince20
-)
-class Extra(metaclass=_ExtraMeta):
- allow: Literal['allow'] = 'allow'
- ignore: Literal['ignore'] = 'ignore'
- forbid: Literal['forbid'] = 'forbid'
diff --git a/spaces/pycui/RealChar/client/web/src/utils/audioUtils.js b/spaces/pycui/RealChar/client/web/src/utils/audioUtils.js
deleted file mode 100644
index 0d03bc1c95748e5f65367b8e6777a8ece50669b8..0000000000000000000000000000000000000000
--- a/spaces/pycui/RealChar/client/web/src/utils/audioUtils.js
+++ /dev/null
@@ -1,56 +0,0 @@
-/**
- * src/utils/audioUtils.js
- * Audio playback.
- *
- * created by Lynchee on 7/16/23
- */
-
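-// Mobile browsers (notably iOS Safari) create AudioContexts in the 'suspended' state;
-// resume on the first touch gesture, then remove the one-shot listeners.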
-const unlockAudioContext = (audioContext) => {
- if (audioContext.state === 'suspended') {
- const unlock = function() {
- audioContext.resume().then(function() {
- document.body.removeEventListener('touchstart', unlock);
- document.body.removeEventListener('touchend', unlock);
- });
- };
- document.body.addEventListener('touchstart', unlock, false);
- document.body.addEventListener('touchend', unlock, false);
- }
-}
-
-// play a single audio chunk
-const playAudio = (audioContextRef, audioPlayer, url) => {
- if (!audioContextRef.current) {
- audioContextRef.current = new (window.AudioContext || window.webkitAudioContext)();
- unlockAudioContext(audioContextRef.current);
- }
-
- return new Promise((resolve) => {
- audioPlayer.current.src = url;
- audioPlayer.current.muted = true; // Start muted
- audioPlayer.current.onended = resolve;
- audioPlayer.current.play().then(() => {
- audioPlayer.current.muted = false; // Unmute after playback starts
- }).catch(error => {
- if (error.name === 'NotSupportedError') {
- alert(`Playback failed because: ${error}. Please check https://elevenlabs.io/subscription if you have enough characters left.`);
- } else {
- alert(`Playback failed because: ${error}`);
- }
- });
- });
-}
-
-// play all audio chunks
-export const playAudios = async (audioContextRef, audioPlayer, audioQueue, setIsPlaying) => {
- while (audioQueue.current.length > 0) {
- let data = audioQueue.current[0];
- let blob = new Blob([data], { type: 'audio/mp3' });
- let audioUrl = URL.createObjectURL(blob);
- await playAudio(audioContextRef, audioPlayer, audioUrl);
- audioQueue.current.shift();
- }
-
- // done playing audios
- setIsPlaying(false);
-}
diff --git a/spaces/radames/PIFu-Clothed-Human-Digitization/PIFu/apps/prt_util.py b/spaces/radames/PIFu-Clothed-Human-Digitization/PIFu/apps/prt_util.py
deleted file mode 100644
index 7eba32fa0b396f420b2e332abbb67135dbc14d6b..0000000000000000000000000000000000000000
--- a/spaces/radames/PIFu-Clothed-Human-Digitization/PIFu/apps/prt_util.py
+++ /dev/null
@@ -1,142 +0,0 @@
-import os
-import trimesh
-import numpy as np
-import math
-from scipy.special import sph_harm
-import argparse
-from tqdm import tqdm
-
-def factratio(N, D):
- if N >= D:
- prod = 1.0
- for i in range(D+1, N+1):
- prod *= i
- return prod
- else:
- prod = 1.0
- for i in range(N+1, D+1):
- prod *= i
- return 1.0 / prod
-
-def KVal(M, L):
- return math.sqrt(((2 * L + 1) / (4 * math.pi)) * (factratio(L - M, L + M)))
-
-def AssociatedLegendre(M, L, x):
- if M < 0 or M > L or np.max(np.abs(x)) > 1.0:
- return np.zeros_like(x)
-
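- # Standard upward recurrence for the associated Legendre polynomial P_L^M(x):
- # build P_M^M first, then P_{M+1}^M, then iterate up to P_L^M.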
- pmm = np.ones_like(x)
- if M > 0:
- somx2 = np.sqrt((1.0 + x) * (1.0 - x))
- fact = 1.0
- for i in range(1, M+1):
- pmm = -pmm * fact * somx2
- fact = fact + 2
-
- if L == M:
- return pmm
- else:
- pmmp1 = x * (2 * M + 1) * pmm
- if L == M+1:
- return pmmp1
- else:
- pll = np.zeros_like(x)
- for i in range(M+2, L+1):
- pll = (x * (2 * i - 1) * pmmp1 - (i + M - 1) * pmm) / (i - M)
- pmm = pmmp1
- pmmp1 = pll
- return pll
-
-def SphericalHarmonic(M, L, theta, phi):
- if M > 0:
- return math.sqrt(2.0) * KVal(M, L) * np.cos(M * phi) * AssociatedLegendre(M, L, np.cos(theta))
- elif M < 0:
- return math.sqrt(2.0) * KVal(-M, L) * np.sin(-M * phi) * AssociatedLegendre(-M, L, np.cos(theta))
- else:
- return KVal(0, L) * AssociatedLegendre(0, L, np.cos(theta))
-
-def save_obj(mesh_path, verts):
- file = open(mesh_path, 'w')
- for v in verts:
- file.write('v %.4f %.4f %.4f\n' % (v[0], v[1], v[2]))
- file.close()
-
-def sampleSphericalDirections(n):
- xv = np.random.rand(n,n)
- yv = np.random.rand(n,n)
- theta = np.arccos(1-2 * xv)
- phi = 2.0 * math.pi * yv
-
- phi = phi.reshape(-1)
- theta = theta.reshape(-1)
-
- vx = -np.sin(theta) * np.cos(phi)
- vy = -np.sin(theta) * np.sin(phi)
- vz = np.cos(theta)
- return np.stack([vx, vy, vz], 1), phi, theta
-
-def getSHCoeffs(order, phi, theta):
- shs = []
- for n in range(0, order+1):
- for m in range(-n,n+1):
- s = SphericalHarmonic(m, n, theta, phi)
- shs.append(s)
-
- return np.stack(shs, 1)
-
-def computePRT(mesh_path, n, order):
- mesh = trimesh.load(mesh_path, process=False)
- vectors_orig, phi, theta = sampleSphericalDirections(n)
- SH_orig = getSHCoeffs(order, phi, theta)
-
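- # Monte Carlo quadrature weight: the sphere's 4*pi steradians divided by the n*n sampled directions.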
- w = 4.0 * math.pi / (n*n)
-
- origins = mesh.vertices
- normals = mesh.vertex_normals
- n_v = origins.shape[0]
-
- origins = np.repeat(origins[:,None], n, axis=1).reshape(-1,3)
- normals = np.repeat(normals[:,None], n, axis=1).reshape(-1,3)
- PRT_all = None
- for i in tqdm(range(n)):
- SH = np.repeat(SH_orig[None,(i*n):((i+1)*n)], n_v, axis=0).reshape(-1,SH_orig.shape[1])
- vectors = np.repeat(vectors_orig[None,(i*n):((i+1)*n)], n_v, axis=0).reshape(-1,3)
-
- dots = (vectors * normals).sum(1)
- front = (dots > 0.0)
-
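- # Offset the ray origins slightly along the normals so visibility rays do not hit the surface they start from.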
- delta = 1e-3*min(mesh.bounding_box.extents)
- hits = mesh.ray.intersects_any(origins + delta * normals, vectors)
- nohits = np.logical_and(front, np.logical_not(hits))
-
- PRT = (nohits.astype(np.float64) * dots)[:,None] * SH
-
- if PRT_all is not None:
- PRT_all += (PRT.reshape(-1, n, SH.shape[1]).sum(1))
- else:
- PRT_all = (PRT.reshape(-1, n, SH.shape[1]).sum(1))
-
- PRT = w * PRT_all
-
- # NOTE: trimesh sometimes breaks the original vertex order, but the topology will not change.
- # When loading the PRT in another program, use the triangle list from trimesh.
- return PRT, mesh.faces
-
-def testPRT(dir_path, n=40):
- if dir_path[-1] == '/':
- dir_path = dir_path[:-1]
- sub_name = dir_path.split('/')[-1][:-4]
- obj_path = os.path.join(dir_path, sub_name + '_100k.obj')
- os.makedirs(os.path.join(dir_path, 'bounce'), exist_ok=True)
-
- PRT, F = computePRT(obj_path, n, 2)
- np.savetxt(os.path.join(dir_path, 'bounce', 'bounce0.txt'), PRT, fmt='%.8f')
- np.save(os.path.join(dir_path, 'bounce', 'face.npy'), F)
-
-if __name__ == '__main__':
- parser = argparse.ArgumentParser()
- parser.add_argument('-i', '--input', type=str, default='/home/shunsuke/Downloads/rp_dennis_posed_004_OBJ')
- parser.add_argument('-n', '--n_sample', type=int, default=40, help='square root of the number of sample directions; higher is more accurate but slower')
- args = parser.parse_args()
-
- testPRT(args.input)
diff --git a/spaces/radames/SPIGA-face-alignment-headpose-estimator/SPIGA/spiga/data/models3D/__init__.py b/spaces/radames/SPIGA-face-alignment-headpose-estimator/SPIGA/spiga/data/models3D/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/radames/SPIGA-face-alignment-headpose-estimator/SPIGA/spiga/demo/analyze/track/retinasort/config.py b/spaces/radames/SPIGA-face-alignment-headpose-estimator/SPIGA/spiga/demo/analyze/track/retinasort/config.py
deleted file mode 100644
index b0c3ca78e73af40d4e4bd5500059986702c49654..0000000000000000000000000000000000000000
--- a/spaces/radames/SPIGA-face-alignment-headpose-estimator/SPIGA/spiga/demo/analyze/track/retinasort/config.py
+++ /dev/null
@@ -1,81 +0,0 @@
-
-
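-# Each preset pairs a RetinaFace face detector ('retina') with a SORT tracker ('sort');
-# the variants differ in detector backbone and in detection/tracking thresholds.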
-cfg_retinasort = {
-
- 'retina': {
- 'model_name': 'mobile0.25',
- 'extra_features': ['landmarks'],
- 'postreat': {
- 'resize': 1.,
- 'score_thr': 0.75,
- 'top_k': 5000,
- 'nms_thr': 0.4,
- 'keep_top_k': 50}
- },
-
- 'sort': {
- 'max_age': 1,
- 'min_hits': 3,
- 'iou_threshold': 0.3,
- }
-}
-
-cfg_retinasort_res50 = {
-
- 'retina': {
- 'model_name': 'resnet50',
- 'extra_features': ['landmarks'],
- 'postreat': {
- 'resize': 1.,
- 'score_thr': 0.75,
- 'top_k': 5000,
- 'nms_thr': 0.4,
- 'keep_top_k': 50}
- },
-
- 'sort': {
- 'max_age': 1,
- 'min_hits': 3,
- 'iou_threshold': 0.3,
- }
-}
-
-cfg_retinasort_cav3d = {
-
- 'retina': {
- 'model_name': 'resnet50',
- 'extra_features': ['landmarks'],
- 'postreat': {
- 'resize': 1.,
- 'score_thr': 0.95,
- 'top_k': 5000,
- 'nms_thr': 0.8,
- 'keep_top_k': 50}
- },
-
- 'sort': {
- 'max_age': 90,
- 'min_hits': 3,
- 'iou_threshold': 0.3,
- }
-}
-
-cfg_retinasort_av16 = {
-
- 'retina': {
- 'model_name': 'resnet50',
- 'extra_features': ['landmarks'],
- 'postreat': {
- 'resize': 1.,
- 'score_thr': 0.75,
- 'top_k': 5000,
- 'nms_thr': 0.8,
- 'keep_top_k': 50}
- },
-
- 'sort': {
- 'max_age': 90,
- 'min_hits': 3,
- 'iou_threshold': 0.3,
- }
-}
diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Adobe Master Collection Cs6 Serial Number 1325.md b/spaces/raedeXanto/academic-chatgpt-beta/Adobe Master Collection Cs6 Serial Number 1325.md
deleted file mode 100644
index 08137331205da9d3937f80adddc2b83a1c7f2625..0000000000000000000000000000000000000000
--- a/spaces/raedeXanto/academic-chatgpt-beta/Adobe Master Collection Cs6 Serial Number 1325.md
+++ /dev/null
@@ -1,56 +0,0 @@
-
-How to Find Your Adobe Master Collection CS6 Serial Number 1325
-If you have lost or forgotten your Adobe Master Collection CS6 serial number, you may be wondering how to retrieve it from your old laptop or computer. The serial number is a 24-digit code that starts with 1325 and is required to activate and use the software. Without it, you may not be able to install or reinstall the software on a new device.
-adobe master collection cs6 serial number 1325
DOWNLOAD ✒ ✒ ✒ https://tinourl.com/2uL0FY
-Fortunately, there are some ways to find your Adobe Master Collection CS6 serial number 1325 if you still have access to your old laptop or computer. Here are some methods you can try:
-
-- Use a third-party software to scan your system and recover the serial number. There are some free tools available online that can help you find your Adobe product keys, such as Belarc Advisor for Windows and Mac Product Key Finder for Mac. These tools will scan your system and display a list of software licenses and serial numbers that are installed on your device. You can then copy and paste the serial number for Adobe Master Collection CS6 1325 from the list. However, be careful when downloading and using these tools, as some of them may contain malware or viruses that can harm your device.
-- Check your Adobe account online. If you have registered your Adobe Master Collection CS6 1325 with your Adobe ID, you may be able to find your serial number online by logging into your account at https://www.adobe.com/. Go to the Plans section and click on View Products. You should see a list of products that are associated with your account, along with their serial numbers. If you see Adobe Master Collection CS6 1325 in the list, you can copy and paste the serial number from there.
-- Contact Adobe customer support. If none of the above methods work for you, you may need to contact Adobe customer support and provide proof of purchase for your Adobe Master Collection CS6 1325. You can contact them by phone, chat, or email at https://helpx.adobe.com/contact.html. You may need to provide information such as your name, email address, order number, receipt, or invoice for your purchase. Adobe customer support may be able to verify your purchase and provide you with a new serial number for your software.
-
-Once you have found your Adobe Master Collection CS6 serial number 1325, make sure to write it down somewhere safe and secure, such as a notebook or a password manager. You may also want to register your software with your Adobe ID online, so that you can access it anytime from any device.
-I hope this article has helped you find your Adobe Master Collection CS6 serial number 1325. If you have any questions or feedback, please let me know in the comments below.
Adobe Master Collection CS6 is a suite of software that includes various applications for design, web, video, and audio production. It is a powerful and versatile tool that can help you create stunning and professional projects. However, it can also be overwhelming and confusing for beginners who are not familiar with how to use it.
-That's why I have compiled some of the best resources and tutorials that can help you learn how to use Adobe Master Collection CS6 effectively. Whether you want to edit photos, create logos, design websites, make animations, or record audio, you will find something useful and helpful in this article. Here are some of the topics that I will cover:
-
-- How to install and activate Adobe Master Collection CS6
-- How to use Photoshop CS6 for image editing and manipulation
-- How to use Illustrator CS6 for vector graphics and illustration
-- How to use InDesign CS6 for page layout and print production
-- How to use Dreamweaver CS6 for web design and development
-- How to use Flash Professional CS6 for animation and interactivity
-- How to use Premiere Pro CS6 for video editing and production
-- How to use After Effects CS6 for motion graphics and visual effects
-- How to use Audition CS6 for audio editing and mixing
-- How to use SpeedGrade CS6 for color grading and correction
-- How to use Prelude CS6 for video ingest and logging
-- How to use Encore CS6 for DVD and Blu-ray authoring
-- How to use Bridge CS6 for file management and organization
-- How to use Media Encoder CS6 for media encoding and transcoding
-- How to use Flash Builder 4.6 Premium Edition for mobile application development
-
-Let's get started!
-
-
-How to install and activate Adobe Master Collection CS6
-The first step to use Adobe Master Collection CS6 is to install it on your computer. You can download the installation files from the Adobe website or use the DVDs that came with your purchase. You will need a valid serial number that starts with 1325 to activate the software. Here are the steps to install and activate Adobe Master Collection CS6:
-
-- Disconnect from the internet.
-- Run the installer file for your operating system (Windows or Mac) and follow the instructions on the screen.
-- Select the language and the components that you want to install. You can choose to install all or some of the applications in the suite.
-- Enter your serial number when prompted. Make sure you enter it correctly and do not include any spaces or dashes.
-- Complete the installation process and restart your computer if required.
-- Connect to the internet and launch any of the applications in the suite. You may need to sign in with your Adobe ID or create one if you don't have one.
-- You may also need to update your software with the latest patches and fixes. You can do this by using the Adobe Application Manager or by downloading them manually from the Adobe website.
-
-Congratulations! You have successfully installed and activated Adobe Master Collection CS6. You can now start using it for your creative projects.
-
-How to use Photoshop CS6 for image editing and manipulation
-Photoshop CS6 is one of the most popular and widely used applications in Adobe Master Collection CS6. It is a powerful software that allows you to edit, enhance, manipulate, and create images with ease. Whether you want to retouch photos, create digital paintings, design logos, or make collages, Photoshop CS6 can help you achieve your goals.
-To use Photoshop CS6 effectively, you need to understand its interface, tools, layers, adjustments, filters, and other features. Here are some of the best resources and tutorials that can help you learn how to use Photoshop CS6:
-
-
-- Photoshop User Guide: This is the official documentation from Adobe that covers everything you need to know about Photoshop CS6. You can find detailed explanations, instructions, tips, examples, videos, and more on how to use Photoshop CS6.
-
-- https://tinourl.com/2uL2SS
-
-One of the advantages of ArcSoft PhotoStudio is that it comes in a portable version, which means you can run it from a USB flash drive or any other removable device without installing it on your PC. This way, you can use it on any computer without leaving any traces or affecting the system performance.
-
-In this article, we will show you how to download and use ArcSoft PhotoStudio V6.0.0.157 [Portable] on your PC.
-
-How to Download ArcSoft PhotoStudio V6.0.0.157 [Portable]
-
-There are several websites that offer the portable version of ArcSoft PhotoStudio for free download, such as Malavida[^1^], Share4u[^2^], OpenSea[^3^], and RonenBekerman[^4^]. However, you should always be careful when downloading software from unknown sources, as they may contain viruses or malware that can harm your PC.
-
-Therefore, we recommend that you download ArcSoft PhotoStudio V6.0.0.157 [Portable] from the official website of ArcSoft, which is a trusted and reputable company that has been developing photo and video software for over 20 years.
-
-To download ArcSoft PhotoStudio V6.0.0.157 [Portable] from the official website of ArcSoft, follow these steps:
-
-
-
-- Go to https://www.arcsoft.com/photostudio/ and click on the "Download" button.
-- Select the "Portable" option and click on the "Download Now" button.
-- Save the file "PhotoStudio_6_Portable.zip" to your preferred location on your PC.
-- Extract the file "PhotoStudio_6_Portable.zip" using a program like WinRAR or 7-Zip.
-- You will see a folder called "PhotoStudio_6_Portable" that contains the executable file "PhotoStudio.exe" and other files and folders.
-
-
-Congratulations! You have successfully downloaded ArcSoft PhotoStudio V6.0.0.157 [Portable] on your PC.
-
-How to Use ArcSoft PhotoStudio V6.0.0.157 [Portable]
-
-To use ArcSoft PhotoStudio V6.0.0.157 [Portable] on your PC, follow these steps:
-
-
-- Open the folder "PhotoStudio_6_Portable" and double-click on the executable file "PhotoStudio.exe".
-- You will see a splash screen and then the main interface of ArcSoft PhotoStudio.
-- You can now start editing your photos using the tools and features of ArcSoft PhotoStudio.
-- To open a photo, click on the "File" menu and select "Open" or press Ctrl+O on your keyboard.
-- To save a photo, click on the "File" menu and select "Save As" or press Ctrl+Shift+S on your keyboard.
-- To close ArcSoft PhotoStudio, click on the "File" menu and select "Exit" or press Alt+F4 on your keyboard.
-
-
-Note: You can also copy the folder "PhotoStudio_6_Portable" to a USB flash drive or any other removable device and run it from there on any computer without installing it.
-
-Conclusion
-
-ArcSoft PhotoStudio V6.0.0.157 [Portable] is a great photo editing software that you can use on your PC without installing it. It offers a range of features and tools to help you enhance, manage, and print your photos.
-
-
\ No newline at end of file
diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Corel PaintShop Pro X9 Ultimate Crack With Serial Key.md b/spaces/raedeXanto/academic-chatgpt-beta/Corel PaintShop Pro X9 Ultimate Crack With Serial Key.md
deleted file mode 100644
index 8afa2ec66ac16c79afe9609dcf04a76078364ab9..0000000000000000000000000000000000000000
--- a/spaces/raedeXanto/academic-chatgpt-beta/Corel PaintShop Pro X9 Ultimate Crack With Serial Key.md
+++ /dev/null
@@ -1,24 +0,0 @@
-
-How to Crack Corel PaintShop Pro X9 Ultimate with Serial Key
-Corel PaintShop Pro X9 Ultimate is a powerful photo editing software that offers a range of tools and features to enhance your images. Whether you want to crop, resize, adjust colors, apply effects, remove unwanted objects, or create stunning graphics, Corel PaintShop Pro X9 Ultimate can help you achieve your creative vision.
-However, if you want to use Corel PaintShop Pro X9 Ultimate without paying for a license, you might be tempted to look for a crack or a serial key online. This is not only illegal, but also risky, as you might end up downloading malware or viruses that can harm your computer or compromise your personal data.
-Corel PaintShop Pro X9 Ultimate Crack with Serial Key
Download ►►►►► https://tinourl.com/2uL1OA
-Therefore, we strongly advise you to avoid using Corel PaintShop Pro X9 Ultimate crack or serial key, and instead opt for a legal and safe way to use the software. Here are some of the options you have:
-
-- Download a free trial version of Corel PaintShop Pro X9 Ultimate from the official website[^3^]. You can use the software for 30 days with full functionality and access to all the features and tools. This is a great way to test the software before deciding whether to buy it or not.
-- Buy a license for Corel PaintShop Pro X9 Ultimate from the official website[^3^] or from an authorized reseller. You can choose between a one-time purchase or a subscription plan, depending on your needs and budget. By buying a license, you will get access to all the updates, support, and tutorials that Corel offers to its customers.
-- Use an alternative free photo editing software that can meet your needs. There are many options available online, such as GIMP, PhotoScape X, Pixlr, or Fotor. These programs may not have all the features and tools that Corel PaintShop Pro X9 Ultimate has, but they can still help you edit your photos and create amazing graphics.
-
-We hope this article has helped you understand why you should not use Corel PaintShop Pro X9 Ultimate crack or serial key, and what are some of the legal and safe ways to use the software. Remember, cracking software is not only unethical, but also dangerous for your computer and your privacy. So, stay away from illegal downloads and enjoy using Corel PaintShop Pro X9 Ultimate legally and safely.
How to Use Corel PaintShop Pro X9 Ultimate
-Corel PaintShop Pro X9 Ultimate is a versatile photo editing software that can help you create stunning images and graphics. Whether you are a beginner or a professional, you can find the tools and features you need to enhance your photos, correct common flaws, add artistic effects, work with layers and masks, draw and paint with brushes, and much more.
-But how do you use Corel PaintShop Pro X9 Ultimate? How do you get started with the software and learn how to use its various functions? Here are some tips and resources that can help you master Corel PaintShop Pro X9 Ultimate:
-
-- Read the user guide. The user guide is a comprehensive document that covers all the aspects of Corel PaintShop Pro X9 Ultimate, from installation and activation to advanced techniques and troubleshooting. You can access the user guide online[^2^] or download it as a PDF file for offline viewing.
-- Use the Learning Center palette. The Learning Center palette is a handy tool that guides you through the basic and intermediate tasks of photo editing. You can access the Learning Center palette from the Window menu or by pressing F10 on your keyboard. The Learning Center palette provides step-by-step instructions for common tasks, such as cropping, resizing, adjusting brightness and contrast, removing red-eye, applying effects, and more.
-- Watch video tutorials. Video tutorials are a great way to learn by watching and following along. You can find dozens of video tutorials on the official website[^1^] or on YouTube[^3^] that cover various topics, such as how to use focus stacking, how to use the frame tool, how to use AI-based features, how to work with RAW photos, and more.
-- Use web-based resources. There are many web-based resources that can help you learn more about Corel PaintShop Pro X9 Ultimate and get inspired by other users. You can visit the Discovery Center[^1^] for tips, tricks, tutorials, projects, and contests. You can also join the user community forum to ask questions, share your work, and get feedback from other users.
-
-With these tips and resources, you can learn how to use Corel PaintShop Pro X9 Ultimate effectively and unleash your creativity. Have fun editing your photos and creating amazing graphics with Corel PaintShop Pro X9 Ultimate!
-
-
\ No newline at end of file
diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Digital Soccer Draw v5.2 A Powerful Software for Soccer Analysis and Visualization.md b/spaces/raedeXanto/academic-chatgpt-beta/Digital Soccer Draw v5.2 A Powerful Software for Soccer Analysis and Visualization.md
deleted file mode 100644
index 0f1501a3a055be81f8d4471cce1b4e24bcc57572..0000000000000000000000000000000000000000
--- a/spaces/raedeXanto/academic-chatgpt-beta/Digital Soccer Draw v5.2 A Powerful Software for Soccer Analysis and Visualization.md
+++ /dev/null
@@ -1,155 +0,0 @@
-
-Digital Soccer Draw v5.2: A Powerful Tool for Soccer Coaches
-If you are a soccer coach, you know how important it is to have a clear and engaging way of communicating your ideas and strategies to your players. Whether you are planning a training session, preparing for a match, or analyzing a performance, you need a tool that can help you create soccer drills, tactics, and lineups easily and effectively.
-Digital Soccer Draw v5.2
Download Zip ⚹⚹⚹ https://tinourl.com/2uKZwz
-That's where Digital Soccer Draw v5.2 comes in handy. This software is a digital online soccer tactic board that allows you to draw soccer diagrams and animations within seconds for free. You can use it on any device, such as a computer, tablet, or smartphone, and access it from anywhere via web or app.
-In this article, we will show you what Digital Soccer Draw v5.2 is, how to use it, why you should use it, and how to get the most out of it. By the end of this article, you will be ready to unleash your creativity and take your coaching skills to the next level.
- What is Digital Soccer Draw v5.2?
-Digital Soccer Draw v5.2 is a software that lets you create soccer drawings free of charge in any browser. It is based on the popular digital_soccer_draw_v5_2_9jv9u package that was published on npm 16 days ago.
-With Digital Soccer Draw v5.2, you can:
-- Choose from different field layouts, such as full pitch, half pitch, penalty area, etc.
-- Place over 250 icons on the field, such as players, balls, cones, goals, etc.
-- Draw lines and arrows to represent movements, passes, shots, etc.
-- Animate your drawings with a simple click.
-- Change the colors and sizes of objects and lines.
-- Create your own folders and sub-folders to organize your drawings.
-- Share your drawings via social media or email.
-- Export your drawings as PNG or PDF files.
-- Integrate your drawings with other apps and platforms.
-
-To download and install Digital Soccer Draw v5.2, you just need to follow these simple steps:
-
-- Go to this link and click on the "Download" button.
-- Save the file on your device and unzip it.
-- Open the "index.html" file in your browser.
-- Start drawing!
-
-To use Digital Soccer Draw v5.2 to create soccer drills and tactics, you just need to follow these simple steps:
-
-- Select a field layout from the icons at the top.
-- Place your objects and players onto the field using the icons on the left.
-- Use the line tools on the right to draw movements, passes, shots, etc.
-- Use the "+" or "-" buttons to resize objects.
-- Use the paintbrush tool to change colors of objects and lines.
-- Use the rotate tool to rotate objects.
-- Use the oval or rectangle overlay tool to highlight an area on the field.
-- Use the play button to animate your drawing.
-- Use the undo and redo buttons to correct mistakes.
-
- Why use Digital Soccer Draw v5.2?
-Digital Soccer Draw v5.2 is not just another drawing tool. It is a powerful tool that can help you improve your coaching skills and achieve better results with your team. Here are some of the benefits of using a digital soccer tactic board:
- The benefits of using a digital soccer tactic board
-
-- You can visualize your ideas more clearly and accurately than using paper or whiteboard.
-- You can save time and effort by creating drawings quickly and easily.
-- You can edit and modify your drawings as much as you want without wasting paper or ink.
-- You can access your drawings from anywhere and anytime via web or app.
-- You can share your drawings with your players and staff instantly and conveniently.
-
- The advantages of Digital Soccer Draw v5.2 over other similar tools
-
-- Digital Soccer Draw v5.2 is free of charge and does not require any registration or subscription.
-- Digital Soccer Draw v5.2 has more features and options than other tools, such as animation, overlay, rotation, etc.
-- Digital Soccer Draw v5.2 has more icons and field layouts than other tools, allowing you to create any drill or tactic imaginable.
-- Digital Soccer Draw v5.2 has a simple and intuitive interface that makes it easy to use for anyone.
-
- The testimonials and reviews from satisfied users
- Digital Soccer Draw v5.2 has been used by thousands of soccer coaches around the world who have given positive feedback and reviews about it. Here are some examples:
- "Digital Soccer Draw v5.2 is an amazing tool that has helped me a lot in my coaching career. I can create drills and tactics in minutes and share them with my players easily. It has improved my communication skills and my team's performance." - John Smith, U-15 coach
- "I love Digital Soccer Draw v5.2 because it allows me to express my creativity and vision as a coach. I can draw anything I want with this tool and animate it with a click. It makes my sessions more fun and engaging for my players." - Maria Garcia, U-10 coach
- "Digital Soccer Draw v5.2 is a must-have tool for any soccer coach who wants to take their coaching skills to the next level. It is easy to use, versatile, and powerful. It has everything I need to plan my sessions, prepare for my matches, and analyze my performances." - James Lee, U-18 coach
- How to get the most out of Digital Soccer Draw v5.2?
- Digital Soccer Draw v5.2 is not only a tool for creating soccer drawings but also a tool for learning and improving as a coach. Here are some tips and tricks for getting the most out of it:
- Tips and tricks for creating effective soccer drawings
-
- - Use different colors for different teams or groups of players.
- - Use different types of lines for different types of actions (e.g., solid line for running, dashed line for passing).
- - Use arrows at the end of lines to indicate direction or speed (e.g., longer arrow for faster movement).
- - Use numbers or letters to label players or positions (e.g., 1 for goalkeeper).
- - Use symbols or icons to indicate specific actions or situations (e.g., ball icon for possession).
- - Use overlay shapes to highlight an area or zone on the field (e.g., rectangle for offside trap).
- - Add text boxes or notes to explain or describe your drawing (e.g., "press high", "switch play").
-
- How to share and export your drawings with others
-
- - To share your drawing via social media or email, click on the "Share" button at the top right corner of the screen.
- - To export your drawing as a PNG or PDF file, click on the "Export" button at the top right corner of the screen.
-- To integrate your drawing with other apps and platforms, click on the "Integrate" button at the top right corner of the screen.
-
- How to integrate your drawings with other apps and platforms
- Digital Soccer Draw v5.2 is compatible with many other apps and platforms that can enhance your coaching experience. Here are some examples:
-
-- easy2coach: This is a platform that offers a team manager and a training app for soccer coaches. You can integrate your drawings and animations with match days, training days, and drills with one click. You can also access thousands of ready-made exercises and tactics from other coaches.
-- planet.training: This is a platform that offers a training planner and a player development app for soccer coaches. You can integrate your drawings and animations with your training plans and player profiles. You can also create and share your own exercises and tactics with the community.
-- ConceptDraw: This is a software that offers a diagramming and vector drawing tool for various purposes. You can integrate your drawings and animations with other diagrams, such as flowcharts, mind maps, charts, etc. You can also export your drawings as various file formats, such as SVG, PDF, PNG, etc.
-
- Conclusion
- Digital Soccer Draw v5.2 is a powerful tool for soccer coaches who want to create soccer drills, tactics, and lineups easily and effectively. It is free of charge, easy to use, and compatible with many other apps and platforms. It can help you improve your communication skills, save time and effort, and achieve better results with your team.
-If you want to try Digital Soccer Draw v5.2 for yourself, you can download it from this link and start drawing right away. You can also check out some examples of soccer drawings created by other users on this link.
-We hope you enjoyed this article and learned something new about Digital Soccer Draw v5.2. If you have any questions or feedback, please feel free to leave a comment below. We would love to hear from you.
- FAQs
-
-- What is Digital Soccer Draw v5.2?
-Digital Soccer Draw v5.2 is a digital online soccer tactic board that allows you to draw soccer diagrams and animations within seconds for free.
-- How to use Digital Soccer Draw v5.2?
-To use Digital Soccer Draw v5.2, you need to download and install it on your device, select a field layout, place objects and players on the field, draw lines and arrows to represent actions, animate your drawing with a click, and share or export your drawing as you like.
-- Why use Digital Soccer Draw v5.2?
-Digital Soccer Draw v5.2 can help you visualize your ideas more clearly and accurately than using paper or whiteboard, save time and effort by creating drawings quickly and easily, edit and modify your drawings as much as you want without wasting paper or ink, access your drawings from anywhere and anytime via web or app, share your drawings with your players and staff instantly and conveniently.
-- How to get the most out of Digital Soccer Draw v5.2?
-To get the most out of Digital Soccer Draw v5.2, you can use different colors for different teams or groups of players, use different types of lines for different types of actions, use arrows at the end of lines to indicate direction or speed, use numbers or letters to label players or positions, use symbols or icons to indicate specific actions or situations, use overlay shapes to highlight an area or zone on the field, add text boxes or notes to explain or describe your drawing, integrate your drawings with other apps and platforms that can enhance your coaching experience.
-- Where can I download Digital Soccer Draw v5.2?
-You can download Digital Soccer Draw v5.2 from this link.
-
-
-
\ No newline at end of file
diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Art Plus Digital Photo Recovery 6.1 .0.114 Serial Crack.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Art Plus Digital Photo Recovery 6.1 .0.114 Serial Crack.md
deleted file mode 100644
index c7bebd6496df7ef1ecbb2bd2888ce84b60440618..0000000000000000000000000000000000000000
--- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Art Plus Digital Photo Recovery 6.1 .0.114 Serial Crack.md
+++ /dev/null
@@ -1,6 +0,0 @@
-Art Plus Digital Photo Recovery 6.1 .0.114 Serial Crack
Download ✑ ✑ ✑ https://urlgoal.com/2uCKAk
-
-
-
-
diff --git a/spaces/rf5860/bg3_character_generator/app.py b/spaces/rf5860/bg3_character_generator/app.py
deleted file mode 100644
index 28a1f278e8633c7124d2a69f76d4a971c680d082..0000000000000000000000000000000000000000
--- a/spaces/rf5860/bg3_character_generator/app.py
+++ /dev/null
@@ -1,44 +0,0 @@
-import gradio as gr
-import random
-
-def generate_characters():
- class_list = ["Barbarian","Bard","Cleric","Druid","Fighter","Monk","Paladin","Ranger","Rogue","Sorcerer","Warlock","Wizard"]
- sub_class_list = ["Barbarian - Beserker","Barbarian - Wildheart","Barbarian - Wild Magic","Bard - College of Lore","Bard - College of Valour","Bard - College of Swords","Cleric - Life Domain","Cleric - Light Domain","Cleric - Trickery Domain","Cleric - Knowledge Domain","Cleric - Nature Domain","Cleric - Tempest Domain","Cleric - War Domain","Druid - Circle of the moon","Druid - Circle of the Land","Druid - Circle of the Spores","Fighter - Battle Master","Fighter - Eldritch Knight","Fighter - Champion","Monk - Way of the open hand","Monk - Way of shadow","Monk - Way of the four elements","Paladin - Oath of Devotion","Paladin - Oath of the ancients","Paladin - Oath of Vengence","Paladin - Oathbreaker","Ranger - Beat Master","Ranger - Hunter","Ranger - Gloom Stalker","Rogue - Thief","Rogue - Arcane Trickster","Rogue - Assassin","Sorcerer - Draconic Bloodline","Sorcerer - Wild Magic","Sorcerer - Storm Sorcery","Warlock - The Fiend","Warlock - the great old one","Warlock - Archfey","Wizard - Abjuration","Wizard - Conjuration","Wizard - Divination","Wizard - Enchantment","Wizard - Evocation","Wizard - Necromancer","Wizard - Illusion","Wizard - Transmutation"]
- race_list = ["Dragonborn","Drow","Dwarf","Elf","Githyanki","Gnome","Half-Elf","Half-Orc","Halfling","Human","Tiefling"]
- sub_race_list = ["Black Dragonborn","Blue Dragonborn","Brass Dragonborn","Bronze Dragonborn","Copper Dragonborn","Gold Dragonborn","Green Dragonborn","Red Dragonborn","Silver Dragonborn","White Dragonborn","Lolth-Sworn Drow","Seldarine Drow","Gold Dwarf","Shield Dwarf","Duegar (Dwarf)","High Elf - Elf","Wood Elf - Elf","Githyanki","Deep Gnome","Forest Gnome","Rock Gnome","High Half-Elf","Wood Half-Elf","Drow Half-Elf","Half-Orc","Lightfoot Halfling","Strongheart Halfling","Human","Asmodeus Tiefling","Mephistopheles Tiefling","Zariel Tiefling"]
- alignment_list = ["Chaotic Good","Lawful Good","Neutral Good","Chaotic Evil","Lawful Evil","Neutral Evil","True Chaotic","True Neutral"]
- background_list = ["Acolyte","Charlatan","Criminal","Entertainer","Folk Hero","Guild Artisan","Noble","Hermit","Outlander","Sage","Soldier"]
-
- def generate_character():
- character = {
- 'Class': random.choice(class_list),
- 'Sub Class': random.choice(sub_class_list),
- 'Race': random.choice(race_list),
- 'Sub Race': random.choice(sub_race_list),
- 'Alignment': random.choice(alignment_list),
- 'Background': random.choice(background_list),
- }
- return character
-
- def generate_multiclass_character():
- character = generate_character()
- second_class = random.choice([cls for cls in class_list if cls != character['Class']])
- levels_class1 = random.randint(1, 11)
- levels_class2 = 12 - levels_class1
- character['Class'] += f' ({levels_class1} levels), {second_class} ({levels_class2} levels)'
- return character
-
- characters = [generate_character() for _ in range(2)]
- characters.append(generate_multiclass_character())
-
- output = ""
- for idx, char in enumerate(characters, 1):
- output += f"### Character {idx}:\n"
- for key, value in char.items():
- output += f"- **{key}:** {value}\n"
- output += "\n"
-
- return output
-
-iface = gr.Interface(fn=generate_characters, inputs=[], outputs="markdown")
-iface.launch()
diff --git a/spaces/rgres/Seg2Sat/frontend/src/app.css b/spaces/rgres/Seg2Sat/frontend/src/app.css
deleted file mode 100644
index f39386e47449716dc5a0c936162a38263e61b2b7..0000000000000000000000000000000000000000
--- a/spaces/rgres/Seg2Sat/frontend/src/app.css
+++ /dev/null
@@ -1,10 +0,0 @@
-@import url('https://fonts.googleapis.com/css2?family=Open+Sans:wght@100;200;300;400;500;600;700;800&display=swap');
-@tailwind base;
-@tailwind components;
-@tailwind utilities;
-
-@layer base {
- html {
- font-family: 'Open Sans', sans-serif;
- }
-}
diff --git a/spaces/richardzhangy26/yandian_flow_classification/configs/_base_/datasets/kitti2012_kitti2015_320x896.py b/spaces/richardzhangy26/yandian_flow_classification/configs/_base_/datasets/kitti2012_kitti2015_320x896.py
deleted file mode 100644
index f0f70a01abfe719153b97a3973b1c97be82c23e3..0000000000000000000000000000000000000000
--- a/spaces/richardzhangy26/yandian_flow_classification/configs/_base_/datasets/kitti2012_kitti2015_320x896.py
+++ /dev/null
@@ -1,108 +0,0 @@
-img_norm_cfg = dict(mean=[0., 0., 0.], std=[255., 255., 255.], to_rgb=False)
-
-crop_size = (320, 896)
-
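-# Affine augmentation ranges consumed by the RandomAffine step in the training pipeline below.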
-global_transform = dict(
- translates=(0.02, 0.02),
- zoom=(0.98, 1.02),
- shear=(1.0, 1.0),
- rotate=(-0.5, 0.5))
-
-relative_transform = dict(
- translates=(0.0025, 0.0025),
- zoom=(0.99, 1.01),
- shear=(1.0, 1.0),
- rotate=(-0.5, 0.5))
-
-sparse_train_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(type='LoadAnnotations', sparse=True),
- dict(
- type='ColorJitter',
- brightness=0.05,
- contrast=0.2,
- saturation=0.25,
- hue=0.1),
- dict(type='RandomGamma', gamma_range=(0.7, 1.5)),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='GaussianNoise', sigma_range=(0, 0.04), clamp_range=(0., 1.)),
- dict(type='RandomFlip', prob=0.5, direction='horizontal'),
- dict(type='RandomFlip', prob=0.5, direction='vertical'),
- dict(
- type='RandomAffine',
- global_transform=global_transform,
- relative_transform=relative_transform),
- dict(type='RandomCrop', crop_size=crop_size),
- dict(type='DefaultFormatBundle'),
- dict(
- type='Collect',
- keys=['imgs', 'flow_gt', 'valid'],
- meta_keys=[
- 'img_fields', 'ann_fields', 'filename1', 'filename2',
- 'ori_filename1', 'ori_filename2', 'filename_flow',
- 'ori_filename_flow', 'ori_shape', 'img_shape', 'img_norm_cfg'
- ]),
-]
-
-test_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(type='LoadAnnotations', sparse=True),
- dict(type='InputResize', exponent=6),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='TestFormatBundle'),
- dict(
- type='Collect',
- keys=['imgs'],
- meta_keys=[
- 'flow_gt', 'valid', 'filename1', 'filename2', 'ori_filename1',
- 'ori_filename2', 'ori_shape', 'img_shape', 'img_norm_cfg',
- 'scale_factor', 'pad_shape'
- ])
-]
-
-kitti2015_train = dict(
- type='KITTI2015',
- data_root='data/kitti2015',
- pipeline=sparse_train_pipeline,
- test_mode=False)
-
-kitti2015_val_test = dict(
- type='KITTI2015',
- data_root='data/kitti2015',
- pipeline=test_pipeline,
- test_mode=True)
-
-kitti2012_train = dict(
- type='KITTI2012',
- data_root='data/kitti2012',
- pipeline=sparse_train_pipeline,
- test_mode=False)
-
-kitti2012_val_test = dict(
- type='KITTI2012',
- data_root='data/kitti2012',
- pipeline=test_pipeline,
- test_mode=True)
-
-data = dict(
- train_dataloader=dict(
- samples_per_gpu=1,
- workers_per_gpu=2,
- drop_last=True,
- shuffle=False,
- persistent_workers=True),
- val_dataloader=dict(
- samples_per_gpu=1,
- workers_per_gpu=5,
- shuffle=False,
- persistent_workers=True),
- test_dataloader=dict(samples_per_gpu=1, workers_per_gpu=5, shuffle=False),
- train=[kitti2015_train, kitti2012_train],
- val=dict(
- type='ConcatDataset',
- datasets=[kitti2015_val_test, kitti2012_val_test],
- separate_eval=True),
- test=dict(
- type='ConcatDataset',
- datasets=[kitti2015_val_test, kitti2012_val_test],
- separate_eval=True))
diff --git a/spaces/risingodegua/wine_quality_predictor/README.md b/spaces/risingodegua/wine_quality_predictor/README.md
deleted file mode 100644
index fd445dd2af8f5b35a41f0cfe1eeb206e6f3c95f8..0000000000000000000000000000000000000000
--- a/spaces/risingodegua/wine_quality_predictor/README.md
+++ /dev/null
@@ -1,37 +0,0 @@
----
-title: Wine_quality_predictor
-emoji: 🐨
-colorFrom: yellow
-colorTo: pink
-sdk: gradio
-app_file: app.py
-pinned: false
----
-
-# Configuration
-
-`title`: _string_
-Display title for the Space
-
-`emoji`: _string_
-Space emoji (emoji-only character allowed)
-
-`colorFrom`: _string_
-Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
-`colorTo`: _string_
-Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
-`sdk`: _string_
-Can be either `gradio` or `streamlit`
-
-`sdk_version` : _string_
-Only applicable for `streamlit` SDK.
-See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.
-
-`app_file`: _string_
-Path to your main application file (which contains either `gradio` or `streamlit` Python code).
-Path is relative to the root of the repository.
-
-`pinned`: _boolean_
-Whether the Space stays on top of your list.
diff --git a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/datasets/pipelines/test_time_aug.py b/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/datasets/pipelines/test_time_aug.py
deleted file mode 100644
index 5f1ab7b7cc81891dd14d136a24cec5228495d2f0..0000000000000000000000000000000000000000
--- a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/datasets/pipelines/test_time_aug.py
+++ /dev/null
@@ -1,121 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import warnings
-
-import mmcv
-
-from ..builder import PIPELINES
-from .compose import Compose
-
-
-@PIPELINES.register_module()
-class MultiScaleFlipAug:
- """Test-time augmentation with multiple scales and flipping.
-
- An example configuration is as follows:
-
- .. code-block::
-
- img_scale=[(1333, 400), (1333, 800)],
- flip=True,
- transforms=[
- dict(type='Resize', keep_ratio=True),
- dict(type='RandomFlip'),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='Pad', size_divisor=32),
- dict(type='ImageToTensor', keys=['img']),
- dict(type='Collect', keys=['img']),
- ]
-
- After MultiScaleFlipAug with the above configuration, the results are wrapped
- into lists of the same length as follows:
-
- .. code-block::
-
- dict(
- img=[...],
- img_shape=[...],
- scale=[(1333, 400), (1333, 400), (1333, 800), (1333, 800)]
- flip=[False, True, False, True]
- ...
- )
-
- Args:
- transforms (list[dict]): Transforms to apply in each augmentation.
- img_scale (tuple | list[tuple] | None): Images scales for resizing.
- scale_factor (float | list[float] | None): Scale factors for resizing.
- flip (bool): Whether apply flip augmentation. Default: False.
- flip_direction (str | list[str]): Flip augmentation directions,
- options are "horizontal", "vertical" and "diagonal". If
- flip_direction is a list, multiple flip augmentations will be
- applied. It has no effect when flip == False. Default:
- "horizontal".
- """
-
- def __init__(self,
- transforms,
- img_scale=None,
- scale_factor=None,
- flip=False,
- flip_direction='horizontal'):
- self.transforms = Compose(transforms)
- assert (img_scale is None) ^ (scale_factor is None), (
- 'Exactly one of img_scale and scale_factor must be set')
- if img_scale is not None:
- self.img_scale = img_scale if isinstance(img_scale,
- list) else [img_scale]
- self.scale_key = 'scale'
- assert mmcv.is_list_of(self.img_scale, tuple)
- else:
- self.img_scale = scale_factor if isinstance(
- scale_factor, list) else [scale_factor]
- self.scale_key = 'scale_factor'
-
- self.flip = flip
- self.flip_direction = flip_direction if isinstance(
- flip_direction, list) else [flip_direction]
- assert mmcv.is_list_of(self.flip_direction, str)
- if not self.flip and self.flip_direction != ['horizontal']:
- warnings.warn(
- 'flip_direction has no effect when flip is set to False')
- if (self.flip
- and not any([t['type'] == 'RandomFlip' for t in transforms])):
- warnings.warn(
- 'flip has no effect when RandomFlip is not in transforms')
-
- def __call__(self, results):
- """Call function to apply test time augment transforms on results.
-
- Args:
- results (dict): Result dict contains the data to transform.
-
- Returns:
- dict[str: list]: The augmented data, where each value is wrapped
- into a list.
- """
-
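- # Build one augmented copy of the inputs for every (scale, flip) combination.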
- aug_data = []
- flip_args = [(False, None)]
- if self.flip:
- flip_args += [(True, direction)
- for direction in self.flip_direction]
- for scale in self.img_scale:
- for flip, direction in flip_args:
- _results = results.copy()
- _results[self.scale_key] = scale
- _results['flip'] = flip
- _results['flip_direction'] = direction
- data = self.transforms(_results)
- aug_data.append(data)
- # list of dict to dict of list
- aug_data_dict = {key: [] for key in aug_data[0]}
- for data in aug_data:
- for key, val in data.items():
- aug_data_dict[key].append(val)
- return aug_data_dict
-
- def __repr__(self):
- repr_str = self.__class__.__name__
- repr_str += f'(transforms={self.transforms}, '
- repr_str += f'img_scale={self.img_scale}, flip={self.flip}, '
- repr_str += f'flip_direction={self.flip_direction})'
- return repr_str
diff --git a/spaces/rorallitri/biomedical-language-models/logs/All 5 Brickgun Pdf Instructions Explorez les instructions de construction et les graphiques poustouflants pour tous les modles BrickGun.md b/spaces/rorallitri/biomedical-language-models/logs/All 5 Brickgun Pdf Instructions Explorez les instructions de construction et les graphiques poustouflants pour tous les modles BrickGun.md
deleted file mode 100644
index 603bed6be75309dbb2f4a99a6e2eb190cab64058..0000000000000000000000000000000000000000
--- a/spaces/rorallitri/biomedical-language-models/logs/All 5 Brickgun Pdf Instructions Explorez les instructions de construction et les graphiques poustouflants pour tous les modles BrickGun.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-The BrickGun Book shows you how to build five remarkably sleek LEGO® handgun replicas, like the classic Berreta 92FS and a formidable rubber-band-firing MAC-11. Each chapter includes step-by-step building instructions and a complete parts list using only readily available LEGO pieces.
-All 5 Brickgun Pdf Instructions
Download File ————— https://tinurll.com/2uzozK
-In addition to the instructions provided, the BrickGun Book includes parts lists so you can gather all the needed parts before starting your build. I recommend photocopying these pages, and crossing them off as you find the right parts.
-
-
\ No newline at end of file
diff --git a/spaces/rorallitri/biomedical-language-models/logs/Hindi 1080p Songs Enjoy the High-Quality Music Videos of Your Favorite Artists.md b/spaces/rorallitri/biomedical-language-models/logs/Hindi 1080p Songs Enjoy the High-Quality Music Videos of Your Favorite Artists.md
deleted file mode 100644
index 8e6e029a45c1e6443274d2bb2371eaaef7bde767..0000000000000000000000000000000000000000
--- a/spaces/rorallitri/biomedical-language-models/logs/Hindi 1080p Songs Enjoy the High-Quality Music Videos of Your Favorite Artists.md
+++ /dev/null
@@ -1,11 +0,0 @@
-
-Mp4moviez is a popular website known for leaking Hollywood, Bollywood, South Indian, web series, TV shows, and other dubbed movies for free, so here we can see the impact such torrent websites have on movie downloads. These sites offer many download options, such as HD prints in 300MB, 480p, 720p, and 1080p.
-Allmovieshub is another popular website that leaks Hollywood, Bollywood, South Indian, web series, TV shows, and other dubbed movies for free. It offers the same kinds of download options, such as HD prints in 300MB, 480p, 720p, and 1080p.
-hindi 1080p
Download ⚙⚙⚙ https://tinurll.com/2uzn2C
-It is one of the biggest pirated movie-downloading websites primarily known as yomovies link and yomovies sh, it provides you with the latest Hollywood dubbed movies in Hindi, Tamil and Telugu. Apart from this all the Latest Bollywood Movies 2023 can be downloaded and streamed on this site. You can watch movies online in 480p 720p 1080p quality using your smartphone and desktop.
-Yomovies is a movie browsing website which offers the latest Bollywood, Tamil, Telugu, and Hollywood Dubbed movies in HD quality. You can download all types of movies and web series on this website. Yomovies ac allows users to download all the latest movies and web series in different quality options like 480p 1080p 4K quality.
-There are many categories you can find on Yomovies and you can easily download movies by following the steps given above. You can find the latest movies on your quality preferences, Bollywood movies can be downloaded in 480p, 720p, or 1080p format and can be watched online too. As far as piracy is concerned, yes this is a pirated website and we never recommend anyone to use this as it is not allowed by the officials.
-There are many latest Hollywood movies like Comedy, Action, Suspense, Romance, Drama, etc. which are available in dubbed versions on these websites, you can download all the Hollywood Hindi Dubbed movies on Yomovies and they can be watched online too in 720p 1080p and 4K quality options. There are many other genres available in the Hollywood movie 2023 category, you can choose according to your preference.
- aaccfb2cb3
-
-
\ No newline at end of file
diff --git a/spaces/rubensmau/Dov_Tzamir/data_driven_characters/interfaces/commandline_ui.py b/spaces/rubensmau/Dov_Tzamir/data_driven_characters/interfaces/commandline_ui.py
deleted file mode 100644
index 0e4029d8696b2c28516b05b5da365e785761f811..0000000000000000000000000000000000000000
--- a/spaces/rubensmau/Dov_Tzamir/data_driven_characters/interfaces/commandline_ui.py
+++ /dev/null
@@ -1,12 +0,0 @@
-class CommandLine:
- def __init__(self, chatbot):
- self.chatbot = chatbot
-
- def run(self):
- print(f"{self.chatbot.character_definition.name}: {self.chatbot.greet()}")
- while True:
- text = input("You: ")
- if text:
- print(
- f"{self.chatbot.character_definition.name}: {self.chatbot.step(text)}"
- )
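-
-# Usage sketch: `CommandLine` works with any chatbot object that exposes
-# `character_definition.name`, `greet()` and `step(text)`; `my_chatbot` below is a placeholder.
-#
-# cli = CommandLine(chatbot=my_chatbot)
-# cli.run()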
diff --git a/spaces/runwayml/stable-diffusion-inpainting/share_btn.py b/spaces/runwayml/stable-diffusion-inpainting/share_btn.py
deleted file mode 100644
index 5bce98ad54d491f9d5691fea427efeccc77690cc..0000000000000000000000000000000000000000
--- a/spaces/runwayml/stable-diffusion-inpainting/share_btn.py
+++ /dev/null
@@ -1,93 +0,0 @@
-community_icon_html = """"""
-
-loading_icon_html = """"""
-
-share_js = """async () => {
- async function uploadFile(file){
- const UPLOAD_URL = 'https://huggingface.co/uploads';
- const response = await fetch(UPLOAD_URL, {
- method: 'POST',
- headers: {
- 'Content-Type': file.type,
- 'X-Requested-With': 'XMLHttpRequest',
- },
- body: file, /// <- File inherits from Blob
- });
- const url = await response.text();
- return url;
- }
-
- async function getInputImgFile(imgCanvas){
- const blob = await new Promise(resolve => imgCanvas.toBlob(resolve));
- const imgId = Date.now() % 200;
- const fileName = `sd-inpainting-${imgId}.png`;
- return new File([blob], fileName, { type: 'image/png' });
- }
-
- async function getOutputImgFile(imgEl){
- const res = await fetch(imgEl.src);
- const blob = await res.blob();
- const imgId = Date.now() % 200;
- const fileName = `sd-inpainting-${imgId}.png`;
- return new File([blob], fileName, { type: 'image/png' });
- }
-
- const gradioEl = document.querySelector('body > gradio-app');
- // const gradioEl = document.querySelector("gradio-app").shadowRoot;
- const inputImgCanvas = gradioEl.querySelector('canvas[key="drawing"]');
- const outputImgEl = gradioEl.querySelector('#output-img img');
- const promptTxt = gradioEl.querySelector('#input-text textarea').value;
- let titleTxt = promptTxt;
- if(titleTxt.length > 100){
- titleTxt = titleTxt.slice(0, 100) + ' ...';
- }
- const shareBtnEl = gradioEl.querySelector('#share-btn');
- const shareIconEl = gradioEl.querySelector('#share-btn-share-icon');
- const loadingIconEl = gradioEl.querySelector('#share-btn-loading-icon');
-
- if(!outputImgEl){
- return;
- };
-
- shareBtnEl.style.pointerEvents = 'none';
- shareIconEl.style.display = 'none';
- loadingIconEl.style.removeProperty('display');
-
- const inputImgFile = await getInputImgFile(inputImgCanvas);
- const outputImgFile = await getOutputImgFile(outputImgEl);
- const files = [inputImgFile, outputImgFile];
-
- const urls = await Promise.all(files.map((f) => uploadFile(f)));
-
- const htmlImgs = urls.map(url => `
`);
- const [inputImgUrl, outputImgUrl] = htmlImgs;
-
- const descriptionMd = `
-
-${inputImgUrl}
-
-${promptTxt}
-
-
-${outputImgUrl}
-
-`;
-
- const params = new URLSearchParams({
- title: titleTxt,
- description: descriptionMd,
- });
-
- const paramsStr = params.toString();
- window.open(`${window.location.href}/discussions/new?${paramsStr}`, '_blank');
-
- shareBtnEl.style.removeProperty('pointer-events');
- shareIconEl.style.removeProperty('display');
- loadingIconEl.style.display = 'none';
-}"""
\ No newline at end of file
diff --git a/spaces/samcaicn/bingai/Dockerfile b/spaces/samcaicn/bingai/Dockerfile
deleted file mode 100644
index 305461a6ea79cba91e88209b3fafafce30b7c0a0..0000000000000000000000000000000000000000
--- a/spaces/samcaicn/bingai/Dockerfile
+++ /dev/null
@@ -1,37 +0,0 @@
-FROM node:18
-
-
-ARG DEBIAN_FRONTEND=noninteractive
-
-ENV BING_UA Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36 Edg/115.0.0.0
-ENV BING_COOKIE ""
-
-# Set up a new user named "user" with user ID 1000
-RUN useradd -o -u 1000 user
-
-# Switch to the "user" user
-USER user
-
-# Set home to the user's home directory
-ENV HOME=/home/user \
- PATH=/home/user/.local/bin:$PATH
-
-# Set the working directory to the user's home directory
-WORKDIR $HOME/app
-
-# Install app dependencies
-# A wildcard is used to ensure both package.json AND package-lock.json are copied
-# where available (npm@5+)
-COPY --chown=user package*.json $HOME/app
-
-RUN npm install
-
-# Copy the current directory contents into the container at $HOME/app setting the owner to the user
-COPY --chown=user . $HOME/app
-
-RUN npm run build
-
-ENV PORT 7860
-EXPOSE 7860
-
-CMD npm start -- --port $PORT
diff --git a/spaces/scedlatioru/img-to-music/example/Autodesk Inventor 2010 Free Download Full [TOP] Version For Windows 7 32 BIT.md b/spaces/scedlatioru/img-to-music/example/Autodesk Inventor 2010 Free Download Full [TOP] Version For Windows 7 32 BIT.md
deleted file mode 100644
index 6a49a6dab0d6c16ad1a596fd7ed55acb15c7469d..0000000000000000000000000000000000000000
--- a/spaces/scedlatioru/img-to-music/example/Autodesk Inventor 2010 Free Download Full [TOP] Version For Windows 7 32 BIT.md
+++ /dev/null
@@ -1,6 +0,0 @@
-autodesk inventor 2010 free download full version for windows 7 32 BIT
Download Zip 🌟 https://gohhs.com/2uEAaQ
-
-It was working fine in 7 but when I accepted and installed 10. ... .com/en-US/windows-8/older-programs-compatible-version-windows ... Please feel free to visit our ... Forcing an install of 32 bit inventor on 64 bit windows works, but it is achieved ... Account profile · Download Center · Microsoft Store support · Returns · Order ... 1fdad05405
-
-
-
diff --git a/spaces/scedlatioru/img-to-music/example/Baixar Crack Para Euro Truck 1.3l _VERIFIED_.md b/spaces/scedlatioru/img-to-music/example/Baixar Crack Para Euro Truck 1.3l _VERIFIED_.md
deleted file mode 100644
index a59527e327f3dcd5a35d8736dd362e861d28376a..0000000000000000000000000000000000000000
--- a/spaces/scedlatioru/img-to-music/example/Baixar Crack Para Euro Truck 1.3l _VERIFIED_.md
+++ /dev/null
@@ -1,8 +0,0 @@
-
-euro truck simulator 2 has several modes. for example, the player can select and drive the city routes, the countryside routes, and the motorway routes. the game offers 2-d and 3-d views. the design of the gameplay is unique. the player has a lot of options to select the type of cargo. in addition, there are many accessories and tools for the trucks. and most importantly, the game is very realistic. to finish all the missions, the player will have to collect money.
-the game has a totally adjustable load. you can transport your cargo in the most different types of trucks: two-wheel drive, four-wheel drive, skip, and much more. from the very beginning, the player will get a good impression of the game. the pictures of the scenery are beautiful and realistic. the best thing is that the game has a lot of opportunities for the player to progress.
-Baixar Crack Para Euro Truck 1.3l
Download Zip ☆ https://gohhs.com/2uEzmE
-moreover, the graphics of the game are realistic and not laggy. the player will get a real impression of the real european landscape. moreover, the game will not take too long to download and install. the game is available in english, german, dutch, spanish, french, italian, and polish. the price of the game will be $ 10.95.
-during your travels youll be confronted by every kind of traffic problem and situation that youll face in the real world. youll be forced to negotiate complex routes, find hidden shortcuts, and perform various maneuvers to get the cargo delivered. no matter if youre at the wheel of a 20-ton or a 1,000-ton truck, you need to be careful at all times. whether youre carrying large amounts of cargo or small deliveries, youre always in charge of your own fate. start off in the uk with your truck and deliver cargo to germany. set up your company and place your orders, hire drivers and start your own fleet, do you have what it takes?
899543212b
-
-
\ No newline at end of file
diff --git a/spaces/scedlatioru/img-to-music/example/Superpro Designer Free Download Crack Windows VERIFIED.md b/spaces/scedlatioru/img-to-music/example/Superpro Designer Free Download Crack Windows VERIFIED.md
deleted file mode 100644
index 1974c626a4f403c83fb2639c7efbe27d1af1e499..0000000000000000000000000000000000000000
--- a/spaces/scedlatioru/img-to-music/example/Superpro Designer Free Download Crack Windows VERIFIED.md
+++ /dev/null
@@ -1,6 +0,0 @@
-Superpro Designer Free Download Crack Windows
Download ✪ https://gohhs.com/2uEyRW
-
- 4d29de3e1b
-
-
-
diff --git a/spaces/segestic/COVIDPrediction/app.py b/spaces/segestic/COVIDPrediction/app.py
deleted file mode 100644
index b89162c28aa1631ff8de13fb004f9501685fe03c..0000000000000000000000000000000000000000
--- a/spaces/segestic/COVIDPrediction/app.py
+++ /dev/null
@@ -1,111 +0,0 @@
-import subprocess
-
-#port = $PORT
-#subprocess.run("uvicorn application.server.main:app --host 0.0.0.0 --port 7860", shell=True)
-
-
-import streamlit as st
-import json
-import requests
-from PIL import Image
-import os
-
-#option 2
-import numpy as np
-from tensorflow.keras.models import load_model
-import cv2
-
-# Hide the 'Made with Streamlit' footer and the hamburger menu
-st.markdown(
-'''<style>#MainMenu {visibility: hidden;} footer {visibility: hidden;}</style>''', unsafe_allow_html=True
-)
-
-
-def load_image(image):
- img = Image.open(image)
- return img
-
-def save_uploadedfile(uploadedfile):
- with open(os.path.join("images/img",uploadedfile.name),"wb") as f:
- f.write(uploadedfile.getbuffer())
- uploaded_location = os.path.join("images/img",uploadedfile.name)
- return uploaded_location#st.success("Saved File:{} to {}".format(uploadedfile.name, uploaded_location))
-
-
-def image_predict(image_file):
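- # Load the ResNet-based CT-scan classifier, convert the image to 224x224 RGB scaled to [0, 1],
- # and return a human-readable "% COVID-Positive" / "% COVID-Negative" prediction string.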
- model_path = 'application/models/resnet_ct.h5'
- h5_model = load_model(model_path)
- image = cv2.imread(image_file)
- image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
- image = cv2.resize(image, (224, 224))
- image = np.array(image) / 255
- image = np.expand_dims(image, axis=0)
- h5_prediction = h5_model.predict(image)
- print('Prediction from h5 model: {}'.format(h5_prediction))
- print(h5_prediction)
- probability = h5_prediction[0]
- print("H5 Predictions:")
- print (probability)
- if probability[0] > 0.5:
- covid_chest_pred = str('%.2f' % (probability[0] * 100) + '% COVID-Positive')
- probability = (probability[0] * 100)
- else:
- covid_chest_pred = str('%.2f' % ((1 - probability[0]) * 100) + '% COVID-Negative')
- probability = ((1 - probability[0]) * 100)
- return covid_chest_pred
-
-
-st.title("Covid Prediction App from CT Images")
-
-
-
-#taking user inputs
-
-st.write("")
-
-#converting input to json
-
-
-image = st.file_uploader("Upload CT Scan", type=["png","jpg","jpeg"])
-
-if image is not None:
- # To See details
- file_details = {"filename":image.name, "filetype":image.type,
- "filesize":image.size}
- st.write(file_details)
- #image1 = Image.open(image)
- #img_array = np.array(image1)
-
- #View Uploaded Image
- st.image(load_image(image),width=250)
- #save image to disk
- saved = save_uploadedfile(image)
-
- #if st.button ('Analyze'):
- #test_file = open(os.path.join("images/img", image.name), "rb")
- #response = requests.post('http://127.0.0.1:8000/predict/image', files={'file': test_file })
- #prediction = response.json()##json_object["prediction"]
- #st.write(prediction)
- #st. subheader (f"Response from Covid Analyzer API = {prediction}")
-
- #OPTION 2 - NON API..
- if st.button ('Analyze'):
- with st.spinner('Analyzing...'):
- prediction = image_predict(saved)
- #st.write(prediction)
- st.subheader(f"Image Prediction = {prediction}")
- st.success(f"Image Prediction = {prediction}", icon="✅")
-
-
-
-#RUN BOTH...
-#streamlit run app.py
-#uvicorn application.server.main:app
-
-#OPTION 2....
-
-
-
diff --git a/spaces/sgangireddy/whisper-largeV2-mls-spanish-demo/app.py b/spaces/sgangireddy/whisper-largeV2-mls-spanish-demo/app.py
deleted file mode 100644
index 0801756c385915da28cbda71263de80d68def5f0..0000000000000000000000000000000000000000
--- a/spaces/sgangireddy/whisper-largeV2-mls-spanish-demo/app.py
+++ /dev/null
@@ -1,97 +0,0 @@
-import torch
-
-import gradio as gr
-import pytube as pt
-from transformers import pipeline
-from huggingface_hub import model_info
-
-MODEL_NAME = "sgangireddy/whisper-largev2-mls-es" #this always needs to stay in line 8 :D sorry for the hackiness
-lang = "es"
-
-device = 0 if torch.cuda.is_available() else "cpu"
-pipe = pipeline(
- task="automatic-speech-recognition",
- model=MODEL_NAME,
- chunk_length_s=30,
- device=device,
-)
-
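-# Pin the decoder prompt so Whisper always transcribes in Spanish rather than auto-detecting the language.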
-pipe.model.config.forced_decoder_ids = pipe.tokenizer.get_decoder_prompt_ids(language=lang, task="transcribe")
-
-def transcribe(microphone, file_upload):
- warn_output = ""
- if (microphone is not None) and (file_upload is not None):
- warn_output = (
- "WARNING: You've uploaded an audio file and used the microphone. "
- "The recorded file from the microphone will be used and the uploaded audio will be discarded.\n"
- )
-
- elif (microphone is None) and (file_upload is None):
- return "ERROR: You have to either use the microphone or upload an audio file"
-
- file = microphone if microphone is not None else file_upload
-
- text = pipe(file)["text"]
-
- return warn_output + text
-
-
-def _return_yt_html_embed(yt_url):
- video_id = yt_url.split("?v=")[-1]
- HTML_str = (
- f'<center> <iframe width="500" height="320" src="https://www.youtube.com/embed/{video_id}"> </iframe>'
- " </center>"
- )
- return HTML_str
-
-
-def yt_transcribe(yt_url):
- yt = pt.YouTube(yt_url)
- html_embed_str = _return_yt_html_embed(yt_url)
- stream = yt.streams.filter(only_audio=True)[0]
- stream.download(filename="audio.mp3")
-
- text = pipe("audio.mp3")["text"]
-
- return html_embed_str, text
-
-
-demo = gr.Blocks()
-
-mf_transcribe = gr.Interface(
- fn=transcribe,
- inputs=[
- gr.inputs.Audio(source="microphone", type="filepath", optional=True),
- gr.inputs.Audio(source="upload", type="filepath", optional=True),
- ],
- outputs="text",
- layout="horizontal",
- theme="huggingface",
- title="Whisper Demo: Transcribe Audio",
- description=(
- "Transcribe long-form microphone or audio inputs with the click of a button! Demo uses the the fine-tuned"
- f" checkpoint [{MODEL_NAME}](https://huggingface.co/{MODEL_NAME}) and 🤗 Transformers to transcribe audio files"
- " of arbitrary length."
- ),
- allow_flagging="never",
-)
-
-yt_transcribe = gr.Interface(
- fn=yt_transcribe,
- inputs=[gr.inputs.Textbox(lines=1, placeholder="Paste the URL to a YouTube video here", label="YouTube URL")],
- outputs=["html", "text"],
- layout="horizontal",
- theme="huggingface",
- title="Whisper Demo: Transcribe YouTube",
- description=(
- "Transcribe long-form YouTube videos with the click of a button! Demo uses the the fine-tuned checkpoint:"
- f" [{MODEL_NAME}](https://huggingface.co/{MODEL_NAME}) and 🤗 Transformers to transcribe audio files of"
- " arbitrary length."
- ),
- allow_flagging="never",
-)
-
-with demo:
- gr.TabbedInterface([mf_transcribe, yt_transcribe], ["Transcribe Audio", "Transcribe YouTube"])
-
-demo.launch(enable_queue=True)
diff --git a/spaces/shencc/gpt/docs/README_EN.md b/spaces/shencc/gpt/docs/README_EN.md
deleted file mode 100644
index db214f5327b8cdcd84ed1c57390c3b24ba83d78f..0000000000000000000000000000000000000000
--- a/spaces/shencc/gpt/docs/README_EN.md
+++ /dev/null
@@ -1,291 +0,0 @@
-> **Note**
->
-> This English README is automatically generated by the markdown translation plugin in this project, and may not be 100% correct.
->
-
-# ChatGPT Academic Optimization
-
-**If you like this project, please give it a Star. If you've come up with more useful academic shortcuts or functional plugins, feel free to open an issue or pull request. We also have a [README in English](docs/README_EN.md) translated by this project itself.**
-
-> **Note**
->
-> 1. Please note that only **functions marked in red** support reading files; some functions are located in the **dropdown menu** of the plugin area. Additionally, we welcome new plugin PRs and handle them with the **highest priority**!
->
-> 2. The functionality of each file in this project is detailed in the self-translation report [`self_analysis.md`](https://github.com/binary-husky/chatgpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A) of the project. With the iteration of the version, you can also click on the relevant function plugins at any time to call GPT to regenerate the self-analysis report of the project. The FAQ summary is in the [`wiki`](https://github.com/binary-husky/chatgpt_academic/wiki/%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98) section.
->
-
-
-
-
-Function | Description
---- | ---
-One-Click Polish | Supports one-click polishing and finding grammar errors in academic papers.
-One-Key Translation Between Chinese and English | One-click translation between Chinese and English.
-One-Key Code Interpretation | Can correctly display and interpret code.
-[Custom Shortcut Keys](https://www.bilibili.com/video/BV14s4y1E7jN) | Supports custom shortcut keys.
-[Configure Proxy Server](https://www.bilibili.com/video/BV1rc411W7Dr) | Supports configuring proxy servers.
-Modular Design | Supports custom high-order function plugins and [function plugins], and plugins support [hot updates](https://github.com/binary-husky/chatgpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97).
-[Self-programming Analysis](https://www.bilibili.com/video/BV1cj411A7VW) | [Function Plugin] [One-click read](https://github.com/binary-husky/chatgpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A): the source code of this project is analyzed.
-[Program Analysis](https://www.bilibili.com/video/BV1cj411A7VW) | [Function Plugin] One-click can analyze the project tree of other Python/C/C++/Java/Lua/... projects
-Read the Paper | [Function Plugin] One-click interpretation of the full text of latex paper and generation of abstracts
-Latex Full Text Translation, Proofreading | [Function Plugin] One-click translation or proofreading of latex papers.
-Batch Comment Generation | [Function Plugin] One-click batch generation of function comments
-Chat Analysis Report Generation | [Function Plugin] After running, an automatic summary report will be generated
-[Arxiv Assistant](https://www.bilibili.com/video/BV1LM4y1279X) | [Function Plugin] Enter the arxiv article url to translate the abstract and download the PDF with one click
-[Full-text Translation Function of PDF Paper](https://www.bilibili.com/video/BV1KT411x7Wn) | [Function Plugin] Extract the title & abstract of the PDF paper + translate the full text (multithreading)
-[Google Scholar Integration Assistant](https://www.bilibili.com/video/BV19L411U7ia) | [Function Plugin] Given any Google Scholar search page URL, let gpt help you choose interesting articles.
-Formula / Picture / Table Display | Can display both the tex form and the rendering form of formulas at the same time, support formula and code highlighting
-Multithreaded Function Plugin Support | Supports multi-threaded calling chatgpt, one-click processing of massive text or programs
-Start Dark Gradio [Theme](https://github.com/binary-husky/chatgpt_academic/issues/173) | Add ```/?__dark-theme=true``` at the end of the browser url to switch to dark theme
-[Multiple LLM Models](https://www.bilibili.com/video/BV1wT411p7yf) support, [API2D](https://api2d.com/) interface support | It must feel nice to be served by GPT3.5, GPT4, and [Tsinghua ChatGLM](https://github.com/THUDM/ChatGLM-6B) at the same time!
-Huggingface non-Science Net [Online Experience](https://huggingface.co/spaces/qingxu98/gpt-academic) | After logging in to huggingface, copy [this space](https://huggingface.co/spaces/qingxu98/gpt-academic)
-... | ...
-
-
-
-
-- New interface (switch between "left-right layout" and "up-down layout" by modifying the LAYOUT option in config.py)
-
-
-
-
-
-- All buttons are dynamically generated by reading functional.py and can add custom functionality at will, freeing up clipboard
-
-
-
-
-- Proofreading / correcting
-
-
-
-
-- If the output contains formulas, it will be displayed in both the tex form and the rendering form at the same time, which is convenient for copying and reading
-
-
-
-
-- Don't want to read the project code? Just take the whole project to chatgpt
-
-
-
-
-- Multiple major language model mixing calls (ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4)
-
-
-
-
-Multiple major language model mixing call [huggingface beta version](https://huggingface.co/spaces/qingxu98/academic-chatgpt-beta) (the huggingface version does not support chatglm)
-
-
----
-
-## Installation-Method 1: Run directly (Windows, Linux or MacOS)
-
-1. Download project
-```sh
-git clone https://github.com/binary-husky/chatgpt_academic.git
-cd chatgpt_academic
-```
-
-2. Configure API_KEY and proxy settings
-
-
-In `config.py`, configure the overseas Proxy and OpenAI API KEY as follows:
-```
-1. If you are in China, you need to set up an overseas proxy to use the OpenAI API smoothly. Please read config.py carefully for setup details (1. Modify USE_PROXY to True; 2. Modify proxies according to the instructions).
-2. Configure the OpenAI API KEY. You need to register and obtain an API KEY on the OpenAI website. Once you get the API KEY, you can configure it in the config.py file.
-3. Issues related to proxy networks (network timeouts, proxy failures) are summarized at https://github.com/binary-husky/chatgpt_academic/issues/1
-```
-(P.S. When the program runs, it first checks whether a private configuration file named `config_private.py` exists and, if so, uses the same-named settings in it to override those in `config.py`. Therefore, if you understand our configuration-reading logic, we strongly recommend creating a new configuration file named `config_private.py` next to `config.py` and copying the configuration from `config.py` into `config_private.py`. `config_private.py` is not tracked by git and keeps your private information more secure.)
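-
-For reference, a minimal `config_private.py` could look like the sketch below. This is only an illustrative sketch: the exact variable names (such as `API_KEY`) and the proxy address are assumptions, so always copy the real definitions from your own `config.py`.
-
-```python
-# config_private.py - illustrative sketch only; keep this file out of version control.
-API_KEY = "sk-..."   # placeholder for your OpenAI API key
-USE_PROXY = True     # set to False if you can reach the OpenAI API directly
-proxies = {
-    # hypothetical local proxy address; replace it with the one your proxy software exposes
-    "http":  "socks5h://localhost:1080",
-    "https": "socks5h://localhost:1080",
-}
-```
-Because `config_private.py` is read first and overrides `config.py`, you can keep `config.py` untouched when pulling upstream updates.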
-
-
-3. Install dependencies
-```sh
-# (Option One) Recommended
-python -m pip install -r requirements.txt
-
-# (Option Two) If you use anaconda, the steps are similar:
-# (Option Two.1) conda create -n gptac_venv python=3.11
-# (Option Two.2) conda activate gptac_venv
-# (Option Two.3) python -m pip install -r requirements.txt
-
-# Note: Use official pip source or Ali pip source. Other pip sources (such as some university pips) may have problems, and temporary replacement methods are as follows:
-# python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/
-```
-
-If you need support for Tsinghua ChatGLM, you need to install additional dependencies (if you are not familiar with Python or your computer is not powerful, we recommend skipping this step):
-```sh
-python -m pip install -r request_llm/requirements_chatglm.txt
-```
-
-4. Run
-```sh
-python main.py
-```
-
-5. Test function plugins
-```
-- Test Python project analysis
- In the input area, enter `./crazy_functions/test_project/python/dqn`, and then click "Analyze the entire Python project"
-- Test self-code interpretation
- Click "[Multithreading Demo] Interpretation of This Project Itself (Source Code Interpretation)"
-- Test experimental function template function (requires gpt to answer what happened today in history). You can use this function as a template to implement more complex functions.
- Click "[Function Plugin Template Demo] Today in History"
-- There are more functions to choose from in the function plugin area drop-down menu.
-```
-
-## Installation-Method 2: Use Docker (Linux)
-
-1. ChatGPT only (recommended for most people)
-``` sh
-# download project
-git clone https://github.com/binary-husky/chatgpt_academic.git
-cd chatgpt_academic
-# configure overseas Proxy and OpenAI API KEY
-Edit config.py with any text editor
-# Install
-docker build -t gpt-academic .
-# Run
-docker run --rm -it --net=host gpt-academic
-
-# Test function plug-in
-## Test function plugin template function (requires gpt to answer what happened today in history). You can use this function as a template to implement more complex functions.
-Click "[Function Plugin Template Demo] Today in History"
-## Test Abstract Writing for Latex Projects
-Enter ./crazy_functions/test_project/latex/attention in the input area, and then click "Read Tex Paper and Write Abstract"
-## Test Python Project Analysis
-Enter ./crazy_functions/test_project/python/dqn in the input area and click "Analyze the entire Python project."
-
-More functions are available in the function plugin area drop-down menu.
-```
-
-2. ChatGPT+ChatGLM (requires strong familiarity with docker + strong computer configuration)
-
-``` sh
-# Modify dockerfile
-cd docs && nano Dockerfile+ChatGLM
-# How to build | 如何构建 (Dockerfile+ChatGLM在docs路径下,请先cd docs)
-docker build -t gpt-academic --network=host -f Dockerfile+ChatGLM .
-# How to run | 如何运行 (1) 直接运行:
-docker run --rm -it --net=host --gpus=all gpt-academic
-# How to run | 如何运行 (2) 我想运行之前进容器做一些调整:
-docker run --rm -it --net=host --gpus=all gpt-academic bash
-```
-
-
-## Installation-Method 3: Other Deployment Methods
-
-1. Remote Cloud Server Deployment
-Please visit [Deployment Wiki-1](https://github.com/binary-husky/chatgpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97)
-
-2. Use WSL2 (Windows Subsystem for Linux)
-Please visit [Deployment Wiki-2](https://github.com/binary-husky/chatgpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2)
-
-
-## Installation-Proxy Configuration
-### Method 1: Conventional method
-[Configure Proxy](https://github.com/binary-husky/chatgpt_academic/issues/1)
-
-### Method Two: Step-by-step tutorial for newcomers
-[Step-by-step tutorial for newcomers](https://github.com/binary-husky/chatgpt_academic/wiki/%E4%BB%A3%E7%90%86%E8%BD%AF%E4%BB%B6%E9%97%AE%E9%A2%98%E7%9A%84%E6%96%B0%E6%89%8B%E8%A7%A3%E5%86%B3%E6%96%B9%E6%B3%95%EF%BC%88%E6%96%B9%E6%B3%95%E5%8F%AA%E9%80%82%E7%94%A8%E4%BA%8E%E6%96%B0%E6%89%8B%EF%BC%89)
-
----
-
-## Customizing Convenient Buttons (Customizing Academic Shortcuts)
-Open `core_functional.py` with any text editor and add an item as follows, then restart the program (if the button has been successfully added and visible, both the prefix and suffix support hot modification without the need to restart the program to take effect). For example:
-```
-"Super English to Chinese translation": {
- # Prefix, which will be added before your input. For example, to describe your requirements, such as translation, code interpretation, polishing, etc.
- "Prefix": "Please translate the following content into Chinese and use a markdown table to interpret the proprietary terms in the text one by one:\n\n",
-
- # Suffix, which will be added after your input. For example, combined with the prefix, you can put your input content in quotes.
- "Suffix": "",
-},
-```
-
-
-
-
----
-
-
-## Some Function Displays
-
-### Image Display:
-
-
-You are a professional academic paper translator.
-
-
-
-
-
-### If a program can understand and analyze itself:
-
-
-
-
-
-
-
-
-
-### Analysis of any Python/Cpp project:
-
-
-
-
-
-
-
-
-### One-click reading comprehension and summary generation of Latex papers
-
-
-
-
-### Automatic report generation
-
-
-
-
-
-
-### Modular functional design
-
-
-
-
-
-### Source code translation to English
-
-
-
-
-
-## Todo and version planning:
-- version 3.2+ (todo): Function plugin supports more parameter interfaces
-- version 3.1: Support for inquiring multiple GPT models at the same time! Support for api2d, support for multiple apikeys load balancing
-- version 3.0: Support for chatglm and other small llms
-- version 2.6: Refactored the plugin structure, improved interactivity, added more plugins
-- version 2.5: Self-updating, solves the problem of text being too long and token overflowing when summarizing large project source code
-- version 2.4: (1) Added PDF full text translation function; (2) Added function to switch input area position; (3) Added vertical layout option; (4) Multi-threaded function plugin optimization.
-- version 2.3: Enhanced multi-threaded interactivity
-- version 2.2: Function plugin supports hot reloading
-- version 2.1: Foldable layout
-- version 2.0: Introduction of modular function plugins
-- version 1.0: Basic functions
-
-## Reference and learning
-
-```
-The code design of this project has referenced many other excellent projects, including:
-
-# Reference project 1: Borrowed many tips from ChuanhuChatGPT
-https://github.com/GaiZhenbiao/ChuanhuChatGPT
-
-# Reference project 2: Tsinghua ChatGLM-6B:
-https://github.com/THUDM/ChatGLM-6B
-```
-
diff --git a/spaces/shi-labs/FcF-Inpainting/training/losses/ade20k/base.py b/spaces/shi-labs/FcF-Inpainting/training/losses/ade20k/base.py
deleted file mode 100644
index 8cdbe2d3e7dbadf4ed5e5a7cf2d248761ef25d9c..0000000000000000000000000000000000000000
--- a/spaces/shi-labs/FcF-Inpainting/training/losses/ade20k/base.py
+++ /dev/null
@@ -1,627 +0,0 @@
-"""Modified from https://github.com/CSAILVision/semantic-segmentation-pytorch"""
-
-import os
-
-import pandas as pd
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from scipy.io import loadmat
-from torch.nn.modules import BatchNorm2d
-
-from . import resnet
-from . import mobilenet
-
-
-NUM_CLASS = 150
-base_path = os.path.dirname(os.path.abspath(__file__)) # current file path
-colors_path = os.path.join(base_path, 'color150.mat')
-classes_path = os.path.join(base_path, 'object150_info.csv')
-
-segm_options = dict(colors=loadmat(colors_path)['colors'],
- classes=pd.read_csv(classes_path),)
-
-
-class NormalizeTensor:
- def __init__(self, mean, std, inplace=False):
- """Normalize a tensor image with mean and standard deviation.
- .. note::
- This transform acts out of place by default, i.e., it does not mutate the input tensor.
- See :class:`~torchvision.transforms.Normalize` for more details.
- Args:
- tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
- mean (sequence): Sequence of means for each channel.
- std (sequence): Sequence of standard deviations for each channel.
- inplace(bool,optional): Bool to make this operation inplace.
- Returns:
- Tensor: Normalized Tensor image.
- """
-
- self.mean = mean
- self.std = std
- self.inplace = inplace
-
- def __call__(self, tensor):
- if not self.inplace:
- tensor = tensor.clone()
-
- dtype = tensor.dtype
- mean = torch.as_tensor(self.mean, dtype=dtype, device=tensor.device)
- std = torch.as_tensor(self.std, dtype=dtype, device=tensor.device)
- tensor.sub_(mean[None, :, None, None]).div_(std[None, :, None, None])
- return tensor
-
-
-# Model Builder
-class ModelBuilder:
- # custom weights initialization
- @staticmethod
- def weights_init(m):
- classname = m.__class__.__name__
- if classname.find('Conv') != -1:
- nn.init.kaiming_normal_(m.weight.data)
- elif classname.find('BatchNorm') != -1:
- m.weight.data.fill_(1.)
- m.bias.data.fill_(1e-4)
-
- @staticmethod
- def build_encoder(arch='resnet50dilated', fc_dim=512, weights=''):
- pretrained = True if len(weights) == 0 else False
- arch = arch.lower()
- if arch == 'mobilenetv2dilated':
- orig_mobilenet = mobilenet.__dict__['mobilenetv2'](pretrained=pretrained)
- net_encoder = MobileNetV2Dilated(orig_mobilenet, dilate_scale=8)
- elif arch == 'resnet18':
- orig_resnet = resnet.__dict__['resnet18'](pretrained=pretrained)
- net_encoder = Resnet(orig_resnet)
- elif arch == 'resnet18dilated':
- orig_resnet = resnet.__dict__['resnet18'](pretrained=pretrained)
- net_encoder = ResnetDilated(orig_resnet, dilate_scale=8)
- elif arch == 'resnet50dilated':
- orig_resnet = resnet.__dict__['resnet50'](pretrained=pretrained)
- net_encoder = ResnetDilated(orig_resnet, dilate_scale=8)
- elif arch == 'resnet50':
- orig_resnet = resnet.__dict__['resnet50'](pretrained=pretrained)
- net_encoder = Resnet(orig_resnet)
- else:
- raise Exception('Architecture undefined!')
-
- # encoders are usually pretrained
- # net_encoder.apply(ModelBuilder.weights_init)
- if len(weights) > 0:
- print('Loading weights for net_encoder')
- net_encoder.load_state_dict(
- torch.load(weights, map_location=lambda storage, loc: storage), strict=False)
- return net_encoder
-
- @staticmethod
- def build_decoder(arch='ppm_deepsup',
- fc_dim=512, num_class=NUM_CLASS,
- weights='', use_softmax=False, drop_last_conv=False):
- arch = arch.lower()
- if arch == 'ppm_deepsup':
- net_decoder = PPMDeepsup(
- num_class=num_class,
- fc_dim=fc_dim,
- use_softmax=use_softmax,
- drop_last_conv=drop_last_conv)
- elif arch == 'c1_deepsup':
- net_decoder = C1DeepSup(
- num_class=num_class,
- fc_dim=fc_dim,
- use_softmax=use_softmax,
- drop_last_conv=drop_last_conv)
- else:
- raise Exception('Architecture undefined!')
-
- net_decoder.apply(ModelBuilder.weights_init)
- if len(weights) > 0:
- print('Loading weights for net_decoder')
- net_decoder.load_state_dict(
- torch.load(weights, map_location=lambda storage, loc: storage), strict=False)
- return net_decoder
-
- @staticmethod
- def get_decoder(weights_path, arch_encoder, arch_decoder, fc_dim, drop_last_conv, *arts, **kwargs):
- path = os.path.join(weights_path, 'ade20k', f'ade20k-{arch_encoder}-{arch_decoder}/decoder_epoch_20.pth')
- return ModelBuilder.build_decoder(arch=arch_decoder, fc_dim=fc_dim, weights=path, use_softmax=True, drop_last_conv=drop_last_conv)
-
- @staticmethod
- def get_encoder(weights_path, arch_encoder, arch_decoder, fc_dim, segmentation,
- *arts, **kwargs):
- if segmentation:
- path = os.path.join(weights_path, 'ade20k', f'ade20k-{arch_encoder}-{arch_decoder}/encoder_epoch_20.pth')
- else:
- path = ''
- return ModelBuilder.build_encoder(arch=arch_encoder, fc_dim=fc_dim, weights=path)
-
-
-def conv3x3_bn_relu(in_planes, out_planes, stride=1):
- return nn.Sequential(
- nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False),
- BatchNorm2d(out_planes),
- nn.ReLU(inplace=True),
- )
-
-
-class SegmentationModule(nn.Module):
- def __init__(self,
- weights_path,
- num_classes=150,
- arch_encoder="resnet50dilated",
- drop_last_conv=False,
- net_enc=None, # None for Default encoder
- net_dec=None, # None for Default decoder
- encode=None, # {None, 'binary', 'color', 'sky'}
- use_default_normalization=False,
- return_feature_maps=False,
- return_feature_maps_level=3, # {0, 1, 2, 3}
- return_feature_maps_only=True,
- **kwargs,
- ):
- super().__init__()
- self.weights_path = weights_path
- self.drop_last_conv = drop_last_conv
- self.arch_encoder = arch_encoder
- if self.arch_encoder == "resnet50dilated":
- self.arch_decoder = "ppm_deepsup"
- self.fc_dim = 2048
- elif self.arch_encoder == "mobilenetv2dilated":
- self.arch_decoder = "c1_deepsup"
- self.fc_dim = 320
- else:
- raise NotImplementedError(f"No such arch_encoder={self.arch_encoder}")
- model_builder_kwargs = dict(arch_encoder=self.arch_encoder,
- arch_decoder=self.arch_decoder,
- fc_dim=self.fc_dim,
- drop_last_conv=drop_last_conv,
- weights_path=self.weights_path)
-
- self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
- self.encoder = ModelBuilder.get_encoder(**model_builder_kwargs) if net_enc is None else net_enc
- self.decoder = ModelBuilder.get_decoder(**model_builder_kwargs) if net_dec is None else net_dec
- self.use_default_normalization = use_default_normalization
- self.default_normalization = NormalizeTensor(mean=[0.485, 0.456, 0.406],
- std=[0.229, 0.224, 0.225])
-
- self.encode = encode
-
- self.return_feature_maps = return_feature_maps
-
- assert 0 <= return_feature_maps_level <= 3
- self.return_feature_maps_level = return_feature_maps_level
-
- def normalize_input(self, tensor):
- if tensor.min() < 0 or tensor.max() > 1:
- raise ValueError("Tensor should be 0..1 before using normalize_input")
- return self.default_normalization(tensor)
-
- @property
- def feature_maps_channels(self):
- return 256 * 2**(self.return_feature_maps_level) # 256, 512, 1024, 2048
-
- def forward(self, img_data, segSize=None):
- if segSize is None:
- raise NotImplementedError("Please pass segSize param. By default: (300, 300)")
-
- fmaps = self.encoder(img_data, return_feature_maps=True)
- pred = self.decoder(fmaps, segSize=segSize)
-
- if self.return_feature_maps:
- return pred, fmaps
- # print("BINARY", img_data.shape, pred.shape)
- return pred
-
- def multi_mask_from_multiclass(self, pred, classes):
- def isin(ar1, ar2):
- return (ar1[..., None] == ar2).any(-1).float()
- return isin(pred, torch.LongTensor(classes).to(self.device))
-
- @staticmethod
- def multi_mask_from_multiclass_probs(scores, classes):
- res = None
- for c in classes:
- if res is None:
- res = scores[:, c]
- else:
- res += scores[:, c]
- return res
-
- def predict(self, tensor, imgSizes=(-1,), # (300, 375, 450, 525, 600)
- segSize=None):
- """Entry-point for segmentation. Use this methods instead of forward
- Arguments:
- tensor {torch.Tensor} -- BCHW
- Keyword Arguments:
- imgSizes {tuple or list} -- imgSizes for segmentation input.
- default: (300, 450)
- original implementation: (300, 375, 450, 525, 600)
-
- """
- if segSize is None:
- segSize = tensor.shape[-2:]
- segSize = (tensor.shape[2], tensor.shape[3])
- with torch.no_grad():
- if self.use_default_normalization:
- tensor = self.normalize_input(tensor)
- scores = torch.zeros(1, NUM_CLASS, segSize[0], segSize[1]).to(self.device)
- features = torch.zeros(1, self.feature_maps_channels, segSize[0], segSize[1]).to(self.device)
-
- result = []
- for img_size in imgSizes:
- if img_size != -1:
- img_data = F.interpolate(tensor.clone(), size=img_size)
- else:
- img_data = tensor.clone()
-
- if self.return_feature_maps:
- pred_current, fmaps = self.forward(img_data, segSize=segSize)
- else:
- pred_current = self.forward(img_data, segSize=segSize)
-
-
- result.append(pred_current)
- scores = scores + pred_current / len(imgSizes)
-
- # Disclaimer: We use and aggregate only last fmaps: fmaps[3]
- if self.return_feature_maps:
- features = features + F.interpolate(fmaps[self.return_feature_maps_level], size=segSize) / len(imgSizes)
-
- _, pred = torch.max(scores, dim=1)
-
- if self.return_feature_maps:
- return features
-
- return pred, result
-
- def get_edges(self, t):
- edge = torch.cuda.ByteTensor(t.size()).zero_()
- edge[:, :, :, 1:] = edge[:, :, :, 1:] | (t[:, :, :, 1:] != t[:, :, :, :-1])
- edge[:, :, :, :-1] = edge[:, :, :, :-1] | (t[:, :, :, 1:] != t[:, :, :, :-1])
- edge[:, :, 1:, :] = edge[:, :, 1:, :] | (t[:, :, 1:, :] != t[:, :, :-1, :])
- edge[:, :, :-1, :] = edge[:, :, :-1, :] | (t[:, :, 1:, :] != t[:, :, :-1, :])
-
- if True:
- return edge.half()
- return edge.float()
-
-
-# pyramid pooling, deep supervision
-class PPMDeepsup(nn.Module):
- def __init__(self, num_class=NUM_CLASS, fc_dim=4096,
- use_softmax=False, pool_scales=(1, 2, 3, 6),
- drop_last_conv=False):
- super().__init__()
- self.use_softmax = use_softmax
- self.drop_last_conv = drop_last_conv
-
- self.ppm = []
- for scale in pool_scales:
- self.ppm.append(nn.Sequential(
- nn.AdaptiveAvgPool2d(scale),
- nn.Conv2d(fc_dim, 512, kernel_size=1, bias=False),
- BatchNorm2d(512),
- nn.ReLU(inplace=True)
- ))
- self.ppm = nn.ModuleList(self.ppm)
- self.cbr_deepsup = conv3x3_bn_relu(fc_dim // 2, fc_dim // 4, 1)
-
- self.conv_last = nn.Sequential(
- nn.Conv2d(fc_dim + len(pool_scales) * 512, 512,
- kernel_size=3, padding=1, bias=False),
- BatchNorm2d(512),
- nn.ReLU(inplace=True),
- nn.Dropout2d(0.1),
- nn.Conv2d(512, num_class, kernel_size=1)
- )
- self.conv_last_deepsup = nn.Conv2d(fc_dim // 4, num_class, 1, 1, 0)
- self.dropout_deepsup = nn.Dropout2d(0.1)
-
- def forward(self, conv_out, segSize=None):
- conv5 = conv_out[-1]
-
- input_size = conv5.size()
- ppm_out = [conv5]
- for pool_scale in self.ppm:
- ppm_out.append(nn.functional.interpolate(
- pool_scale(conv5),
- (input_size[2], input_size[3]),
- mode='bilinear', align_corners=False))
- ppm_out = torch.cat(ppm_out, 1)
-
- if self.drop_last_conv:
- return ppm_out
- else:
- x = self.conv_last(ppm_out)
-
- if self.use_softmax: # is True during inference
- x = nn.functional.interpolate(
- x, size=segSize, mode='bilinear', align_corners=False)
- x = nn.functional.softmax(x, dim=1)
- return x
-
- # deep sup
- conv4 = conv_out[-2]
- _ = self.cbr_deepsup(conv4)
- _ = self.dropout_deepsup(_)
- _ = self.conv_last_deepsup(_)
-
- x = nn.functional.log_softmax(x, dim=1)
- _ = nn.functional.log_softmax(_, dim=1)
-
- return (x, _)
-
-
-class Resnet(nn.Module):
- def __init__(self, orig_resnet):
- super(Resnet, self).__init__()
-
- # take pretrained resnet, except AvgPool and FC
- self.conv1 = orig_resnet.conv1
- self.bn1 = orig_resnet.bn1
- self.relu1 = orig_resnet.relu1
- self.conv2 = orig_resnet.conv2
- self.bn2 = orig_resnet.bn2
- self.relu2 = orig_resnet.relu2
- self.conv3 = orig_resnet.conv3
- self.bn3 = orig_resnet.bn3
- self.relu3 = orig_resnet.relu3
- self.maxpool = orig_resnet.maxpool
- self.layer1 = orig_resnet.layer1
- self.layer2 = orig_resnet.layer2
- self.layer3 = orig_resnet.layer3
- self.layer4 = orig_resnet.layer4
-
- def forward(self, x, return_feature_maps=False):
- conv_out = []
-
- x = self.relu1(self.bn1(self.conv1(x)))
- x = self.relu2(self.bn2(self.conv2(x)))
- x = self.relu3(self.bn3(self.conv3(x)))
- x = self.maxpool(x)
-
- x = self.layer1(x)
- conv_out.append(x)
- x = self.layer2(x)
- conv_out.append(x)
- x = self.layer3(x)
- conv_out.append(x)
- x = self.layer4(x)
- conv_out.append(x)
-
- if return_feature_maps:
- return conv_out
- return [x]
-
-# Resnet Dilated
-class ResnetDilated(nn.Module):
- def __init__(self, orig_resnet, dilate_scale=8):
- super().__init__()
- from functools import partial
-
- if dilate_scale == 8:
- orig_resnet.layer3.apply(
- partial(self._nostride_dilate, dilate=2))
- orig_resnet.layer4.apply(
- partial(self._nostride_dilate, dilate=4))
- elif dilate_scale == 16:
- orig_resnet.layer4.apply(
- partial(self._nostride_dilate, dilate=2))
-
- # take pretrained resnet, except AvgPool and FC
- self.conv1 = orig_resnet.conv1
- self.bn1 = orig_resnet.bn1
- self.relu1 = orig_resnet.relu1
- self.conv2 = orig_resnet.conv2
- self.bn2 = orig_resnet.bn2
- self.relu2 = orig_resnet.relu2
- self.conv3 = orig_resnet.conv3
- self.bn3 = orig_resnet.bn3
- self.relu3 = orig_resnet.relu3
- self.maxpool = orig_resnet.maxpool
- self.layer1 = orig_resnet.layer1
- self.layer2 = orig_resnet.layer2
- self.layer3 = orig_resnet.layer3
- self.layer4 = orig_resnet.layer4
-
- def _nostride_dilate(self, m, dilate):
- classname = m.__class__.__name__
- if classname.find('Conv') != -1:
- # the convolution with stride
- if m.stride == (2, 2):
- m.stride = (1, 1)
- if m.kernel_size == (3, 3):
- m.dilation = (dilate // 2, dilate // 2)
- m.padding = (dilate // 2, dilate // 2)
- # other convolutions
- else:
- if m.kernel_size == (3, 3):
- m.dilation = (dilate, dilate)
- m.padding = (dilate, dilate)
-
- def forward(self, x, return_feature_maps=False):
- conv_out = []
-
- x = self.relu1(self.bn1(self.conv1(x)))
- x = self.relu2(self.bn2(self.conv2(x)))
- x = self.relu3(self.bn3(self.conv3(x)))
- x = self.maxpool(x)
-
- x = self.layer1(x)
- conv_out.append(x)
- x = self.layer2(x)
- conv_out.append(x)
- x = self.layer3(x)
- conv_out.append(x)
- x = self.layer4(x)
- conv_out.append(x)
-
- if return_feature_maps:
- return conv_out
- return [x]
-
-class MobileNetV2Dilated(nn.Module):
- def __init__(self, orig_net, dilate_scale=8):
- super(MobileNetV2Dilated, self).__init__()
- from functools import partial
-
- # take pretrained mobilenet features
- self.features = orig_net.features[:-1]
-
- self.total_idx = len(self.features)
- self.down_idx = [2, 4, 7, 14]
-
- if dilate_scale == 8:
- for i in range(self.down_idx[-2], self.down_idx[-1]):
- self.features[i].apply(
- partial(self._nostride_dilate, dilate=2)
- )
- for i in range(self.down_idx[-1], self.total_idx):
- self.features[i].apply(
- partial(self._nostride_dilate, dilate=4)
- )
- elif dilate_scale == 16:
- for i in range(self.down_idx[-1], self.total_idx):
- self.features[i].apply(
- partial(self._nostride_dilate, dilate=2)
- )
-
- def _nostride_dilate(self, m, dilate):
- classname = m.__class__.__name__
- if classname.find('Conv') != -1:
- # the convolution with stride
- if m.stride == (2, 2):
- m.stride = (1, 1)
- if m.kernel_size == (3, 3):
- m.dilation = (dilate//2, dilate//2)
- m.padding = (dilate//2, dilate//2)
- # other convolutions
- else:
- if m.kernel_size == (3, 3):
- m.dilation = (dilate, dilate)
- m.padding = (dilate, dilate)
-
- def forward(self, x, return_feature_maps=False):
- if return_feature_maps:
- conv_out = []
- for i in range(self.total_idx):
- x = self.features[i](x)
- if i in self.down_idx:
- conv_out.append(x)
- conv_out.append(x)
- return conv_out
-
- else:
- return [self.features(x)]
-
-
-# last conv, deep supervision
-class C1DeepSup(nn.Module):
- def __init__(self, num_class=150, fc_dim=2048, use_softmax=False, drop_last_conv=False):
- super(C1DeepSup, self).__init__()
- self.use_softmax = use_softmax
- self.drop_last_conv = drop_last_conv
-
- self.cbr = conv3x3_bn_relu(fc_dim, fc_dim // 4, 1)
- self.cbr_deepsup = conv3x3_bn_relu(fc_dim // 2, fc_dim // 4, 1)
-
- # last conv
- self.conv_last = nn.Conv2d(fc_dim // 4, num_class, 1, 1, 0)
- self.conv_last_deepsup = nn.Conv2d(fc_dim // 4, num_class, 1, 1, 0)
-
- def forward(self, conv_out, segSize=None):
- conv5 = conv_out[-1]
-
- x = self.cbr(conv5)
-
- if self.drop_last_conv:
- return x
- else:
- x = self.conv_last(x)
-
- if self.use_softmax: # is True during inference
- x = nn.functional.interpolate(
- x, size=segSize, mode='bilinear', align_corners=False)
- x = nn.functional.softmax(x, dim=1)
- return x
-
- # deep sup
- conv4 = conv_out[-2]
- _ = self.cbr_deepsup(conv4)
- _ = self.conv_last_deepsup(_)
-
- x = nn.functional.log_softmax(x, dim=1)
- _ = nn.functional.log_softmax(_, dim=1)
-
- return (x, _)
-
-
-# last conv
-class C1(nn.Module):
- def __init__(self, num_class=150, fc_dim=2048, use_softmax=False):
- super(C1, self).__init__()
- self.use_softmax = use_softmax
-
- self.cbr = conv3x3_bn_relu(fc_dim, fc_dim // 4, 1)
-
- # last conv
- self.conv_last = nn.Conv2d(fc_dim // 4, num_class, 1, 1, 0)
-
- def forward(self, conv_out, segSize=None):
- conv5 = conv_out[-1]
- x = self.cbr(conv5)
- x = self.conv_last(x)
-
- if self.use_softmax: # is True during inference
- x = nn.functional.interpolate(
- x, size=segSize, mode='bilinear', align_corners=False)
- x = nn.functional.softmax(x, dim=1)
- else:
- x = nn.functional.log_softmax(x, dim=1)
-
- return x
-
-
-# pyramid pooling
-class PPM(nn.Module):
- def __init__(self, num_class=150, fc_dim=4096,
- use_softmax=False, pool_scales=(1, 2, 3, 6)):
- super(PPM, self).__init__()
- self.use_softmax = use_softmax
-
- self.ppm = []
- for scale in pool_scales:
- self.ppm.append(nn.Sequential(
- nn.AdaptiveAvgPool2d(scale),
- nn.Conv2d(fc_dim, 512, kernel_size=1, bias=False),
- BatchNorm2d(512),
- nn.ReLU(inplace=True)
- ))
- self.ppm = nn.ModuleList(self.ppm)
-
- self.conv_last = nn.Sequential(
- nn.Conv2d(fc_dim+len(pool_scales)*512, 512,
- kernel_size=3, padding=1, bias=False),
- BatchNorm2d(512),
- nn.ReLU(inplace=True),
- nn.Dropout2d(0.1),
- nn.Conv2d(512, num_class, kernel_size=1)
- )
-
- def forward(self, conv_out, segSize=None):
- conv5 = conv_out[-1]
-
- input_size = conv5.size()
- ppm_out = [conv5]
- for pool_scale in self.ppm:
- ppm_out.append(nn.functional.interpolate(
- pool_scale(conv5),
- (input_size[2], input_size[3]),
- mode='bilinear', align_corners=False))
- ppm_out = torch.cat(ppm_out, 1)
-
- x = self.conv_last(ppm_out)
-
- if self.use_softmax: # is True during inference
- x = nn.functional.interpolate(
- x, size=segSize, mode='bilinear', align_corners=False)
- x = nn.functional.softmax(x, dim=1)
- else:
- x = nn.functional.log_softmax(x, dim=1)
- return x
diff --git a/spaces/shi-labs/FcF-Inpainting/training/losses/high_receptive_pl.py b/spaces/shi-labs/FcF-Inpainting/training/losses/high_receptive_pl.py
deleted file mode 100644
index df48f2037d868c395b188adbfe1308b93ad7c139..0000000000000000000000000000000000000000
--- a/spaces/shi-labs/FcF-Inpainting/training/losses/high_receptive_pl.py
+++ /dev/null
@@ -1,43 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-from training.losses.ade20k import ModelBuilder
-
-
-IMAGENET_MEAN = torch.FloatTensor([0.485, 0.456, 0.406])[None, :, None, None]
-IMAGENET_STD = torch.FloatTensor([0.229, 0.224, 0.225])[None, :, None, None]
-
-
-class HRFPL(nn.Module):
- def __init__(self, weight=1,
- weights_path=None, arch_encoder='resnet50dilated', segmentation=True):
- super().__init__()
- self.impl = ModelBuilder.get_encoder(weights_path=weights_path,
- arch_encoder=arch_encoder,
- arch_decoder='ppm_deepsup',
- fc_dim=2048,
- segmentation=segmentation)
- self.impl.eval()
- for w in self.impl.parameters():
- w.requires_grad_(False)
-
- self.weight = weight
-
- def forward(self, pred, target):
-
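- # Map inputs from [-1, 1] to [0, 1], apply ImageNet normalization, then sum the MSE between
- # the frozen ADE20K encoder's feature maps of pred and target (high receptive field perceptual loss).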
- target = (target + 1) / 2
- pred = (pred + 1) / 2
- pred = torch.clamp(pred, 0., 1.)
-
- pred = (pred - IMAGENET_MEAN.to(pred)) / IMAGENET_STD.to(pred)
- target = (target - IMAGENET_MEAN.to(target)) / IMAGENET_STD.to(target)
-
- self.impl = self.impl.to(pred.device)
- pred_feats = self.impl(pred, return_feature_maps=True)
- target_feats = self.impl(target, return_feature_maps=True)
-
- result = torch.stack([F.mse_loss(cur_pred, cur_target)
- for cur_pred, cur_target
- in zip(pred_feats, target_feats)]).sum() * self.weight
- return result
\ No newline at end of file
diff --git a/spaces/shikunl/prismer/prismer/experts/segmentation/mask2former/modeling/meta_arch/mask_former_head.py b/spaces/shikunl/prismer/prismer/experts/segmentation/mask2former/modeling/meta_arch/mask_former_head.py
deleted file mode 100644
index aa2173d43f5815ed0af48f1dd568c216ca274f37..0000000000000000000000000000000000000000
--- a/spaces/shikunl/prismer/prismer/experts/segmentation/mask2former/modeling/meta_arch/mask_former_head.py
+++ /dev/null
@@ -1,132 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-import logging
-from copy import deepcopy
-from typing import Callable, Dict, List, Optional, Tuple, Union
-
-import fvcore.nn.weight_init as weight_init
-from torch import nn
-from torch.nn import functional as F
-
-from detectron2.config import configurable
-from detectron2.layers import Conv2d, ShapeSpec, get_norm
-from detectron2.modeling import SEM_SEG_HEADS_REGISTRY
-
-from ..transformer_decoder.maskformer_transformer_decoder import build_transformer_decoder
-from ..pixel_decoder.fpn import build_pixel_decoder
-
-
-@SEM_SEG_HEADS_REGISTRY.register()
-class MaskFormerHead(nn.Module):
-
- _version = 2
-
- def _load_from_state_dict(
- self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
- ):
- version = local_metadata.get("version", None)
- if version is None or version < 2:
- # Do not warn if train from scratch
- scratch = True
- logger = logging.getLogger(__name__)
- for k in list(state_dict.keys()):
- newk = k
- if "sem_seg_head" in k and not k.startswith(prefix + "predictor"):
- newk = k.replace(prefix, prefix + "pixel_decoder.")
- # logger.debug(f"{k} ==> {newk}")
- if newk != k:
- state_dict[newk] = state_dict[k]
- del state_dict[k]
- scratch = False
-
- if not scratch:
- logger.warning(
- f"Weight format of {self.__class__.__name__} have changed! "
- "Please upgrade your models. Applying automatic conversion now ..."
- )
-
- @configurable
- def __init__(
- self,
- input_shape: Dict[str, ShapeSpec],
- *,
- num_classes: int,
- pixel_decoder: nn.Module,
- loss_weight: float = 1.0,
- ignore_value: int = -1,
- # extra parameters
- transformer_predictor: nn.Module,
- transformer_in_feature: str,
- ):
- """
- NOTE: this interface is experimental.
- Args:
- input_shape: shapes (channels and stride) of the input features
- num_classes: number of classes to predict
- pixel_decoder: the pixel decoder module
- loss_weight: loss weight
- ignore_value: category id to be ignored during training.
- transformer_predictor: the transformer decoder that makes prediction
- transformer_in_feature: input feature name to the transformer_predictor
- """
- super().__init__()
- input_shape = sorted(input_shape.items(), key=lambda x: x[1].stride)
- self.in_features = [k for k, v in input_shape]
- feature_strides = [v.stride for k, v in input_shape]
- feature_channels = [v.channels for k, v in input_shape]
-
- self.ignore_value = ignore_value
- self.common_stride = 4
- self.loss_weight = loss_weight
-
- self.pixel_decoder = pixel_decoder
- self.predictor = transformer_predictor
- self.transformer_in_feature = transformer_in_feature
-
- self.num_classes = num_classes
-
- @classmethod
- def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]):
- # figure out in_channels to transformer predictor
- if cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE == "transformer_encoder":
- transformer_predictor_in_channels = cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM
- elif cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE == "pixel_embedding":
- transformer_predictor_in_channels = cfg.MODEL.SEM_SEG_HEAD.MASK_DIM
- elif cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE == "multi_scale_pixel_decoder": # for maskformer2
- transformer_predictor_in_channels = cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM
- else:
- transformer_predictor_in_channels = input_shape[cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE].channels
-
- return {
- "input_shape": {
- k: v for k, v in input_shape.items() if k in cfg.MODEL.SEM_SEG_HEAD.IN_FEATURES
- },
- "ignore_value": cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,
- "num_classes": cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES,
- "pixel_decoder": build_pixel_decoder(cfg, input_shape),
- "loss_weight": cfg.MODEL.SEM_SEG_HEAD.LOSS_WEIGHT,
- "transformer_in_feature": cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE,
- "transformer_predictor": build_transformer_decoder(
- cfg,
- transformer_predictor_in_channels,
- mask_classification=True,
- ),
- }
-
- def forward(self, features, mask=None):
- return self.layers(features, mask)
-
- def layers(self, features, mask=None):
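- # Run the pixel decoder, then route its outputs to the transformer predictor according to transformer_in_feature.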
- mask_features, transformer_encoder_features, multi_scale_features = self.pixel_decoder.forward_features(features)
- if self.transformer_in_feature == "multi_scale_pixel_decoder":
- predictions = self.predictor(multi_scale_features, mask_features, mask)
- else:
- if self.transformer_in_feature == "transformer_encoder":
- assert (
- transformer_encoder_features is not None
- ), "Please use the TransformerEncoderPixelDecoder."
- predictions = self.predictor(transformer_encoder_features, mask_features, mask)
- elif self.transformer_in_feature == "pixel_embedding":
- predictions = self.predictor(mask_features, mask_features, mask)
- else:
- predictions = self.predictor(features[self.transformer_in_feature], mask_features, mask)
- return predictions
diff --git a/spaces/sidharthism/fashion-eye/models/biggan/pytorch_biggan/scripts/download_tf_hub_models.sh b/spaces/sidharthism/fashion-eye/models/biggan/pytorch_biggan/scripts/download_tf_hub_models.sh
deleted file mode 100644
index 57655fbd4b77791f03d72b3dfeb3bbb89ccc2fdc..0000000000000000000000000000000000000000
--- a/spaces/sidharthism/fashion-eye/models/biggan/pytorch_biggan/scripts/download_tf_hub_models.sh
+++ /dev/null
@@ -1,21 +0,0 @@
-# Copyright (c) 2019-present, Thomas Wolf, Huggingface Inc.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-#
-
-set -e
-set -x
-
-models="128 256 512"
-
-mkdir -p models/model_128
-mkdir -p models/model_256
-mkdir -p models/model_512
-
-# Download TF Hub models.
-for model in $models
-do
- curl -L "https://tfhub.dev/deepmind/biggan-deep-$model/1?tf-hub-format=compressed" | tar -zxvC models/model_$model
-done
diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Free Download of 3D Max 2018 Student License for Educational Use.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Free Download of 3D Max 2018 Student License for Educational Use.md
deleted file mode 100644
index 9270d1e023e75bbbd7ce033da17856c97f2ff00f..0000000000000000000000000000000000000000
--- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Free Download of 3D Max 2018 Student License for Educational Use.md
+++ /dev/null
@@ -1,112 +0,0 @@
-
-Download 3D Max 2018 Student: A Guide for Beginners
-If you are interested in 3D modeling, animation, and rendering, you might have heard of 3D Max, one of the most popular and powerful software in the industry. But did you know that you can get it for free as a student? In this article, we will show you how to download 3D Max 2018 student version, what are its features and benefits, and how to learn it quickly and easily.
-What is 3D Max and why should you use it?
-3D Max, also known as 3ds Max, is a 3D modeling, animation, and rendering application developed by Autodesk that lets you create stunning 3D designs and visuals for games, films, TV, architecture, and more. It is widely used by professionals and hobbyists alike, thanks to its rich and flexible toolset, high-quality rendering engine, and extensive compatibility with other applications.
-3D Max features and benefits
-Some of the features and benefits of using 3D Max are:
-
-- It has a user-friendly interface that lets you customize your workspace and access various tools and commands easily.
-- It supports a variety of modeling techniques, such as polygonal, spline, NURBS, subdivision surface, and procedural modeling.
-- It offers a range of animation tools, such as keyframe animation, motion capture, character studio, biped, CAT, crowd simulation, and more.
-- It enables you to create realistic lighting, materials, textures, and effects using its powerful rendering engine, Arnold.
-- It allows you to import and export data from other software, such as AutoCAD, Revit, Maya, Blender, Photoshop, After Effects, and more.
-- It provides you with access to a large library of assets, plugins, scripts, tutorials, and online resources to enhance your workflow and creativity.
-
-3D Max system requirements and compatibility
-Before you download 3D Max 2018 student version, you need to make sure that your computer meets the minimum system requirements. According to Autodesk, these are:
-
-- Operating system: Microsoft Windows 7 (SP1), Windows 8.1 or Windows 10 Professional
-- CPU: Intel or AMD multi-core processor with SSE4.2 instruction set
-- Memory: 4 GB of RAM minimum (8 GB or more recommended)
-- Disk space: 6 GB of free disk space for installation
-- Graphics card: DirectX®11 or DirectX®12 compatible graphics card with Shader Model 5.0
-- Pointing device: Three-button mouse
-
-You also need to check the compatibility of your software with other applications that you might use with 3D Max. For example, if you want to use AutoCAD or Revit files in 3D Max, you need to have the same or newer versions of those software installed on your computer. You can find more information about compatibility on the Autodesk website.
-How to get 3D Max 2018 student for free?
-The good news is that you can get 3D Max 2018 student version for free if you are a student or an educator. Autodesk offers free one-year educational access to its products and services for eligible users. Here are the steps to get 3D Max 2018 student for free:
-Confirm your eligibility for educational access
-To qualify for free educational access, you need to be a student or an educator at a recognized educational institution. You also need to have a valid email address from your school or organization. You can check your eligibility on the Autodesk website. If you are not eligible, you can still get a free 30-day trial of 3D Max or purchase a subscription plan.
-Create an Autodesk account and download the software
-Once you have confirmed your eligibility, you need to create an Autodesk account and verify your email address. You can do this by following the instructions on the Autodesk website. After that, you can sign in to your account and go to the Education home page. There, you can find 3D Max 2018 student version under the Software tab. Click on the Download button and select your operating system, language, and version. You will also see the serial number and product key that you will need later to activate the software.
-Install and activate 3D Max 2018 student
-After downloading the software, you need to install it on your computer. You can do this by running the installer file and following the on-screen instructions. You will need to accept the license agreement, choose the installation type and location, and select the components and features that you want to install. You can also customize your installation settings if you want. When the installation is complete, you can launch 3D Max 2018 student from your desktop or start menu. The first time you run the software, you will need to activate it using the serial number and product key that you received earlier. You can do this by entering them in the activation window and clicking on Activate. You will also need to sign in to your Autodesk account to complete the activation process.
-How to learn 3D Max 2018 student?
-Now that you have installed and activated 3D Max 2018 student, you might be wondering how to use it and learn its features and functions. Don't worry, there are plenty of resources and ways to help you master this software in no time. Here are some of them:
-Explore the user interface and tools
-The best way to start learning 3D Max is to familiarize yourself with its user interface and tools. You can do this by opening the software and exploring its various menus, toolbars, panels, windows, and views. You can also hover over any icon or button to see its name and description. You can also access the Help menu or press F1 to open the online documentation that explains everything you need to know about 3D Max.
-Follow online tutorials and courses
-Another great way to learn 3D Max is to follow online tutorials and courses that teach you how to use the software step by step. You can find many free and paid tutorials and courses on various websites, such as YouTube, Udemy, Lynda, Skillshare, Pluralsight, and more. You can also find some official tutorials and courses on the Autodesk website or in the software itself under the Learn tab. These tutorials and courses cover different topics and levels of difficulty, such as modeling, animation, rendering, lighting, materials, effects, and more.
-Join the Autodesk student community and competitions
-A final way to learn 3D Max is to join the Autodesk student community and competitions that allow you to interact with other users, share your work, get feedback, ask questions, learn tips and tricks, and participate in challenges and contests. You can join the Autodesk student community by signing up on their website or in the software itself under the Community tab. There, you can find forums, blogs, galleries, events, webinars, podcasts, newsletters, and more. You can also find some competitions that are open for students only, such as Design for Industry or Future City. These competitions give you a chance to showcase your skills, win prizes, and get recognition.
-Conclusion
-Summary of the main points
-In conclusion, 3D Max is a powerful software that allows you to create stunning 3D designs and visuals for various purposes. As a student or an educator, you can get it for free for one year by following some simple steps. To learn how to use it effectively, you can explore its user interface and tools, follow online tutorials and courses, and join the Autodesk student community and competitions. By doing so, you will be able to unleash your creativity and potential with 3D Max.
-Call to action and resources
-If you are ready to start your journey with 3D Max, don't wait any longer. Download 3D Max 2018 student version for free today and start creating amazing 3D projects. To help you along the way, here are some useful resources that you can check out:
-
-- Autodesk Education home page: where you can download 3D Max and other Autodesk products for free as a student or an educator.
-- Autodesk compatibility page: where you can check the compatibility of 3D Max with other software and applications.
-- Autodesk student community page: where you can join the Autodesk student community and competitions, and access various resources and events.
-- Autodesk official tutorials page: where you can find some official tutorials and courses on how to use 3D Max.
-- YouTube 3D Max playlist: where you can find some free video tutorials on 3D Max by various creators.
-
-We hope you enjoyed this article and found it helpful. If you have any questions or feedback, please feel free to leave a comment below. Happy 3D modeling!
- FAQs
-Here are some frequently asked questions about 3D Max 2018 student version:
-
-- How long can I use 3D Max 2018 student version for free?
-You can use 3D Max 2018 student version for free for one year from the date of activation. You can renew your educational access every year as long as you remain eligible.
-- Can I use 3D Max 2018 student version for commercial purposes?
-No, you cannot use 3D Max 2018 student version for commercial purposes. It is intended for educational purposes only. If you want to use 3D Max for commercial purposes, you need to purchase a subscription plan or a perpetual license.
-- What is the difference between 3D Max 2018 student version and the latest version?
-3D Max 2018 student version is an older version of the software that was released in 2017. The latest version of 3D Max is 2022, which was released in 2021. The latest version has some new features and improvements that are not available in the older version, such as Smart Extrude, Bake to Texture, PBR materials, Chamfer modifier enhancements, and more. However, the older version still has most of the core features and functions that are essential for 3D modeling, animation, and rendering.
-- Can I upgrade from 3D Max 2018 student version to the latest version?
-Yes, you can upgrade from 3D Max 2018 student version to the latest version if you are still eligible for educational access. You can do this by downloading the latest version from the Autodesk Education home page and activating it with your Autodesk account. You can also keep both versions on your computer if you want.
-- Can I use other Autodesk products with 3D Max 2018 student version?
-Yes, you can use other Autodesk products with 3D Max 2018 student version, such as AutoCAD, Revit, Maya, Fusion 360, SketchBook, and more. You can download them for free from the Autodesk Education home page as well. However, you need to check the compatibility of the products before using them together.
-
-
-
\ No newline at end of file
diff --git a/spaces/siya02/Konakni-TTS/ttsv/scripts/inference/advanced_infer.sh b/spaces/siya02/Konakni-TTS/ttsv/scripts/inference/advanced_infer.sh
deleted file mode 100644
index 6bbd53454331f0bd5157aa4e38ae4d329fba05fd..0000000000000000000000000000000000000000
--- a/spaces/siya02/Konakni-TTS/ttsv/scripts/inference/advanced_infer.sh
+++ /dev/null
@@ -1,22 +0,0 @@
-gender='male'
-glowdir='../../checkpoints/glow/'$gender'/'
-hifidir='../../checkpoints/hifi/'$gender'/'
-device='cpu'
-text='Hey mr. I am testing this one. Now on multiple sentences. Just want to see the flow.'
-noise_scale='0.667'
-length_scale='1.0'
-transliteration=1
-number_conversion=1
-split_sentences=1
-lang='en'
-
-
-timestamp=$(date +%s)
-wav='../../results/'$gender'/'
-wav_file=$wav/$timestamp'.wav'
-
-
-mkdir -p $wav
-
-python ../../utils/inference/advanced_tts.py -a $glowdir -v $hifidir -d $device -t "$text" -w $wav_file -L $lang -n $noise_scale -l $length_scale -T $transliteration -N $number_conversion -S $split_sentences
-echo "File saved at: "$wav_file
diff --git a/spaces/sklearn-docs/Isotonic-Regression/README.md b/spaces/sklearn-docs/Isotonic-Regression/README.md
deleted file mode 100644
index 521d0eacd952cba76194e5efd6d321d1698ae0c1..0000000000000000000000000000000000000000
--- a/spaces/sklearn-docs/Isotonic-Regression/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Isotonic Regression
-emoji: 🐠
-colorFrom: purple
-colorTo: gray
-sdk: gradio
-sdk_version: 3.32.0
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/sky24h/Free-View_Expressive_Talking_Head_Video_Editing/app.py b/spaces/sky24h/Free-View_Expressive_Talking_Head_Video_Editing/app.py
deleted file mode 100644
index 76c22684412935498de27f46d615fc54b38e6c70..0000000000000000000000000000000000000000
--- a/spaces/sky24h/Free-View_Expressive_Talking_Head_Video_Editing/app.py
+++ /dev/null
@@ -1,84 +0,0 @@
-import os
-import glob
-from natsort import natsorted
-import gradio as gr
-
-from inference_util import init_model, infenrece
-from attributtes_utils import input_pose, input_emotion, input_blink
-
-model = init_model()
-
-
-def process(input_vid, audio_path, pose_select, emotion_select, blink_select):
- pose = input_pose(pose_select)
- emotion = input_emotion(emotion_select)
- blink = input_blink(blink_select)
-
- print("input_vid: ", input_vid)
- result = infenrece(model, os.path.join("./assets/videos/", input_vid), os.path.join("./assets/audios/", audio_path), pose, emotion, blink)
- print("result: ", result)
-
- print("finished !")
-
- return result # , gr.Group.update(visible=True)
-
-
-available_videos = natsorted(glob.glob("./assets/videos/*.mp4"))
-available_videos = [os.path.basename(x) for x in available_videos]
-
-# prepare audio
-for video in available_videos:
- audio = video.replace(".mp4", ".wav")
- if not os.path.exists(os.path.join("./assets/audios/", audio)):
- os.system(f"ffmpeg -y -loglevel error -i ./assets/videos/{video} -vn -acodec pcm_s16le -ar 16000 -ac 1 ./assets/audios/{audio}")
-available_audios = natsorted(glob.glob("./assets/audios/*.wav"))
-available_audios = [os.path.basename(x) for x in available_audios]
-
-
-with gr.Blocks() as demo:
- gr.HTML(
- """
- <h1>Free-View Expressive Talking Head Video Editing</h1>

- """
- )
- with gr.Column(elem_id="col-container"):
- with gr.Row():
- with gr.Column():
- # select and preview video from a list of examples
- video_preview = gr.Video(label="Video Preview", elem_id="video-preview", height=360, value="./assets/videos/sample1.mp4")
- video_input = gr.Dropdown(available_videos, label="Input Video", value="sample1.mp4")
- audio_preview = gr.Audio(label="Audio Preview", elem_id="audio-preview", height=360, value="./assets/audios/sample2.wav")
- audio_input = gr.Dropdown(available_audios, label="Input Audio", value="sample2.wav")
- pose_select = gr.Radio(["front", "left_right_shaking"], label="Pose", value="front")
- emotion_select = gr.Radio(["neutral", "happy", "angry", "surprised"], label="Emotion", value="neutral")
- blink_select = gr.Radio(["yes", "no"], label="Blink", value="yes")
- # with gr.Row():
- with gr.Column():
- video_out = gr.Video(label="Video Output", elem_id="video-output", height=360)
- # title: Free-View Expressive Talking Head Video Editing
-
- submit_btn = gr.Button("Generate video")
-
- inputs = [video_input, audio_input, pose_select, emotion_select, blink_select]
- outputs = [video_out]
-
- video_preview_output = [video_preview]
- audio_preview_output = [audio_preview]
-
- video_input.select(lambda x: "./assets/videos/" + x, video_input, video_preview_output)
- audio_input.select(lambda x: "./assets/audios/" + x, audio_input, audio_preview_output)
- submit_btn.click(process, inputs, outputs)
-
-demo.queue(max_size=10).launch()
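The audio-preparation step above shells out to ffmpeg through os.system with an f-string. A small alternative sketch using subprocess with an argument list (same ffmpeg flags as the app; the argument list avoids shell quoting issues around file names):

import os
import subprocess

def extract_audio(video_path, audio_path):
    # Mirrors the ffmpeg call used above: video stream dropped, mono 16 kHz 16-bit PCM output.
    if not os.path.exists(audio_path):
        subprocess.run([
            "ffmpeg", "-y", "-loglevel", "error",
            "-i", video_path,
            "-vn", "-acodec", "pcm_s16le", "-ar", "16000", "-ac", "1",
            audio_path,
        ], check=True)

extract_audio("./assets/videos/sample1.mp4", "./assets/audios/sample1.wav")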
diff --git a/spaces/sneedium/captcha_pixelplanet/modules/transformer.py b/spaces/sneedium/captcha_pixelplanet/modules/transformer.py
deleted file mode 100644
index 6dde312185c7c68f54562885f23ea3b0670e6c40..0000000000000000000000000000000000000000
--- a/spaces/sneedium/captcha_pixelplanet/modules/transformer.py
+++ /dev/null
@@ -1,901 +0,0 @@
-# pytorch 1.5.0
-import copy
-import math
-import warnings
-from typing import Optional
-
-import torch
-import torch.nn as nn
-from torch import Tensor
-from torch.nn import Dropout, LayerNorm, Linear, Module, ModuleList, Parameter
-from torch.nn import functional as F
-from torch.nn.functional import pad
-from torch.nn.init import constant_, xavier_normal_, xavier_uniform_
-
-
-def multi_head_attention_forward(query, # type: Tensor
- key, # type: Tensor
- value, # type: Tensor
- embed_dim_to_check, # type: int
- num_heads, # type: int
- in_proj_weight, # type: Tensor
- in_proj_bias, # type: Tensor
- bias_k, # type: Optional[Tensor]
- bias_v, # type: Optional[Tensor]
- add_zero_attn, # type: bool
- dropout_p, # type: float
- out_proj_weight, # type: Tensor
- out_proj_bias, # type: Tensor
- training=True, # type: bool
- key_padding_mask=None, # type: Optional[Tensor]
- need_weights=True, # type: bool
- attn_mask=None, # type: Optional[Tensor]
- use_separate_proj_weight=False, # type: bool
- q_proj_weight=None, # type: Optional[Tensor]
- k_proj_weight=None, # type: Optional[Tensor]
- v_proj_weight=None, # type: Optional[Tensor]
- static_k=None, # type: Optional[Tensor]
- static_v=None # type: Optional[Tensor]
- ):
- # type: (...) -> Tuple[Tensor, Optional[Tensor]]
- r"""
- Args:
- query, key, value: map a query and a set of key-value pairs to an output.
- See "Attention Is All You Need" for more details.
- embed_dim_to_check: total dimension of the model.
- num_heads: parallel attention heads.
- in_proj_weight, in_proj_bias: input projection weight and bias.
- bias_k, bias_v: bias of the key and value sequences to be added at dim=0.
- add_zero_attn: add a new batch of zeros to the key and
- value sequences at dim=1.
- dropout_p: probability of an element to be zeroed.
- out_proj_weight, out_proj_bias: the output projection weight and bias.
- training: apply dropout if is ``True``.
- key_padding_mask: if provided, specified padding elements in the key will
- be ignored by the attention. This is an binary mask. When the value is True,
- the corresponding value on the attention layer will be filled with -inf.
- need_weights: output attn_output_weights.
- attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all
- the batches while a 3D mask allows to specify a different mask for the entries of each batch.
- use_separate_proj_weight: the function accept the proj. weights for query, key,
- and value in different forms. If false, in_proj_weight will be used, which is
- a combination of q_proj_weight, k_proj_weight, v_proj_weight.
- q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias.
- static_k, static_v: static key and value used for attention operators.
- Shape:
- Inputs:
- - query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
- the embedding dimension.
- - key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
- the embedding dimension.
- - value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
- the embedding dimension.
- - key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length.
- If a ByteTensor is provided, the non-zero positions will be ignored while the zero positions
- will be unchanged. If a BoolTensor is provided, the positions with the
- value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged.
- - attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
- 3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
- S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked
- positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend
- while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``
- are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
- is provided, it will be added to the attention weight.
- - static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
- N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
- - static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
- N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
- Outputs:
- - attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
- E is the embedding dimension.
- - attn_output_weights: :math:`(N, L, S)` where N is the batch size,
- L is the target sequence length, S is the source sequence length.
- """
- # if not torch.jit.is_scripting():
- # tens_ops = (query, key, value, in_proj_weight, in_proj_bias, bias_k, bias_v,
- # out_proj_weight, out_proj_bias)
- # if any([type(t) is not Tensor for t in tens_ops]) and has_torch_function(tens_ops):
- # return handle_torch_function(
- # multi_head_attention_forward, tens_ops, query, key, value,
- # embed_dim_to_check, num_heads, in_proj_weight, in_proj_bias,
- # bias_k, bias_v, add_zero_attn, dropout_p, out_proj_weight,
- # out_proj_bias, training=training, key_padding_mask=key_padding_mask,
- # need_weights=need_weights, attn_mask=attn_mask,
- # use_separate_proj_weight=use_separate_proj_weight,
- # q_proj_weight=q_proj_weight, k_proj_weight=k_proj_weight,
- # v_proj_weight=v_proj_weight, static_k=static_k, static_v=static_v)
- tgt_len, bsz, embed_dim = query.size()
- assert embed_dim == embed_dim_to_check
- assert key.size() == value.size()
-
- head_dim = embed_dim // num_heads
- assert head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads"
- scaling = float(head_dim) ** -0.5
-
- if not use_separate_proj_weight:
- if torch.equal(query, key) and torch.equal(key, value):
- # self-attention
- q, k, v = F.linear(query, in_proj_weight, in_proj_bias).chunk(3, dim=-1)
-
- elif torch.equal(key, value):
- # encoder-decoder attention
- # This is inline in_proj function with in_proj_weight and in_proj_bias
- _b = in_proj_bias
- _start = 0
- _end = embed_dim
- _w = in_proj_weight[_start:_end, :]
- if _b is not None:
- _b = _b[_start:_end]
- q = F.linear(query, _w, _b)
-
- if key is None:
- assert value is None
- k = None
- v = None
- else:
-
- # This is inline in_proj function with in_proj_weight and in_proj_bias
- _b = in_proj_bias
- _start = embed_dim
- _end = None
- _w = in_proj_weight[_start:, :]
- if _b is not None:
- _b = _b[_start:]
- k, v = F.linear(key, _w, _b).chunk(2, dim=-1)
-
- else:
- # This is inline in_proj function with in_proj_weight and in_proj_bias
- _b = in_proj_bias
- _start = 0
- _end = embed_dim
- _w = in_proj_weight[_start:_end, :]
- if _b is not None:
- _b = _b[_start:_end]
- q = F.linear(query, _w, _b)
-
- # This is inline in_proj function with in_proj_weight and in_proj_bias
- _b = in_proj_bias
- _start = embed_dim
- _end = embed_dim * 2
- _w = in_proj_weight[_start:_end, :]
- if _b is not None:
- _b = _b[_start:_end]
- k = F.linear(key, _w, _b)
-
- # This is inline in_proj function with in_proj_weight and in_proj_bias
- _b = in_proj_bias
- _start = embed_dim * 2
- _end = None
- _w = in_proj_weight[_start:, :]
- if _b is not None:
- _b = _b[_start:]
- v = F.linear(value, _w, _b)
- else:
- q_proj_weight_non_opt = torch.jit._unwrap_optional(q_proj_weight)
- len1, len2 = q_proj_weight_non_opt.size()
- assert len1 == embed_dim and len2 == query.size(-1)
-
- k_proj_weight_non_opt = torch.jit._unwrap_optional(k_proj_weight)
- len1, len2 = k_proj_weight_non_opt.size()
- assert len1 == embed_dim and len2 == key.size(-1)
-
- v_proj_weight_non_opt = torch.jit._unwrap_optional(v_proj_weight)
- len1, len2 = v_proj_weight_non_opt.size()
- assert len1 == embed_dim and len2 == value.size(-1)
-
- if in_proj_bias is not None:
- q = F.linear(query, q_proj_weight_non_opt, in_proj_bias[0:embed_dim])
- k = F.linear(key, k_proj_weight_non_opt, in_proj_bias[embed_dim:(embed_dim * 2)])
- v = F.linear(value, v_proj_weight_non_opt, in_proj_bias[(embed_dim * 2):])
- else:
- q = F.linear(query, q_proj_weight_non_opt, in_proj_bias)
- k = F.linear(key, k_proj_weight_non_opt, in_proj_bias)
- v = F.linear(value, v_proj_weight_non_opt, in_proj_bias)
- q = q * scaling
-
- if attn_mask is not None:
- assert attn_mask.dtype == torch.float32 or attn_mask.dtype == torch.float64 or \
- attn_mask.dtype == torch.float16 or attn_mask.dtype == torch.uint8 or attn_mask.dtype == torch.bool, \
- 'Only float, byte, and bool types are supported for attn_mask, not {}'.format(attn_mask.dtype)
- if attn_mask.dtype == torch.uint8:
- warnings.warn("Byte tensor for attn_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.")
- attn_mask = attn_mask.to(torch.bool)
-
- if attn_mask.dim() == 2:
- attn_mask = attn_mask.unsqueeze(0)
- if list(attn_mask.size()) != [1, query.size(0), key.size(0)]:
- raise RuntimeError('The size of the 2D attn_mask is not correct.')
- elif attn_mask.dim() == 3:
- if list(attn_mask.size()) != [bsz * num_heads, query.size(0), key.size(0)]:
- raise RuntimeError('The size of the 3D attn_mask is not correct.')
- else:
- raise RuntimeError("attn_mask's dimension {} is not supported".format(attn_mask.dim()))
- # attn_mask's dim is 3 now.
-
- # # convert ByteTensor key_padding_mask to bool
- # if key_padding_mask is not None and key_padding_mask.dtype == torch.uint8:
- # warnings.warn("Byte tensor for key_padding_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.")
- # key_padding_mask = key_padding_mask.to(torch.bool)
-
- if bias_k is not None and bias_v is not None:
- if static_k is None and static_v is None:
- k = torch.cat([k, bias_k.repeat(1, bsz, 1)])
- v = torch.cat([v, bias_v.repeat(1, bsz, 1)])
- if attn_mask is not None:
- attn_mask = pad(attn_mask, (0, 1))
- if key_padding_mask is not None:
- key_padding_mask = pad(key_padding_mask, (0, 1))
- else:
- assert static_k is None, "bias cannot be added to static key."
- assert static_v is None, "bias cannot be added to static value."
- else:
- assert bias_k is None
- assert bias_v is None
-
- q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
- if k is not None:
- k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
- if v is not None:
- v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
-
- if static_k is not None:
- assert static_k.size(0) == bsz * num_heads
- assert static_k.size(2) == head_dim
- k = static_k
-
- if static_v is not None:
- assert static_v.size(0) == bsz * num_heads
- assert static_v.size(2) == head_dim
- v = static_v
-
- src_len = k.size(1)
-
- if key_padding_mask is not None:
- assert key_padding_mask.size(0) == bsz
- assert key_padding_mask.size(1) == src_len
-
- if add_zero_attn:
- src_len += 1
- k = torch.cat([k, torch.zeros((k.size(0), 1) + k.size()[2:], dtype=k.dtype, device=k.device)], dim=1)
- v = torch.cat([v, torch.zeros((v.size(0), 1) + v.size()[2:], dtype=v.dtype, device=v.device)], dim=1)
- if attn_mask is not None:
- attn_mask = pad(attn_mask, (0, 1))
- if key_padding_mask is not None:
- key_padding_mask = pad(key_padding_mask, (0, 1))
-
- attn_output_weights = torch.bmm(q, k.transpose(1, 2))
- assert list(attn_output_weights.size()) == [bsz * num_heads, tgt_len, src_len]
-
- if attn_mask is not None:
- if attn_mask.dtype == torch.bool:
- attn_output_weights.masked_fill_(attn_mask, float('-inf'))
- else:
- attn_output_weights += attn_mask
-
-
- if key_padding_mask is not None:
- attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
- attn_output_weights = attn_output_weights.masked_fill(
- key_padding_mask.unsqueeze(1).unsqueeze(2),
- float('-inf'),
- )
- attn_output_weights = attn_output_weights.view(bsz * num_heads, tgt_len, src_len)
-
- attn_output_weights = F.softmax(
- attn_output_weights, dim=-1)
- attn_output_weights = F.dropout(attn_output_weights, p=dropout_p, training=training)
-
- attn_output = torch.bmm(attn_output_weights, v)
- assert list(attn_output.size()) == [bsz * num_heads, tgt_len, head_dim]
- attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
- attn_output = F.linear(attn_output, out_proj_weight, out_proj_bias)
-
- if need_weights:
- # average attention weights over heads
- attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
- return attn_output, attn_output_weights.sum(dim=1) / num_heads
- else:
- return attn_output, None
-
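As the branch above shows, a boolean attn_mask is applied with masked_fill_(-inf) while a floating-point mask is simply added to the scores. A tiny self-contained check of the two conventions (illustrative only; a padding-style mask over three keys):

import torch

scores = torch.zeros(2, 3)                     # toy scores: 2 queries over 3 keys
pad_mask = torch.tensor([False, False, True])  # pretend the third key is padding

masked = scores.masked_fill(pad_mask, float("-inf"))         # bool mask: True -> -inf
print(torch.softmax(masked, dim=-1))                         # rows: tensor([0.5000, 0.5000, 0.0000])

additive = torch.zeros(3).masked_fill(pad_mask, float("-inf"))
print(torch.softmax(scores + additive, dim=-1))              # float mask is added: same rows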
-class MultiheadAttention(Module):
- r"""Allows the model to jointly attend to information
- from different representation subspaces.
- See reference: Attention Is All You Need
- .. math::
- \text{MultiHead}(Q, K, V) = \text{Concat}(head_1,\dots,head_h)W^O
- \text{where} head_i = \text{Attention}(QW_i^Q, KW_i^K, VW_i^V)
- Args:
- embed_dim: total dimension of the model.
- num_heads: parallel attention heads.
- dropout: a Dropout layer on attn_output_weights. Default: 0.0.
- bias: add bias as module parameter. Default: True.
- add_bias_kv: add bias to the key and value sequences at dim=0.
- add_zero_attn: add a new batch of zeros to the key and
- value sequences at dim=1.
- kdim: total number of features in key. Default: None.
- vdim: total number of features in value. Default: None.
- Note: if kdim and vdim are None, they will be set to embed_dim such that
- query, key, and value have the same number of features.
- Examples::
- >>> multihead_attn = nn.MultiheadAttention(embed_dim, num_heads)
- >>> attn_output, attn_output_weights = multihead_attn(query, key, value)
- """
- # __annotations__ = {
- # 'bias_k': torch._jit_internal.Optional[torch.Tensor],
- # 'bias_v': torch._jit_internal.Optional[torch.Tensor],
- # }
- __constants__ = ['q_proj_weight', 'k_proj_weight', 'v_proj_weight', 'in_proj_weight']
-
- def __init__(self, embed_dim, num_heads, dropout=0., bias=True, add_bias_kv=False, add_zero_attn=False, kdim=None, vdim=None):
- super(MultiheadAttention, self).__init__()
- self.embed_dim = embed_dim
- self.kdim = kdim if kdim is not None else embed_dim
- self.vdim = vdim if vdim is not None else embed_dim
- self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim
-
- self.num_heads = num_heads
- self.dropout = dropout
- self.head_dim = embed_dim // num_heads
- assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
-
- if self._qkv_same_embed_dim is False:
- self.q_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim))
- self.k_proj_weight = Parameter(torch.Tensor(embed_dim, self.kdim))
- self.v_proj_weight = Parameter(torch.Tensor(embed_dim, self.vdim))
- self.register_parameter('in_proj_weight', None)
- else:
- self.in_proj_weight = Parameter(torch.empty(3 * embed_dim, embed_dim))
- self.register_parameter('q_proj_weight', None)
- self.register_parameter('k_proj_weight', None)
- self.register_parameter('v_proj_weight', None)
-
- if bias:
- self.in_proj_bias = Parameter(torch.empty(3 * embed_dim))
- else:
- self.register_parameter('in_proj_bias', None)
- self.out_proj = Linear(embed_dim, embed_dim, bias=bias)
-
- if add_bias_kv:
- self.bias_k = Parameter(torch.empty(1, 1, embed_dim))
- self.bias_v = Parameter(torch.empty(1, 1, embed_dim))
- else:
- self.bias_k = self.bias_v = None
-
- self.add_zero_attn = add_zero_attn
-
- self._reset_parameters()
-
- def _reset_parameters(self):
- if self._qkv_same_embed_dim:
- xavier_uniform_(self.in_proj_weight)
- else:
- xavier_uniform_(self.q_proj_weight)
- xavier_uniform_(self.k_proj_weight)
- xavier_uniform_(self.v_proj_weight)
-
- if self.in_proj_bias is not None:
- constant_(self.in_proj_bias, 0.)
- constant_(self.out_proj.bias, 0.)
- if self.bias_k is not None:
- xavier_normal_(self.bias_k)
- if self.bias_v is not None:
- xavier_normal_(self.bias_v)
-
- def __setstate__(self, state):
- # Support loading old MultiheadAttention checkpoints generated by v1.1.0
- if '_qkv_same_embed_dim' not in state:
- state['_qkv_same_embed_dim'] = True
-
- super(MultiheadAttention, self).__setstate__(state)
-
- def forward(self, query, key, value, key_padding_mask=None,
- need_weights=True, attn_mask=None):
- # type: (Tensor, Tensor, Tensor, Optional[Tensor], bool, Optional[Tensor]) -> Tuple[Tensor, Optional[Tensor]]
- r"""
- Args:
- query, key, value: map a query and a set of key-value pairs to an output.
- See "Attention Is All You Need" for more details.
- key_padding_mask: if provided, specified padding elements in the key will
- be ignored by the attention. This is a binary mask. When the value is True,
- the corresponding value on the attention layer will be filled with -inf.
- need_weights: output attn_output_weights.
- attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all
- the batches while a 3D mask allows to specify a different mask for the entries of each batch.
- Shape:
- - Inputs:
- - query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
- the embedding dimension.
- - key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
- the embedding dimension.
- - value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
- the embedding dimension.
- - key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length.
- If a ByteTensor is provided, the non-zero positions will be ignored while the zero
- positions will be unchanged. If a BoolTensor is provided, the positions with the
- value of ``True`` will be ignored while the positions with the value of ``False`` will be unchanged.
- - attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
- 3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
- S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked
- positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend
- while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``
- are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
- is provided, it will be added to the attention weight.
- - Outputs:
- - attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
- E is the embedding dimension.
- - attn_output_weights: :math:`(N, L, S)` where N is the batch size,
- L is the target sequence length, S is the source sequence length.
- """
- if not self._qkv_same_embed_dim:
- return multi_head_attention_forward(
- query, key, value, self.embed_dim, self.num_heads,
- self.in_proj_weight, self.in_proj_bias,
- self.bias_k, self.bias_v, self.add_zero_attn,
- self.dropout, self.out_proj.weight, self.out_proj.bias,
- training=self.training,
- key_padding_mask=key_padding_mask, need_weights=need_weights,
- attn_mask=attn_mask, use_separate_proj_weight=True,
- q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight,
- v_proj_weight=self.v_proj_weight)
- else:
- return multi_head_attention_forward(
- query, key, value, self.embed_dim, self.num_heads,
- self.in_proj_weight, self.in_proj_bias,
- self.bias_k, self.bias_v, self.add_zero_attn,
- self.dropout, self.out_proj.weight, self.out_proj.bias,
- training=self.training,
- key_padding_mask=key_padding_mask, need_weights=need_weights,
- attn_mask=attn_mask)
-
-
-class Transformer(Module):
- r"""A transformer model. User is able to modify the attributes as needed. The architecture
- is based on the paper "Attention Is All You Need". Ashish Vaswani, Noam Shazeer,
- Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and
- Illia Polosukhin. 2017. Attention is all you need. In Advances in Neural Information
- Processing Systems, pages 6000-6010. Users can build the BERT(https://arxiv.org/abs/1810.04805)
- model with corresponding parameters.
-
- Args:
- d_model: the number of expected features in the encoder/decoder inputs (default=512).
- nhead: the number of heads in the multiheadattention models (default=8).
- num_encoder_layers: the number of sub-encoder-layers in the encoder (default=6).
- num_decoder_layers: the number of sub-decoder-layers in the decoder (default=6).
- dim_feedforward: the dimension of the feedforward network model (default=2048).
- dropout: the dropout value (default=0.1).
- activation: the activation function of encoder/decoder intermediate layer, relu or gelu (default=relu).
- custom_encoder: custom encoder (default=None).
- custom_decoder: custom decoder (default=None).
-
- Examples::
- >>> transformer_model = nn.Transformer(nhead=16, num_encoder_layers=12)
- >>> src = torch.rand((10, 32, 512))
- >>> tgt = torch.rand((20, 32, 512))
- >>> out = transformer_model(src, tgt)
-
- Note: A full example to apply nn.Transformer module for the word language model is available in
- https://github.com/pytorch/examples/tree/master/word_language_model
- """
-
- def __init__(self, d_model=512, nhead=8, num_encoder_layers=6,
- num_decoder_layers=6, dim_feedforward=2048, dropout=0.1,
- activation="relu", custom_encoder=None, custom_decoder=None):
- super(Transformer, self).__init__()
-
- if custom_encoder is not None:
- self.encoder = custom_encoder
- else:
- encoder_layer = TransformerEncoderLayer(d_model, nhead, dim_feedforward, dropout, activation)
- encoder_norm = LayerNorm(d_model)
- self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm)
-
- if custom_decoder is not None:
- self.decoder = custom_decoder
- else:
- decoder_layer = TransformerDecoderLayer(d_model, nhead, dim_feedforward, dropout, activation)
- decoder_norm = LayerNorm(d_model)
- self.decoder = TransformerDecoder(decoder_layer, num_decoder_layers, decoder_norm)
-
- self._reset_parameters()
-
- self.d_model = d_model
- self.nhead = nhead
-
- def forward(self, src, tgt, src_mask=None, tgt_mask=None,
- memory_mask=None, src_key_padding_mask=None,
- tgt_key_padding_mask=None, memory_key_padding_mask=None):
- # type: (Tensor, Tensor, Optional[Tensor], Optional[Tensor], Optional[Tensor], Optional[Tensor], Optional[Tensor], Optional[Tensor]) -> Tensor # noqa
- r"""Take in and process masked source/target sequences.
-
- Args:
- src: the sequence to the encoder (required).
- tgt: the sequence to the decoder (required).
- src_mask: the additive mask for the src sequence (optional).
- tgt_mask: the additive mask for the tgt sequence (optional).
- memory_mask: the additive mask for the encoder output (optional).
- src_key_padding_mask: the ByteTensor mask for src keys per batch (optional).
- tgt_key_padding_mask: the ByteTensor mask for tgt keys per batch (optional).
- memory_key_padding_mask: the ByteTensor mask for memory keys per batch (optional).
-
- Shape:
- - src: :math:`(S, N, E)`.
- - tgt: :math:`(T, N, E)`.
- - src_mask: :math:`(S, S)`.
- - tgt_mask: :math:`(T, T)`.
- - memory_mask: :math:`(T, S)`.
- - src_key_padding_mask: :math:`(N, S)`.
- - tgt_key_padding_mask: :math:`(N, T)`.
- - memory_key_padding_mask: :math:`(N, S)`.
-
- Note: [src/tgt/memory]_mask ensures that position i is allowed to attend the unmasked
- positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend
- while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``
- are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
- is provided, it will be added to the attention weight.
- [src/tgt/memory]_key_padding_mask provides specified elements in the key to be ignored by
- the attention. If a ByteTensor is provided, the non-zero positions will be ignored while the zero
- positions will be unchanged. If a BoolTensor is provided, the positions with the
- value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged.
-
- - output: :math:`(T, N, E)`.
-
- Note: Due to the multi-head attention architecture in the transformer model,
- the output sequence length of a transformer is the same as the input sequence
- (i.e. target) length of the decoder.
-
- where S is the source sequence length, T is the target sequence length, N is the
- batch size, E is the feature number
-
- Examples:
- >>> output = transformer_model(src, tgt, src_mask=src_mask, tgt_mask=tgt_mask)
- """
-
- if src.size(1) != tgt.size(1):
- raise RuntimeError("the batch number of src and tgt must be equal")
-
- if src.size(2) != self.d_model or tgt.size(2) != self.d_model:
- raise RuntimeError("the feature number of src and tgt must be equal to d_model")
-
- memory = self.encoder(src, mask=src_mask, src_key_padding_mask=src_key_padding_mask)
- output = self.decoder(tgt, memory, tgt_mask=tgt_mask, memory_mask=memory_mask,
- tgt_key_padding_mask=tgt_key_padding_mask,
- memory_key_padding_mask=memory_key_padding_mask)
- return output
-
- def generate_square_subsequent_mask(self, sz):
- r"""Generate a square mask for the sequence. The masked positions are filled with float('-inf').
- Unmasked positions are filled with float(0.0).
- """
- mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)
- mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
- return mask
-
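For concreteness, the construction above yields the additive causal mask below for sz=3: 0.0 on and below the diagonal, -inf above it. The snippet repeats the method body verbatim so the output can be checked by eye:

import torch

sz = 3
mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)
mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
print(mask)
# tensor([[0., -inf, -inf],
#         [0., 0., -inf],
#         [0., 0., 0.]])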
- def _reset_parameters(self):
- r"""Initiate parameters in the transformer model."""
-
- for p in self.parameters():
- if p.dim() > 1:
- xavier_uniform_(p)
-
-
-class TransformerEncoder(Module):
- r"""TransformerEncoder is a stack of N encoder layers
-
- Args:
- encoder_layer: an instance of the TransformerEncoderLayer() class (required).
- num_layers: the number of sub-encoder-layers in the encoder (required).
- norm: the layer normalization component (optional).
-
- Examples::
- >>> encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8)
- >>> transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=6)
- >>> src = torch.rand(10, 32, 512)
- >>> out = transformer_encoder(src)
- """
- __constants__ = ['norm']
-
- def __init__(self, encoder_layer, num_layers, norm=None):
- super(TransformerEncoder, self).__init__()
- self.layers = _get_clones(encoder_layer, num_layers)
- self.num_layers = num_layers
- self.norm = norm
-
- def forward(self, src, mask=None, src_key_padding_mask=None):
- # type: (Tensor, Optional[Tensor], Optional[Tensor]) -> Tensor
- r"""Pass the input through the encoder layers in turn.
-
- Args:
- src: the sequence to the encoder (required).
- mask: the mask for the src sequence (optional).
- src_key_padding_mask: the mask for the src keys per batch (optional).
-
- Shape:
- see the docs in Transformer class.
- """
- output = src
-
- for i, mod in enumerate(self.layers):
- output = mod(output, src_mask=mask, src_key_padding_mask=src_key_padding_mask)
-
- if self.norm is not None:
- output = self.norm(output)
-
- return output
-
-
-class TransformerDecoder(Module):
- r"""TransformerDecoder is a stack of N decoder layers
-
- Args:
- decoder_layer: an instance of the TransformerDecoderLayer() class (required).
- num_layers: the number of sub-decoder-layers in the decoder (required).
- norm: the layer normalization component (optional).
-
- Examples::
- >>> decoder_layer = nn.TransformerDecoderLayer(d_model=512, nhead=8)
- >>> transformer_decoder = nn.TransformerDecoder(decoder_layer, num_layers=6)
- >>> memory = torch.rand(10, 32, 512)
- >>> tgt = torch.rand(20, 32, 512)
- >>> out = transformer_decoder(tgt, memory)
- """
- __constants__ = ['norm']
-
- def __init__(self, decoder_layer, num_layers, norm=None):
- super(TransformerDecoder, self).__init__()
- self.layers = _get_clones(decoder_layer, num_layers)
- self.num_layers = num_layers
- self.norm = norm
-
- def forward(self, tgt, memory, memory2=None, tgt_mask=None,
- memory_mask=None, memory_mask2=None, tgt_key_padding_mask=None,
- memory_key_padding_mask=None, memory_key_padding_mask2=None):
- # type: (Tensor, Tensor, Optional[Tensor], Optional[Tensor], Optional[Tensor], Optional[Tensor]) -> Tensor
- r"""Pass the inputs (and mask) through the decoder layer in turn.
-
- Args:
- tgt: the sequence to the decoder (required).
- memory: the sequence from the last layer of the encoder (required).
- tgt_mask: the mask for the tgt sequence (optional).
- memory_mask: the mask for the memory sequence (optional).
- tgt_key_padding_mask: the mask for the tgt keys per batch (optional).
- memory_key_padding_mask: the mask for the memory keys per batch (optional).
-
- Shape:
- see the docs in Transformer class.
- """
- output = tgt
-
- for mod in self.layers:
- output = mod(output, memory, memory2=memory2, tgt_mask=tgt_mask,
- memory_mask=memory_mask, memory_mask2=memory_mask2,
- tgt_key_padding_mask=tgt_key_padding_mask,
- memory_key_padding_mask=memory_key_padding_mask,
- memory_key_padding_mask2=memory_key_padding_mask2)
-
- if self.norm is not None:
- output = self.norm(output)
-
- return output
-
-class TransformerEncoderLayer(Module):
- r"""TransformerEncoderLayer is made up of self-attn and feedforward network.
- This standard encoder layer is based on the paper "Attention Is All You Need".
- Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez,
- Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in
- Neural Information Processing Systems, pages 6000-6010. Users may modify or implement
- in a different way during application.
-
- Args:
- d_model: the number of expected features in the input (required).
- nhead: the number of heads in the multiheadattention models (required).
- dim_feedforward: the dimension of the feedforward network model (default=2048).
- dropout: the dropout value (default=0.1).
- activation: the activation function of intermediate layer, relu or gelu (default=relu).
-
- Examples::
- >>> encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8)
- >>> src = torch.rand(10, 32, 512)
- >>> out = encoder_layer(src)
- """
-
- def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
- activation="relu", debug=False):
- super(TransformerEncoderLayer, self).__init__()
- self.debug = debug
- self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout)
- # Implementation of Feedforward model
- self.linear1 = Linear(d_model, dim_feedforward)
- self.dropout = Dropout(dropout)
- self.linear2 = Linear(dim_feedforward, d_model)
-
- self.norm1 = LayerNorm(d_model)
- self.norm2 = LayerNorm(d_model)
- self.dropout1 = Dropout(dropout)
- self.dropout2 = Dropout(dropout)
-
- self.activation = _get_activation_fn(activation)
-
- def __setstate__(self, state):
- if 'activation' not in state:
- state['activation'] = F.relu
- super(TransformerEncoderLayer, self).__setstate__(state)
-
- def forward(self, src, src_mask=None, src_key_padding_mask=None):
- # type: (Tensor, Optional[Tensor], Optional[Tensor]) -> Tensor
- r"""Pass the input through the encoder layer.
-
- Args:
- src: the sequence to the encoder layer (required).
- src_mask: the mask for the src sequence (optional).
- src_key_padding_mask: the mask for the src keys per batch (optional).
-
- Shape:
- see the docs in Transformer class.
- """
- src2, attn = self.self_attn(src, src, src, attn_mask=src_mask,
- key_padding_mask=src_key_padding_mask)
- if self.debug: self.attn = attn
- src = src + self.dropout1(src2)
- src = self.norm1(src)
- src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
- src = src + self.dropout2(src2)
- src = self.norm2(src)
-
- return src
-
-
-class TransformerDecoderLayer(Module):
- r"""TransformerDecoderLayer is made up of self-attn, multi-head-attn and feedforward network.
- This standard decoder layer is based on the paper "Attention Is All You Need".
- Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez,
- Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in
- Neural Information Processing Systems, pages 6000-6010. Users may modify or implement
- in a different way during application.
-
- Args:
- d_model: the number of expected features in the input (required).
- nhead: the number of heads in the multiheadattention models (required).
- dim_feedforward: the dimension of the feedforward network model (default=2048).
- dropout: the dropout value (default=0.1).
- activation: the activation function of intermediate layer, relu or gelu (default=relu).
-
- Examples::
- >>> decoder_layer = nn.TransformerDecoderLayer(d_model=512, nhead=8)
- >>> memory = torch.rand(10, 32, 512)
- >>> tgt = torch.rand(20, 32, 512)
- >>> out = decoder_layer(tgt, memory)
- """
-
- def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
- activation="relu", self_attn=True, siamese=False, debug=False):
- super(TransformerDecoderLayer, self).__init__()
- self.has_self_attn, self.siamese = self_attn, siamese
- self.debug = debug
- if self.has_self_attn:
- self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout)
- self.norm1 = LayerNorm(d_model)
- self.dropout1 = Dropout(dropout)
- self.multihead_attn = MultiheadAttention(d_model, nhead, dropout=dropout)
- # Implementation of Feedforward model
- self.linear1 = Linear(d_model, dim_feedforward)
- self.dropout = Dropout(dropout)
- self.linear2 = Linear(dim_feedforward, d_model)
-
- self.norm2 = LayerNorm(d_model)
- self.norm3 = LayerNorm(d_model)
- self.dropout2 = Dropout(dropout)
- self.dropout3 = Dropout(dropout)
- if self.siamese:
- self.multihead_attn2 = MultiheadAttention(d_model, nhead, dropout=dropout)
-
- self.activation = _get_activation_fn(activation)
-
- def __setstate__(self, state):
- if 'activation' not in state:
- state['activation'] = F.relu
- super(TransformerDecoderLayer, self).__setstate__(state)
-
- def forward(self, tgt, memory, tgt_mask=None, memory_mask=None,
- tgt_key_padding_mask=None, memory_key_padding_mask=None,
- memory2=None, memory_mask2=None, memory_key_padding_mask2=None):
- # type: (Tensor, Tensor, Optional[Tensor], Optional[Tensor], Optional[Tensor], Optional[Tensor]) -> Tensor
- r"""Pass the inputs (and mask) through the decoder layer.
-
- Args:
- tgt: the sequence to the decoder layer (required).
- memory: the sequence from the last layer of the encoder (required).
- tgt_mask: the mask for the tgt sequence (optional).
- memory_mask: the mask for the memory sequence (optional).
- tgt_key_padding_mask: the mask for the tgt keys per batch (optional).
- memory_key_padding_mask: the mask for the memory keys per batch (optional).
-
- Shape:
- see the docs in Transformer class.
- """
- if self.has_self_attn:
- tgt2, attn = self.self_attn(tgt, tgt, tgt, attn_mask=tgt_mask,
- key_padding_mask=tgt_key_padding_mask)
- tgt = tgt + self.dropout1(tgt2)
- tgt = self.norm1(tgt)
- if self.debug: self.attn = attn
- tgt2, attn2 = self.multihead_attn(tgt, memory, memory, attn_mask=memory_mask,
- key_padding_mask=memory_key_padding_mask)
- if self.debug: self.attn2 = attn2
-
- if self.siamese:
- tgt3, attn3 = self.multihead_attn2(tgt, memory2, memory2, attn_mask=memory_mask2,
- key_padding_mask=memory_key_padding_mask2)
- tgt = tgt + self.dropout2(tgt3)
- if self.debug: self.attn3 = attn3
-
- tgt = tgt + self.dropout2(tgt2)
- tgt = self.norm2(tgt)
- tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
- tgt = tgt + self.dropout3(tgt2)
- tgt = self.norm3(tgt)
-
- return tgt
-
-
-def _get_clones(module, N):
- return ModuleList([copy.deepcopy(module) for i in range(N)])
-
-
-def _get_activation_fn(activation):
- if activation == "relu":
- return F.relu
- elif activation == "gelu":
- return F.gelu
-
- raise RuntimeError("activation should be relu/gelu, not {}".format(activation))
-
-
-class PositionalEncoding(nn.Module):
- r"""Inject some information about the relative or absolute position of the tokens
- in the sequence. The positional encodings have the same dimension as
- the embeddings, so that the two can be summed. Here, we use sine and cosine
- functions of different frequencies.
- .. math::
- \text{PosEncoder}(pos, 2i) = \sin(pos/10000^{2i/d_{model}})
- \text{PosEncoder}(pos, 2i+1) = \cos(pos/10000^{2i/d_{model}})
- \text{where pos is the word position and i is the embed idx}
- Args:
- d_model: the embed dim (required).
- dropout: the dropout value (default=0.1).
- max_len: the max. length of the incoming sequence (default=5000).
- Examples:
- >>> pos_encoder = PositionalEncoding(d_model)
- """
-
- def __init__(self, d_model, dropout=0.1, max_len=5000):
- super(PositionalEncoding, self).__init__()
- self.dropout = nn.Dropout(p=dropout)
-
- pe = torch.zeros(max_len, d_model)
- position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
- div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
- pe[:, 0::2] = torch.sin(position * div_term)
- pe[:, 1::2] = torch.cos(position * div_term)
- pe = pe.unsqueeze(0).transpose(0, 1)
- self.register_buffer('pe', pe)
-
- def forward(self, x):
- r"""Inputs of forward function
- Args:
- x: the sequence fed to the positional encoder model (required).
- Shape:
- x: [sequence length, batch size, embed dim]
- output: [sequence length, batch size, embed dim]
- Examples:
- >>> output = pos_encoder(x)
- """
-
- x = x + self.pe[:x.size(0), :]
- return self.dropout(x)
-
-
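A quick behavioural check of the class above (it uses the PositionalEncoding defined in this file; eval() disables dropout so the values are exact): position 0 receives [sin(0), cos(0), ...] = [0, 1, 0, 1, ...], and the input shape is preserved.

import torch

enc = PositionalEncoding(d_model=8, dropout=0.1, max_len=50).eval()
x = torch.zeros(10, 2, 8)   # (sequence length, batch size, embed dim)
y = enc(x)
print(y.shape)              # torch.Size([10, 2, 8])
print(y[0, 0])              # tensor([0., 1., 0., 1., 0., 1., 0., 1.])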
-if __name__ == '__main__':
- transformer_model = Transformer(nhead=16, num_encoder_layers=12)
- src = torch.rand((10, 32, 512))
- tgt = torch.rand((20, 32, 512))
- out = transformer_model(src, tgt)
- print(out)
diff --git a/spaces/soldguu/yumyum/README.md b/spaces/soldguu/yumyum/README.md
deleted file mode 100644
index 222985e8d579cbf4f8eef38c31e3bfc816466a28..0000000000000000000000000000000000000000
--- a/spaces/soldguu/yumyum/README.md
+++ /dev/null
@@ -1,10 +0,0 @@
----
-title: Yumyum
-emoji: 🔥
-colorFrom: yellow
-colorTo: gray
-sdk: docker
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/sophiamyang/Panel_InstructPix2Pix/app.py b/spaces/sophiamyang/Panel_InstructPix2Pix/app.py
deleted file mode 100644
index 4e0f7bd81f237d661e66ebe2087f20fab7a8cad6..0000000000000000000000000000000000000000
--- a/spaces/sophiamyang/Panel_InstructPix2Pix/app.py
+++ /dev/null
@@ -1,131 +0,0 @@
-import io
-
-import numpy as np
-import panel as pn
-import param
-import PIL
-import requests
-import torch
-
-from diffusers import StableDiffusionInstructPix2PixPipeline
-
-pn.extension('texteditor', template="bootstrap", sizing_mode='stretch_width')
-
-pn.state.template.param.update(
- main_max_width="690px",
- header_background="#F08080",
-)
-
-model_id = "timbrooks/instruct-pix2pix"
-device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
-
-if 'pipe' in pn.state.cache:
- pipe = pn.state.cache['pipe']
-else:
- pipe = pn.state.cache['pipe'] = StableDiffusionInstructPix2PixPipeline.from_pretrained(
- model_id, torch_dtype=torch.float16
- ).to(device)
- pipe.enable_xformers_memory_efficient_attention()
- pipe.unet.to(memory_format=torch.channels_last)
-
-def normalize_image(value, width):
- """
- normalize image to RGB channels and to the same size
- """
- b = io.BytesIO(value)
- image = PIL.Image.open(b).convert("RGB")
- aspect = image.size[1] / image.size[0]
- height = int(aspect * width)
- return image.resize((width, height), PIL.Image.LANCZOS)
-
-def new_image(prompt, image, img_guidance, guidance, steps, width=600):
- """
- create a new image from the StableDiffusionInstructPix2PixPipeline model
- """
- edit = pipe(
- prompt,
- image=image,
- image_guidance_scale=img_guidance,
- guidance_scale=guidance,
- num_inference_steps=steps,
- ).images[0]
- return edit
-
-file_input = pn.widgets.FileInput(width=600)
-
-prompt = pn.widgets.TextEditor(
- value="", placeholder="Enter image editing instruction here...", height=160, toolbar=False
-)
-img_guidance = pn.widgets.DiscreteSlider(
- name="Image guidance scale", options=list(np.arange(1, 10.5, 0.5)), value=1.5
-)
-guidance = pn.widgets.DiscreteSlider(
- name="Guidance scale", options=list(np.arange(1, 10.5, 0.5)), value=7
-)
-steps = pn.widgets.IntSlider(
- name="Inference Steps", start=1, end=100, step=1, value=20
-)
-run_button = pn.widgets.Button(name="Run!")
-
-widgets = pn.Row(
- pn.Column(prompt, run_button, margin=5),
- pn.Card(
- pn.Column(img_guidance, guidance, steps),
- title="Advanced settings", margin=10
- ), width=600
-)
-
-# define global variables to keep track of things
-convos = [] # store all panel objects in a list
-image = None
-filename = None
-
-def get_conversations(_, img, img_guidance, guidance, steps, width=600):
- """
- Get all the conversations in a Panel object
- """
- global image, filename
- prompt_text = prompt.value
- prompt.value = ""
-
- # if the filename changes, open the image again
- if filename != file_input.filename:
- filename = file_input.filename
- image = normalize_image(file_input.value, width)
- convos.clear()
-
- # if there is a prompt run output
- if prompt_text:
- image = new_image(prompt_text, image, img_guidance, guidance, steps)
- convos.extend([
- pn.Row(
- pn.panel("\U0001F60A", width=10),
- prompt_text,
- width=600
- ),
- pn.Row(
- pn.panel(image, align='end', width=500),
- pn.panel("\U0001F916", width=10),
- align='end'
- )
- ])
- return pn.Column(*convos, margin=15, width=575)
-
-# bind widgets to functions
-interactive_upload = pn.panel(pn.bind(pn.panel, file_input, width=575, min_height=400, margin=15))
-
-interactive_conversation = pn.panel(
- pn.bind(
- get_conversations, run_button, file_input, img_guidance, guidance, steps
- ), loading_indicator=True
-)
-
-
-# layout
-pn.Column(
- "## \U0001F60A Upload an image file and start editing!",
- file_input,
- interactive_upload,
- interactive_conversation,
- widgets
-).servable(title="Panel Stable Diffusion InstructPix2pix Image Editing Chatbot")
\ No newline at end of file
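Two reusable ideas in the app above are worth isolating: caching an expensive object in pn.state.cache so only the first session pays the loading cost, and wiring a button to a callback with pn.bind. The sketch below is a minimal stand-alone illustration; load_expensive_model and the widget names are placeholders, not part of the original app.

```python
import panel as pn

pn.extension()

def load_expensive_model():
    # placeholder for a slow load (e.g. a diffusers pipeline)
    return {"name": "dummy-model"}

# cache pattern: reuse the object across sessions instead of reloading it
if "model" in pn.state.cache:
    model = pn.state.cache["model"]
else:
    model = pn.state.cache["model"] = load_expensive_model()

prompt = pn.widgets.TextInput(placeholder="Enter a prompt...")
run_button = pn.widgets.Button(name="Run!")

def run(_):
    # the bound button value changes on click, which re-runs this function
    return f"{model['name']} would process: {prompt.value!r}"

pn.Column(prompt, run_button, pn.bind(run, run_button)).servable()
```

Served with `panel serve` (the script name is arbitrary), this would re-render the bound output on every click, mirroring how get_conversations is bound to run_button above.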
diff --git a/spaces/sparkyrider/OpenAI-SHAP-E/app.py b/spaces/sparkyrider/OpenAI-SHAP-E/app.py
deleted file mode 100644
index a699bc5b3c2e987102ca93e0ee28d601e0a93d02..0000000000000000000000000000000000000000
--- a/spaces/sparkyrider/OpenAI-SHAP-E/app.py
+++ /dev/null
@@ -1,7 +0,0 @@
-import gradio as gr
-
-def greet(name):
- return "Hello " + name + "!!"
-
-iface = gr.Interface(fn=greet, inputs="text", outputs="text")
-iface.launch()
\ No newline at end of file
diff --git a/spaces/sqc1729/bingi/src/pages/api/blob.ts b/spaces/sqc1729/bingi/src/pages/api/blob.ts
deleted file mode 100644
index fecd48031916b2284b8958892196e0a1ad420421..0000000000000000000000000000000000000000
--- a/spaces/sqc1729/bingi/src/pages/api/blob.ts
+++ /dev/null
@@ -1,40 +0,0 @@
-'use server'
-
-import { NextApiRequest, NextApiResponse } from 'next'
-import { Readable } from 'node:stream'
-import { fetch } from '@/lib/isomorphic'
-
-const API_DOMAIN = 'https://www.bing.com'
-
-export default async function handler(req: NextApiRequest, res: NextApiResponse) {
- try {
- const { bcid } = req.query
-
- const { headers, body } = await fetch(`${API_DOMAIN}/images/blob?bcid=${bcid}`,
- {
- method: 'GET',
- headers: {
- "sec-ch-ua": "\"Not/A)Brand\";v=\"99\", \"Google Chrome\";v=\"115\", \"Chromium\";v=\"115\"",
- "sec-ch-ua-mobile": "?0",
- "sec-ch-ua-platform": "\"Windows\"",
- "Referrer-Policy": "origin-when-cross-origin",
- },
- },
- )
-
- res.writeHead(200, {
- 'Content-Length': headers.get('content-length')!,
- 'Content-Type': headers.get('content-type')!,
- })
- // @ts-ignore
- return Readable.fromWeb(body!).pipe(res)
- } catch (e) {
- console.log('Error', e)
- return res.json({
- result: {
- value: 'UploadFailed',
- message: `${e}`
- }
- })
- }
-}
diff --git a/spaces/sqc1729/bingi/src/pages/api/sydney.ts b/spaces/sqc1729/bingi/src/pages/api/sydney.ts
deleted file mode 100644
index 0e7bbf23d77c2e1a6635185a060eeee58b8c8e66..0000000000000000000000000000000000000000
--- a/spaces/sqc1729/bingi/src/pages/api/sydney.ts
+++ /dev/null
@@ -1,62 +0,0 @@
-import { NextApiRequest, NextApiResponse } from 'next'
-import { WebSocket, debug } from '@/lib/isomorphic'
-import { BingWebBot } from '@/lib/bots/bing'
-import { websocketUtils } from '@/lib/bots/bing/utils'
-import { WatchDog, createHeaders } from '@/lib/utils'
-
-
-export default async function handler(req: NextApiRequest, res: NextApiResponse) {
- const conversationContext = req.body
- const headers = createHeaders(req.cookies)
- debug(headers)
- res.setHeader('Content-Type', 'text/stream; charset=UTF-8')
-
- const ws = new WebSocket('wss://sydney.bing.com/sydney/ChatHub', {
- headers: {
- ...headers,
- 'accept-language': 'zh-CN,zh;q=0.9',
- 'cache-control': 'no-cache',
- 'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32',
- pragma: 'no-cache',
- }
- })
-
- const closeDog = new WatchDog()
- const timeoutDog = new WatchDog()
- ws.onmessage = (event) => {
- timeoutDog.watch(() => {
- ws.send(websocketUtils.packMessage({ type: 6 }))
- }, 1500)
- closeDog.watch(() => {
- ws.close()
- }, 10000)
- res.write(event.data)
- if (/\{"type":([367])\}/.test(String(event.data))) {
- const type = parseInt(RegExp.$1, 10)
- debug('connection type', type)
- if (type === 3) {
- ws.close()
- } else {
- ws.send(websocketUtils.packMessage({ type }))
- }
- }
- }
-
- ws.onclose = () => {
- timeoutDog.reset()
- closeDog.reset()
- debug('connection close')
- res.end()
- }
-
- await new Promise((resolve) => ws.onopen = resolve)
- ws.send(websocketUtils.packMessage({ protocol: 'json', version: 1 }))
- ws.send(websocketUtils.packMessage({ type: 6 }))
- ws.send(websocketUtils.packMessage(BingWebBot.buildChatRequest(conversationContext!)))
- req.socket.once('close', () => {
- ws.close()
- if (!res.closed) {
- res.end()
- }
- })
-}
diff --git a/spaces/stomexserde/gpt4-ui/Examples/Adobe Acrobat Reader DC 2020.006.20034 Multilingual REPACK.md b/spaces/stomexserde/gpt4-ui/Examples/Adobe Acrobat Reader DC 2020.006.20034 Multilingual REPACK.md
deleted file mode 100644
index 1d2c30f738b8f769f8e4e43c984b862fbefd85d4..0000000000000000000000000000000000000000
--- a/spaces/stomexserde/gpt4-ui/Examples/Adobe Acrobat Reader DC 2020.006.20034 Multilingual REPACK.md
+++ /dev/null
@@ -1,51 +0,0 @@
-
-How to Use Adobe Acrobat Reader DC 2020.006.20034 Multilingual
-Adobe Acrobat Reader DC is free software that lets you view, sign, collaborate on and annotate PDF files. It also allows you to easily edit and convert your PDFs into file formats like Excel and Word with Acrobat Pro DC, a paid subscription service.
-In this article, we will show you how to use some of the features of Adobe Acrobat Reader DC 2020.006.20034 Multilingual, which is the latest version of the software as of April 2023. This version supports multiple languages and has improved performance and security.
-Adobe Acrobat Reader DC 2020.006.20034 Multilingual
-Download ::: https://urlgoal.com/2uI5IS
-How to View PDF Files
-To view a PDF file with Adobe Acrobat Reader DC, you can either open it from your computer or from a web browser. If you have the software installed on your computer, you can double-click on the PDF file to open it. Alternatively, you can right-click on the file and choose Open with > Adobe Acrobat Reader DC.
-If you want to view a PDF file from a web browser, you can either download it to your computer or open it directly in the browser. To download it, you can click on the download icon or link on the web page and choose Save as. To open it in the browser, you can click on the PDF file or link and choose Open with > Adobe Acrobat Reader DC.
-
-How to Sign PDF Files
-To sign a PDF file with Adobe Acrobat Reader DC, you can either use an electronic signature or a digital signature. An electronic signature is a simple way to add your name or initials to a document, while a digital signature is a more secure way to verify your identity and authenticity.
-To use an electronic signature, you can follow these steps:
-
-- Open the PDF file in Adobe Acrobat Reader DC.
-- Click on the Fill & Sign tool in the right pane.
-- Click on the Sign icon at the top of the document.
-- Choose Add Signature or Add Initials.
-- Type, draw or insert an image of your signature or initials.
-- Drag and resize your signature or initials to place them where you want.
-- Click on Apply and save your changes.
-
-To use a digital signature, you need to have a digital ID and a certificate from a trusted provider. You can follow these steps:
-
-- Open the PDF file in Adobe Acrobat Reader DC.
-- Click on the Tools tab at the top of the window.
-- Select Certificates from the list of tools.
-- Click on Digitally Sign in the right pane.
-- Select the area where you want to place your signature.
-- Select your digital ID and certificate from the dialog box.
-- Enter your password and click on Sign.
-- Save your changes and close the document.
-
-How to Collaborate on PDF Files
-To collaborate on PDF files with Adobe Acrobat Reader DC, you can use the Share and Comment tools. The Share tool lets you send your PDF files to others via email or cloud services like Dropbox or Google Drive. The Comment tool lets you add comments, annotations and feedback to PDF files.
-To use the Share tool, you can follow these steps:
-
-- Open the PDF file in Adobe Acrobat Reader DC.
-- Click on the Share tool in the right pane.
-- Select how you want to share your file: Email attachment, Send personalized invitations, Get link or Share with other applications.
-- Follow the instructions on the screen to complete the sharing process.
-
-To use the Comment tool, you can follow these steps:
-
-- Open the PDF file in Adobe Acrobat Reader DC.
-- Click on the Comment tool in the right pane.
-- Select one of the comment types: Sticky note, Highlight text, Strike through text, Underline text, Add text box or Draw free form.
-- Add your comment or annotation to the document.
-- You can also reply to other comments by clicking on them and choosing Reply.
-
7b8c122e87
-
-
\ No newline at end of file
diff --git a/spaces/stomexserde/gpt4-ui/Examples/Anjaan Parindey 2015 Hindi 720p Download.md b/spaces/stomexserde/gpt4-ui/Examples/Anjaan Parindey 2015 Hindi 720p Download.md
deleted file mode 100644
index 46d07b08e1abc13aff671973448ac15ad647e032..0000000000000000000000000000000000000000
--- a/spaces/stomexserde/gpt4-ui/Examples/Anjaan Parindey 2015 Hindi 720p Download.md
+++ /dev/null
@@ -1,20 +0,0 @@
-
-Here is what I created:
-
-Anjaan Parindey 2015 Hindi 720p Download
-Anjaan Parindey is a 2015 Hindi action thriller film directed by Rajesh M. Selva and starring Vikram, Samantha Ruth Prabhu, Pasupathy and Rahul Dev. The film follows a gangster who suffers from amnesia after a botched heist and tries to find out the truth about his past and his enemies.
-The film was released on August 15, 2015 and received mixed reviews from critics and audiences. The film was praised for its stylish visuals, action sequences and Vikram's performance, but criticized for its weak plot, slow pace and lack of originality. The film was also dubbed in Telugu as Sikandar and in Tamil as Anjaan.
-Anjaan Parindey 2015 Hindi 720p Download
-Download - https://urlgoal.com/2uI8Ss
-Anjaan Parindey 2015 Hindi 720p Download is a popular search query among fans of the film who want to watch it online or download it for offline viewing. The film is available on various streaming platforms and torrent sites, but the quality and legality of these sources may vary. Users are advised to exercise caution and discretion while accessing these sites.
-Here is what I created:
-
-Anjaan Parindey 2015 Hindi 720p Download also attracts viewers who are interested in the film's music and songs. The film's soundtrack was composed by Yuvan Shankar Raja and featured six songs, sung by various artists such as Benny Dayal, Shweta Pandit, Karthik and Andrea Jeremiah. The songs were well-received and became chartbusters, especially the romantic duet "Kanave Kanave" and the peppy number "Ek Do Teen Chaar".
-Anjaan Parindey 2015 Hindi 720p Download is a search query that reflects the popularity and demand for the film among its fans and admirers. The film may not have been a blockbuster at the box office, but it has gained a cult following over the years. The film is a must-watch for those who love action thrillers and Vikram's versatile acting.
-Here is what I created:
-
-Anjaan Parindey 2015 Hindi 720p Download also showcases the film's technical aspects and cinematography. The film was shot in various locations such as Mumbai, Goa, Hyderabad and Malaysia, giving it a rich and diverse visual appeal. The film also used some advanced techniques such as motion capture and CGI to create realistic and stunning action scenes. The film's director Rajesh M. Selva was praised for his stylish and slick direction, which made the film look like a Hollywood production.
-Anjaan Parindey 2015 Hindi 720p Download also reveals the film's behind-the-scenes stories and trivia. The film was initially titled Rajapattai, but was changed to Anjaan to avoid confusion with another Vikram film of the same name. The film also faced some controversies and legal issues, such as a plagiarism allegation from a writer who claimed that the film's story was copied from his novel. The film also faced some protests and bans from some groups who objected to some scenes and dialogues in the film.
-
-Anjaan Parindey 2015 Hindi 720p Download is a search query that offers a comprehensive and entertaining insight into the film and its making. The film is a treat for fans of action thrillers and Vikram's acting prowess. The film is also a testament to the talent and creativity of the Indian film industry, which can produce world-class films with limited resources and constraints.
cec2833e83
-
-
\ No newline at end of file
diff --git a/spaces/stomexserde/gpt4-ui/Examples/Bihari Babu Movie Mp3 Song 83.md b/spaces/stomexserde/gpt4-ui/Examples/Bihari Babu Movie Mp3 Song 83.md
deleted file mode 100644
index 892fc5a956a400a34fdf7e07e0eac11892bf20a9..0000000000000000000000000000000000000000
--- a/spaces/stomexserde/gpt4-ui/Examples/Bihari Babu Movie Mp3 Song 83.md
+++ /dev/null
@@ -1,14 +0,0 @@
-
-Bihari Babu Movie Mp3 Song 83: A Tribute to Shatrughan Sinha
-Bihari Babu is a 1985 Bhojpuri movie starring Shatrughan Sinha, Tina Ambani, Anita Raj and others. The movie was directed by Dilip Bose and had music by Chitragupta[^4^]. The movie was a hit among the Bhojpuri audience and showcased the culture and traditions of Bihar.
-One of the songs from the movie, Aag Bam Bamak Bam, sung by Kishore Kumar, became very popular and is still remembered by the fans of Shatrughan Sinha. The song is a catchy and energetic number that praises the heroism and charisma of Bihari Babu, the character played by Sinha.
-Bihari Babu Movie Mp3 Song 83
-Download Zip ⚙⚙⚙ https://urlgoal.com/2uIaWO
-Bihari Babu Movie Mp3 Song 83 is a tribute to this song and to Shatrughan Sinha, who turned 83 years old on December 9, 2023. The tribute song is composed by Bill Sok, a Bhojpuri music producer and singer, who has recreated the original song with modern beats and instruments. The tribute song also features some dialogues and scenes from the movie to add nostalgia and fun.
-The tribute song is available for streaming and download on SoundCloud[^3^], where it has received positive feedback from the listeners. Bill Sok said that he wanted to pay homage to Shatrughan Sinha, who is one of his idols and inspirations. He said that he hopes that the tribute song will make the fans of Bihari Babu happy and proud.
-Bihari Babu is not the only movie where Shatrughan Sinha has acted in Bhojpuri language. He has also starred in movies like Sajanwa Bairi Bhaile Hamar (1981), Billoo Baadshah (1989), and Suryaa (1989), among others. He has also sung some Bhojpuri songs in his movies, such as Raakhi Har Saal and Anjoriya Ae Gori from Bihari Babu.
-Shatrughan Sinha is one of the most versatile and respected actors in Indian cinema. He has acted in over 200 movies in Hindi and other languages, playing a variety of roles from villain to hero to comedian. He has also been a politician and a social activist, serving as a member of parliament and a cabinet minister. He is known for his distinctive voice, style and dialogue delivery.
-Bihari Babu Movie Mp3 Song 83 is a way of celebrating the legacy and achievements of Shatrughan Sinha, who has entertained and inspired millions of people with his talent and personality. The tribute song is a reminder of his contribution to Bhojpuri cinema and culture, and his bond with his fans.
-Bihari Babu Movie Mp3 Song 83 is not the only tribute song that Bill Sok has created. He has also made tribute songs for other Bhojpuri legends, such as Sujit Kumar, Ravi Kishan, Manoj Tiwari and Nirahua. He said that he wants to keep the Bhojpuri music alive and relevant for the new generation of listeners.
-Bill Sok is a self-taught musician who started making music when he was 15 years old. He said that he learned from watching YouTube videos and listening to different genres of music. He said that he likes to experiment with different sounds and styles, and mix them with Bhojpuri elements. He said that his goal is to make Bhojpuri music more popular and accessible to a wider audience.
-
-Bihari Babu Movie Mp3 Song 83 is a song that celebrates the past and the present of Bhojpuri cinema and music. It is a song that honors Shatrughan Sinha, one of the greatest stars of Bhojpuri cinema, and showcases Bill Sok, one of the rising stars of Bhojpuri music. It is a song that connects the old and the new, and brings joy and nostalgia to the fans.
cec2833e83
-
-
\ No newline at end of file
diff --git a/spaces/stomexserde/gpt4-ui/Examples/Hytran Software 11.md b/spaces/stomexserde/gpt4-ui/Examples/Hytran Software 11.md
deleted file mode 100644
index c43e87c00c0314acb274631d737a62d60d956e4f..0000000000000000000000000000000000000000
--- a/spaces/stomexserde/gpt4-ui/Examples/Hytran Software 11.md
+++ /dev/null
@@ -1,142 +0,0 @@
-
-Hytran Software 11: A Powerful Tool for Water Hammer Analysis
- Water hammer is a phenomenon that occurs when a fluid in motion is suddenly stopped or changed direction by a valve, pump, or other device. It can cause pressure surges, noise, vibration, pipe damage, and even system failure. Therefore, it is important to analyze and prevent water hammer in pipelines.
-Hytran software 11
-DOWNLOAD ····· https://urlgoal.com/2uIabb
- One of the most effective tools for water hammer analysis is Hytran Software 11. This software is designed to simulate hydraulic transients or water hammer in pipelines using the method of characteristics. It can handle complex pipe networks with various boundary conditions, such as pumps, valves, reservoirs, surge tanks, air vessels, etc. It can also display real-time transient graphics, plot transients at selected locations, and generate reports.
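To make "the method of characteristics" concrete, the sketch below works through a textbook single-pipe transient: an upstream reservoir, a downstream valve closed instantly, and the C+/C- characteristic equations marched on a fixed grid with dx = a·dt. All parameters are made up for illustration; this shows the generic technique only, not Hytran's implementation.

```python
import math

# illustrative single-pipe data (not from Hytran)
L, D, a, f = 600.0, 0.5, 1200.0, 0.018   # length (m), diameter (m), wave speed (m/s), friction factor
H0, Q0, g = 50.0, 0.47, 9.81             # reservoir head (m), initial flow (m3/s), gravity (m/s2)
N = 10                                   # number of reaches

A = math.pi * D ** 2 / 4
dx = L / N
dt = dx / a                              # Courant condition: dx = a * dt
B = a / (g * A)                          # characteristic impedance
R = f * dx / (2 * g * D * A ** 2)        # friction coefficient per reach

# steady-state initial conditions
H = [H0 - i * R * Q0 * abs(Q0) for i in range(N + 1)]
Q = [Q0] * (N + 1)

for step in range(200):                  # march 200 time steps (10 s here)
    Hn, Qn = H[:], Q[:]
    for i in range(1, N):                # interior nodes: intersect C+ and C-
        cp = Hn[i - 1] + B * Qn[i - 1] - R * Qn[i - 1] * abs(Qn[i - 1])
        cm = Hn[i + 1] - B * Qn[i + 1] + R * Qn[i + 1] * abs(Qn[i + 1])
        H[i] = 0.5 * (cp + cm)
        Q[i] = (cp - cm) / (2 * B)
    H[0] = H0                            # upstream reservoir: head fixed, use C-
    Q[0] = (H0 - Hn[1] + B * Qn[1] - R * Qn[1] * abs(Qn[1])) / B
    Q[N] = 0.0                           # valve closed instantly: flow zero, use C+
    H[N] = Hn[N - 1] + B * Qn[N - 1] - R * Qn[N - 1] * abs(Qn[N - 1])

print(f"head at the valve after {200 * dt:.1f} s: {H[N]:.1f} m")
```

The peak head at the valve rises by roughly the Joukowsky surge a·V/g above the steady value, which is exactly the kind of pressure spike a water hammer analysis is meant to catch.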
- In this article, we will introduce Hytran Software 11 in detail. We will explain what it is, how to use it, and what are its advantages. We will also answer some frequently asked questions about Hytran Software 11.
- What is Hytran Software 11?
- The definition and features of Hytran Software 11
- Hytran Software 11 is a Windows software package for analyzing hydraulic transients or water hammer in pipelines. It is developed by Hytran Solutions, a company that specializes in water hammer analysis and consulting services.
- Hytran Software 11 has the following features:
-
-- It uses the method of characteristics to solve the governing equations of water hammer.
-- It can model any type of pipe network with branches, loops, tees, bends, etc.
-- It can handle various boundary conditions, such as pumps, valves, reservoirs, surge tanks, air vessels, etc.
-- It can account for friction losses, minor losses, cavitation effects, fluid properties, etc.
-- It can display real-time transient graphics as the transients propagate along the pipeline.
-- It can plot transients at selected locations along the pipe network simultaneously on the screen.
-- It can generate reports with tables and graphs for each scenario.
-- It has an intuitive Windows graphics interface that enables a pipeline to be drawn, input data, edited and ready for analysis in minutes.
-
- The benefits and applications of Hytran Software 11
- Hytran Software 11 has many benefits for water hammer analysis, such as:
-
-- It can help engineers to design safe and efficient pipe systems by identifying potential water hammer problems and finding optimal solutions.
-- It can help operators to monitor and control the pipe systems by predicting the transient behavior and providing guidance for operation.
-- It can help researchers to study and understand the water hammer phenomenon by simulating various scenarios and testing different hypotheses.
-
- Hytran Software 11 has many applications in different industries, such as:
-
-- Water supply and distribution
-- Wastewater collection and treatment
-- Oil and gas production and transportation
-- Power generation and cooling
-- Fire protection and sprinkler systems
-- Irrigation and drainage
-- Mining and mineral processing
-- Chemical and petrochemical processing
-- Food and beverage processing
-- Pharmaceutical and biotechnology processing
-
- How to Use Hytran Software 11?
- The system requirements and installation process of Hytran Software 11
- To use Hytran Software 11, you need a computer that meets the following system requirements:
-
-
-- Operating system: Windows XP, Vista, 7, 8, or 10 (32-bit or 64-bit)
-- Processor: Pentium 4 or higher
-- Memory: 512 MB RAM or higher
-- Disk space: 100 MB or higher
-- Display: 1024 x 768 pixels or higher resolution
-- Mouse: Required
-
- To install Hytran Software 11, you need to follow these steps:
-
-- Download the setup file from the Hytran Solutions website or from the CD-ROM provided by the company.
-- Run the setup file and follow the instructions on the screen.
-- Select the destination folder and the components to install.
-- Click on the Finish button to complete the installation.
-- A shortcut icon will be created on your desktop. Double-click on it to launch Hytran Software 11.
-
- The user interface and workflow of Hytran Software 11
- The user interface of Hytran Software 11 consists of four main parts:
-
-- The menu bar, which provides access to various commands and options.
-- The toolbar, which provides quick access to frequently used commands and tools.
-- The drawing area, which displays the pipe network diagram and the transient graphics.
-- The status bar, which shows the current mode, coordinates, zoom level, etc.
-
- The workflow of Hytran Software 11 is as follows:
-
-- Create a new project or open an existing project.
-- Draw the pipe network diagram using the drawing tools. You can also import a pipe network diagram from a DXF file or a spreadsheet file.
-- Edit the pipe network properties, such as pipe lengths, diameters, materials, etc.
-- Edit the boundary conditions, such as pumps, valves, reservoirs, surge tanks, air vessels, etc.
-- Edit the simulation parameters, such as time step, duration, output interval, etc.
-- Run the simulation and view the results. You can also save the results as a CSV file or a PDF file.
-
- The examples and tutorials of Hytran Software 11
- To help you learn how to use Hytran Software 11 effectively, you can refer to the examples and tutorials provided by Hytran Solutions. These include:
-
-- A user manual that explains the features and functions of Hytran Software 11 in detail.
-- A quick start guide that shows you how to perform a basic water hammer analysis in five steps.
-- A set of example projects that demonstrate various scenarios and applications of water hammer analysis. You can open these projects in Hytran Software 11 and modify them as you wish.
-- A set of video tutorials that show you how to use Hytran Software 11 step by step. You can watch these videos online or download them to your computer.
-
- What are the Advantages of Hytran Software 11?
- The accuracy and reliability of Hytran Software 11
- Hytran Software 11 is based on the method of characteristics, which is a well-established numerical method for solving water hammer equations. It can accurately capture the pressure waves, shock waves, and other transient phenomena that occur in pipe systems. It can also handle complex pipe networks with various boundary conditions, such as pumps, valves, reservoirs, surge tanks, air vessels, etc. It can account for friction losses, minor losses, cavitation effects, fluid properties, etc. It can also display real-time transient graphics, plot transients at selected locations, and generate reports. All these features make Hytran Software 11 a reliable and accurate tool for water hammer analysis.
- The flexibility and customization of Hytran Software 11
- Hytran Software 11 is a flexible and customizable tool for water hammer analysis. It allows you to create and edit your own pipe network diagrams using the drawing tools. You can also import a pipe network diagram from a DXF file or a spreadsheet file. You can edit the pipe network properties, such as pipe lengths, diameters, materials, etc. You can edit the boundary conditions, such as pumps, valves, reservoirs, surge tanks, air vessels, etc. You can edit the simulation parameters, such as time step, duration, output interval, etc. You can also save and load your projects for future use or modification. You can export the results as a CSV file or a PDF file. You can also customize the appearance and layout of the user interface according to your preferences.
- The support and service of Hytran Software 11
- Hytran Software 11 is supported and serviced by Hytran Solutions, a company that specializes in water hammer analysis and consulting services. Hytran Solutions provides the following support and service for Hytran Software 11 users:
-
-- A free trial version of Hytran Software 11 that you can download and use for 30 days.
-- A license key that you can purchase online or by contacting Hytran Solutions.
-- A free update service that allows you to download and install the latest version of Hytran Software 11.
-- A technical support service that allows you to contact Hytran Solutions by phone or email for any questions or issues regarding Hytran Software 11.
-- A consulting service that allows you to hire Hytran Solutions experts to perform water hammer analysis for your projects using Hytran Software 11.
-
- Conclusion
- Hytran Software 11 is a powerful tool for water hammer analysis. It can simulate hydraulic transients or water hammer in pipelines using the method of characteristics. It can handle complex pipe networks with various boundary conditions, such as pumps, valves, reservoirs, surge tanks, air vessels, etc. It can display real-time transient graphics, plot transients at selected locations, and generate reports. It has an intuitive Windows graphics interface that enables a pipeline to be drawn, input data, edited and ready for analysis in minutes. It has many benefits and applications in different industries. It is also flexible and customizable according to your needs. It is supported and serviced by Hytran Solutions, a company that specializes in water hammer analysis and consulting services.
- If you are interested in Hytran Software 11, you can visit the Hytran Solutions website to learn more about it. You can also download a free trial version of Hytran Software 11 and try it out for yourself. You can also contact Hytran Solutions for any inquiries or assistance regarding Hytran Software 11.
- FAQs
- What is the difference between Hytran Software 11 and other water hammer software?
- Hytran Software 11 is different from other water hammer software in several ways:
-
-- It uses the method of characteristics to solve the water hammer equations, which is more accurate and reliable than other methods.
-- It can handle complex pipe networks with various boundary conditions, such as pumps, valves, reservoirs, surge tanks, air vessels, etc., which are not supported by some other software.
-- It can display real-time transient graphics as the transients propagate along the pipeline, which is not possible with some other software.
-- It has an intuitive Windows graphics interface that enables a pipeline to be drawn, input data, edited and ready for analysis in minutes, which is more user-friendly than some other software.
-
- How much does Hytran Software 11 cost?
- The cost of Hytran Software 11 depends on the type of license you choose. There are two types of licenses available:
-
-- A single-user license that allows you to install and use Hytran Software 11 on one computer only.
-- A network license that allows you to install and use Hytran Software 11 on multiple computers connected by a local area network (LAN).
-
- The price of each license varies depending on the number of pipes you want to analyze using Hytran Software 11. You can check the current price list on the Hytran Solutions website or contact Hytran Solutions for a quote.
- How can I get a free trial version of Hytran Software 11?
- You can get a free trial version of Hytran Software 11 by visiting the Hytran Solutions website and filling out a request form. You will need to provide your name, email address, company name, and phone number. You will also need to agree to the terms and conditions of the trial version. After submitting the form, you will receive an email with a link to download the trial version of Hytran Software 11. The trial version is valid for 30 days and has some limitations, such as a maximum of 10 pipes per project and no report generation.
- How can I learn more about water hammer and Hytran Software 11?
- You can learn more about water hammer and Hytran Software 11 by visiting the Hytran Solutions website and accessing the following resources:
-
-- A user manual that explains the features and functions of Hytran Software 11 in detail.
-- A quick start guide that shows you how to perform a basic water hammer analysis in five steps.
-- A set of example projects that demonstrate various scenarios and applications of water hammer analysis. You can open these projects in Hytran Software 11 and modify them as you wish.
-- A set of video tutorials that show you how to use Hytran Software 11 step by step. You can watch these videos online or download them to your computer.
-- A blog that provides useful tips and insights on water hammer and Hytran Software 11.
-- A newsletter that keeps you updated on the latest news and developments on water hammer and Hytran Software 11.
-- A forum that allows you to interact with other users and experts on water hammer and Hytran Software 11.
-
- How can I contact Hytran Solutions for any inquiries or assistance regarding Hytran Software 11?
- You can contact Hytran Solutions for any inquiries or assistance regarding Hytran Software 11 by using any of the following methods:
-
-- Email: You can send an email to info@hytransolutions.com or use the contact form on the Hytran Solutions website.
-- Phone: You can call +61 7 3393 6511 or +61 7 3393 6512 from Monday to Friday, 9:00 am to 5:00 pm (Australian Eastern Standard Time).
-- Fax: You can fax +61 7 3393 6513.
-- Mail: You can mail to PO Box 1140, Cannon Hill QLD 4170, Australia.
-
- Hytran Solutions will respond to your inquiries or requests as soon as possible.
b2dd77e56b
-
-
\ No newline at end of file
diff --git a/spaces/stomexserde/gpt4-ui/Examples/Iar Embedded Workbench For Arm 6 10 Crack.md b/spaces/stomexserde/gpt4-ui/Examples/Iar Embedded Workbench For Arm 6 10 Crack.md
deleted file mode 100644
index 5d2b6532023877b3eabb42a3203bf90da8e1c2a8..0000000000000000000000000000000000000000
--- a/spaces/stomexserde/gpt4-ui/Examples/Iar Embedded Workbench For Arm 6 10 Crack.md
+++ /dev/null
@@ -1,100 +0,0 @@
-
-Iar Embedded Workbench For Arm 6 10 Crack: What You Need To Know
- If you are looking for a powerful and reliable integrated development environment (IDE) for ARM microcontrollers, you might have heard of IAR Embedded Workbench For Arm. This software is one of the best in the market, offering you a complete solution for coding, compiling, and debugging your embedded applications. However, this software is not cheap, and you might be tempted to look for a crack to unlock its full potential. In this article, we will tell you everything you need to know about IAR Embedded Workbench For Arm 6 10 Crack, including what it is, why you need it, how to get it, and how to use it.
- What is IAR Embedded Workbench For Arm?
- IAR Embedded Workbench For Arm is an IDE published by IAR Systems, a leading company in the field of development tools for embedded systems. It includes a user-friendly interface, an optimized C/C++ compiler, an assembler, a linker, a library manager, an editor, a project manager, and a C-SPY debugger. It also supports various RTOS (real-time operating systems) and JTAG adapters from different vendors.
-Iar Embedded Workbench For Arm 6 10 Crack
-Download Zip ===== https://urlgoal.com/2uIbNp
- Features and benefits of IAR Embedded Workbench For Arm
- Some of the features and benefits of IAR Embedded Workbench For Arm are:
-
-- It supports a wide range of ARM microcontrollers from various manufacturers, such as Atmel, Cypress, Infineon, NXP, STMicroelectronics, Texas Instruments, etc.
-- It provides high-quality code generation and optimization, resulting in fast and efficient applications.
-- It offers comprehensive debugging capabilities, such as breakpoints, watchpoints, trace, data logging, live variables, code coverage, etc.
-- It integrates with various analysis tools for static and runtime analysis, such as MISRA C/C++ compliance checking, stack usage analysis, code complexity analysis, etc.
-- It enables easy project management and configuration with flexible templates and wizards.
-- It supports team collaboration and version control with integration with Git, Subversion, etc.
-
- Supported microcontrollers and platforms
- IAR Embedded Workbench For Arm supports all ARM cores and architectures from ARM7 to ARMv8-M. It also supports various platforms based on ARM microcontrollers, such as:
-
-- Cortex-M (including Cortex-M0/M0+/M1/M3/M4/M7/M23/M33)
-- Cortex-R (including Cortex-R4/R5/R7/R8)
-- Cortex-A (including Cortex-A5/A7/A8/A9/A15/A17)
-- ARM7/ARM9/ARM11
-- Big.LITTLE
-- Multicore
-- TrustZone
-
- Why do you need a crack for IAR Embedded Workbench For Arm?
- IAR Embedded Workbench For Arm is not free software. You need to purchase a license to use it legally. However, the license is quite expensive, ranging from $1,000 to $10,000 depending on the features and options you choose. Moreover, the license is valid for only one year or one version of the software.
-Therefore, many people who want to use IAR Embedded Workbench For Arm for their projects might not be able to afford it or might not want to pay for it every year. That's why they look for a crack, which is a modified version of the software that bypasses the license verification and allows them to use the full version for free.
- The limitations of the trial version
- Another reason why people might need a crack for IAR Embedded Workbench For Arm is that the trial version of the software has some limitations that make it unsuitable for serious development. The trial version is available for download from the official website, but it has the following restrictions:
-
-
-- It expires after 30 days of use.
-- It limits the code size to 32 KB for Cortex-M and 16 KB for other cores.
-- It does not support all the features and options of the full version, such as RTOS support, code analysis tools, etc.
-- It does not allow saving or exporting projects or files.
-- It displays a watermark on the output files and a reminder message on the IDE.
-
- The advantages of the full version
- By using a crack for IAR Embedded Workbench For Arm, you can enjoy all the advantages of the full version without paying anything. The full version has no limitations on the code size, features, options, or duration of use. You can use it for any project you want, whether it is personal, academic, or commercial. You can also save and export your projects and files without any watermark or message. You can also update your software to the latest version without losing your crack.
- How to download and install IAR Embedded Workbench For Arm 6 10 Crack?
- If you are convinced that you need a crack for IAR Embedded Workbench For Arm 6 10, you might be wondering how to get it and install it on your computer. However, this is not an easy task, as there are many risks and challenges involved in using a cracked software. Here are some of the things you need to consider before downloading and installing a crack:
- The risks and challenges of using a cracked software
- Using a cracked software is illegal, unethical, and unsafe. You are violating the intellectual property rights of the software developer and exposing yourself to potential legal actions. You are also depriving the developer of their deserved income and discouraging them from creating more quality products. Moreover, you are putting your computer and data at risk of malware infection, data loss, corruption, or theft. Cracked software often contains viruses, trojans, worms, spyware, ransomware, or other malicious programs that can harm your system or steal your information. Cracked software also may not work properly or may cause compatibility issues with other software or hardware. Cracked software also may not receive updates or support from the developer, leaving you with outdated or buggy software.
- The steps to follow to get a working crack
- If you still want to take the risk and use a crack for IAR Embedded Workbench For Arm 6 10, you need to follow these steps:
-
-- Download the trial version of IAR Embedded Workbench For Arm 6 10 from the official website and install it on your computer.
-- Download a crack file from a reliable source. You can search online for websites that offer cracks for various software, but be careful of fake or malicious links. You can also use torrent sites or peer-to-peer networks to find cracks, but be aware of the legal and security implications. Make sure you scan any file you download with an antivirus program before opening it.
-- Extract the crack file using a file archiver program such as WinRAR or 7-Zip. You should see a file with an .exe extension or a folder with several files inside.
-- Copy the crack file or folder and paste it into the installation directory of IAR Embedded Workbench For Arm 6 10. This is usually located in C:\Program Files (x86)\IAR Systems\Embedded Workbench x.x\arm or C:\Program Files\IAR Systems\Embedded Workbench x.x\arm depending on your system architecture and version number.
-- Replace any existing file or folder with the same name as the crack file or folder. You may need to grant administrator permission to do this.
-- Run the crack file as an administrator if it is an .exe file or run the original IAR Embedded Workbench For Arm 6 10 executable file as an administrator if it is a folder. You should see a message confirming that the crack has been applied successfully.
-- Enjoy using IAR Embedded Workbench For Arm 6 10 without any limitations.
-
- How to use IAR Embedded Workbench For Arm 6 10 Crack?
- Now that you have installed IAR Embedded Workbench For Arm 6 10 Crack, you might be wondering how to use it for your projects. Here are some of the main components and tools of the IDE and some best practices and tips for coding, compiling, and debugging your applications.
- The main components and tools of the IDE
- The IDE consists of several components and tools that you can access from the main menu, the toolbar, or the keyboard shortcuts. Some of the most important ones are:
-
-- The Editor is where you write your source code in C or C++. It has features such as syntax highlighting, code completion, code folding, code navigation, etc.
-- The Project Manager is where you create, open, save, and manage your projects. It shows the structure of your project, the files and folders included, the settings and options, etc.
-- The Compiler is where you compile your source code into executable files. It has features such as error and warning messages, optimization levels, output formats, etc.
-- The Linker is where you link your compiled files into a single executable file. It has features such as memory map, linker scripts, library files, etc.
-- The Library Manager is where you create and manage your static or dynamic libraries. It has features such as library creation, library extraction, library listing, etc.
-- The Debugger is where you debug your executable file on a simulator or a target device. It has features such as breakpoints, watchpoints, trace, data logging, live variables, code coverage, etc.
-
- The best practices and tips for coding, compiling, and debugging
- Some of the best practices and tips for using IAR Embedded Workbench For Arm 6 10 Crack are:
-
-- Use the templates and wizards provided by the IDE to create and configure your projects. They will help you select the appropriate settings and options for your microcontroller and platform.
-- Use the built-in code analysis tools to check your code quality and compliance with coding standards such as MISRA C/C++. They will help you avoid errors and bugs in your code.
-- Use the optimization options of the compiler to improve the performance and efficiency of your applications. You can choose from different levels of optimization depending on your needs and preferences.
-- Use the linker scripts to customize the memory layout of your applications. You can specify the location and size of different sections of your code and data in the memory map.
-- Use the library manager to create and use libraries for common or reusable functions or modules. You can save time and space by using libraries instead of duplicating code.
-- Use the debugger to test and troubleshoot your applications on a simulator or a target device. You can use various debugging features to monitor and control the execution of your applications.
-
- Conclusion
- IAR Embedded Workbench For Arm 6 10 is a powerful and reliable IDE for ARM microcontrollers. It offers a complete solution for coding, compiling, and debugging your embedded applications. However, it is not a free software, and you need to purchase a license to use it legally. If you cannot afford or do not want to pay for a license, you might look for a crack to unlock its full potential. However, using a crack is illegal, unethical, and unsafe. You are violating the intellectual property rights of the software developer and exposing yourself to potential legal actions. You are also putting your computer and data at risk of malware infection, data loss, corruption, or theft. Moreover, you may not receive updates or support from the developer, leaving you with outdated or buggy software. Therefore, we do not recommend using a crack for IAR Embedded Workbench For Arm 6 10. Instead, we suggest that you purchase a license from the official website or look for alternative software that suits your needs and budget.
- FAQs
- Here are some frequently asked questions about IAR Embedded Workbench For Arm 6 10 Crack:
-
-- What is IAR Embedded Workbench For Arm?
-IAR Embedded Workbench For Arm is an IDE published by IAR Systems for ARM microcontrollers. It includes a user-friendly interface, an optimized C/C++ compiler, an assembler, a linker, a library manager, an editor, a project manager, and a C-SPY debugger.
-- What is IAR Embedded Workbench For Arm 6 10 Crack?
-IAR Embedded Workbench For Arm 6 10 Crack is a modified version of the software that bypasses the license verification and allows you to use the full version for free.
-- How to get IAR Embedded Workbench For Arm 6 10 Crack?
-To get IAR Embedded Workbench For Arm 6 10 Crack, you need to download the trial version of the software from the official website, download a crack file from a reliable source, extract the crack file, copy and paste it into the installation directory of the software, and run the crack file as an administrator.
-- Is it safe to use IAR Embedded Workbench For Arm 6 10 Crack?
-No, it is not safe to use IAR Embedded Workbench For Arm 6 10 Crack. Using a cracked software is illegal, unethical, and unsafe. You are violating the intellectual property rights of the software developer and exposing yourself to potential legal actions. You are also putting your computer and data at risk of malware infection, data loss, corruption, or theft. Moreover, you may not receive updates or support from the developer, leaving you with outdated or buggy software.
-- What are some alternatives to IAR Embedded Workbench For Arm 6 10 Crack?
-Some alternatives to IAR Embedded Workbench For Arm 6 10 Crack are:
-
-- Keil MDK-ARM: This is another popular IDE for ARM microcontrollers published by Keil Software. It has similar features and benefits as IAR Embedded Workbench For Arm, but it is cheaper and has a longer license duration. You can purchase a license from $995 to $5,995 depending on the features and options you choose. You can also download a free evaluation version that has no code size limitation but expires after seven days of use.
-- Eclipse: This is a free and open-source IDE that supports various programming languages and platforms, including C/C++ and ARM. It has a modular and extensible architecture that allows you to customize it with various plugins and tools. You can use Eclipse with GCC (GNU Compiler Collection) and GDB (GNU Debugger) to code, compile, and debug your ARM applications. You can also use Eclipse with other tools such as OpenOCD (Open On-Chip Debugger) or Segger J-Link to connect to your target device.
-- Visual Studio Code: This is a free and lightweight code editor that supports various programming languages and platforms, including C/C++ and ARM. It has features such as syntax highlighting, code completion, code navigation, debugging, etc. You can use Visual Studio Code with GCC and GDB to code, compile, and debug your ARM applications. You can also use Visual Studio Code with other tools such as OpenOCD or Segger J-Link to connect to your target device.
-
b2dd77e56b
-
-
\ No newline at end of file
diff --git a/spaces/sub314xxl/MusicGen/audiocraft/models/builders.py b/spaces/sub314xxl/MusicGen/audiocraft/models/builders.py
deleted file mode 100644
index 77ee5f96fea2e3c9e475fe961bc1a5ee473ed8eb..0000000000000000000000000000000000000000
--- a/spaces/sub314xxl/MusicGen/audiocraft/models/builders.py
+++ /dev/null
@@ -1,218 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-"""
-All the functions to build the relevant models and modules
-from the Hydra config.
-"""
-
-import typing as tp
-import warnings
-
-import audiocraft
-import omegaconf
-import torch
-
-from .encodec import CompressionModel, EncodecModel, FlattenedCompressionModel # noqa
-from .lm import LMModel
-from ..modules.codebooks_patterns import (
- CodebooksPatternProvider,
- DelayedPatternProvider,
- ParallelPatternProvider,
- UnrolledPatternProvider,
- VALLEPattern,
- MusicLMPattern,
-)
-from ..modules.conditioners import (
- BaseConditioner,
- ConditioningProvider,
- LUTConditioner,
- T5Conditioner,
- ConditionFuser,
- ChromaStemConditioner,
-)
-from .. import quantization as qt
-from ..utils.utils import dict_from_config
-
-
-def get_quantizer(quantizer: str, cfg: omegaconf.DictConfig, dimension: int) -> qt.BaseQuantizer:
- klass = {
- 'no_quant': qt.DummyQuantizer,
- 'rvq': qt.ResidualVectorQuantizer
- }[quantizer]
- kwargs = dict_from_config(getattr(cfg, quantizer))
- if quantizer != 'no_quant':
- kwargs['dimension'] = dimension
- return klass(**kwargs)
-
-
-def get_encodec_autoencoder(encoder_name: str, cfg: omegaconf.DictConfig):
- if encoder_name == 'seanet':
- kwargs = dict_from_config(getattr(cfg, 'seanet'))
- encoder_override_kwargs = kwargs.pop('encoder')
- decoder_override_kwargs = kwargs.pop('decoder')
- encoder_kwargs = {**kwargs, **encoder_override_kwargs}
- decoder_kwargs = {**kwargs, **decoder_override_kwargs}
- encoder = audiocraft.modules.SEANetEncoder(**encoder_kwargs)
- decoder = audiocraft.modules.SEANetDecoder(**decoder_kwargs)
- return encoder, decoder
- else:
- raise KeyError(f'Unexpected compression model {cfg.compression_model}')
-
-
-def get_compression_model(cfg: omegaconf.DictConfig) -> CompressionModel:
- """Instantiate a compression model.
- """
- if cfg.compression_model == 'encodec':
- kwargs = dict_from_config(getattr(cfg, 'encodec'))
- encoder_name = kwargs.pop('autoencoder')
- quantizer_name = kwargs.pop('quantizer')
- encoder, decoder = get_encodec_autoencoder(encoder_name, cfg)
- quantizer = get_quantizer(quantizer_name, cfg, encoder.dimension)
- frame_rate = kwargs['sample_rate'] // encoder.hop_length
- renormalize = kwargs.pop('renormalize', None)
- renorm = kwargs.pop('renorm')
- if renormalize is None:
- renormalize = renorm is not None
- warnings.warn("You are using a deprecated EnCodec model. Please migrate to new renormalization.")
- return EncodecModel(encoder, decoder, quantizer,
- frame_rate=frame_rate, renormalize=renormalize, **kwargs).to(cfg.device)
- else:
- raise KeyError(f'Unexpected compression model {cfg.compression_model}')
-
-
-def get_lm_model(cfg: omegaconf.DictConfig) -> LMModel:
- """Instantiate a transformer LM.
- """
- if cfg.lm_model == 'transformer_lm':
- kwargs = dict_from_config(getattr(cfg, 'transformer_lm'))
- n_q = kwargs['n_q']
- q_modeling = kwargs.pop('q_modeling', None)
- codebooks_pattern_cfg = getattr(cfg, 'codebooks_pattern')
- attribute_dropout = dict_from_config(getattr(cfg, 'attribute_dropout'))
- cls_free_guidance = dict_from_config(getattr(cfg, 'classifier_free_guidance'))
- cfg_prob, cfg_coef = cls_free_guidance["training_dropout"], cls_free_guidance["inference_coef"]
- fuser = get_condition_fuser(cfg)
- condition_provider = get_conditioner_provider(kwargs["dim"], cfg).to(cfg.device)
- if len(fuser.fuse2cond['cross']) > 0: # enforce cross-att programmatically
- kwargs['cross_attention'] = True
- if codebooks_pattern_cfg.modeling is None:
- assert q_modeling is not None, \
- 'LM model should either have a codebook pattern defined or transformer_lm.q_modeling'
- codebooks_pattern_cfg = omegaconf.OmegaConf.create(
- {'modeling': q_modeling, 'delay': {'delays': list(range(n_q))}}
- )
- pattern_provider = get_codebooks_pattern_provider(n_q, codebooks_pattern_cfg)
- return LMModel(
- pattern_provider=pattern_provider,
- condition_provider=condition_provider,
- fuser=fuser,
- cfg_dropout=cfg_prob,
- cfg_coef=cfg_coef,
- attribute_dropout=attribute_dropout,
- dtype=getattr(torch, cfg.dtype),
- device=cfg.device,
- **kwargs
- ).to(cfg.device)
- else:
- raise KeyError(f'Unexpected LM model {cfg.lm_model}')
-
-
-def get_conditioner_provider(output_dim: int, cfg: omegaconf.DictConfig) -> ConditioningProvider:
- """Instantiate a conditioning model.
- """
- device = cfg.device
- duration = cfg.dataset.segment_duration
- cfg = getattr(cfg, "conditioners")
- cfg = omegaconf.OmegaConf.create({}) if cfg is None else cfg
- conditioners: tp.Dict[str, BaseConditioner] = {}
- with omegaconf.open_dict(cfg):
- condition_provider_args = cfg.pop('args', {})
- for cond, cond_cfg in cfg.items():
- model_type = cond_cfg["model"]
- model_args = cond_cfg[model_type]
- if model_type == "t5":
- conditioners[str(cond)] = T5Conditioner(output_dim=output_dim, device=device, **model_args)
- elif model_type == "lut":
- conditioners[str(cond)] = LUTConditioner(output_dim=output_dim, **model_args)
- elif model_type == "chroma_stem":
- model_args.pop('cache_path', None)
- conditioners[str(cond)] = ChromaStemConditioner(
- output_dim=output_dim,
- duration=duration,
- device=device,
- **model_args
- )
- else:
- raise ValueError(f"unrecognized conditioning model: {model_type}")
- conditioner = ConditioningProvider(conditioners, device=device, **condition_provider_args)
- return conditioner
-
-
-def get_condition_fuser(cfg: omegaconf.DictConfig) -> ConditionFuser:
- """Instantiate a condition fuser object.
- """
- fuser_cfg = getattr(cfg, "fuser")
- fuser_methods = ["sum", "cross", "prepend", "input_interpolate"]
- fuse2cond = {k: fuser_cfg[k] for k in fuser_methods}
- kwargs = {k: v for k, v in fuser_cfg.items() if k not in fuser_methods}
- fuser = ConditionFuser(fuse2cond=fuse2cond, **kwargs)
- return fuser
-
-
-def get_codebooks_pattern_provider(n_q: int, cfg: omegaconf.DictConfig) -> CodebooksPatternProvider:
- """Instantiate a codebooks pattern provider object.
- """
- pattern_providers = {
- 'parallel': ParallelPatternProvider,
- 'delay': DelayedPatternProvider,
- 'unroll': UnrolledPatternProvider,
- 'valle': VALLEPattern,
- 'musiclm': MusicLMPattern,
- }
- name = cfg.modeling
- kwargs = dict_from_config(cfg.get(name)) if hasattr(cfg, name) else {}
- klass = pattern_providers[name]
- return klass(n_q, **kwargs)
-
-
-def get_debug_compression_model(device='cpu'):
- """Instantiate a debug compression model to be used for unit tests.
- """
- seanet_kwargs = {
- 'n_filters': 4,
- 'n_residual_layers': 1,
- 'dimension': 32,
- 'ratios': [10, 8, 16] # 25 Hz at 32kHz
- }
- encoder = audiocraft.modules.SEANetEncoder(**seanet_kwargs)
- decoder = audiocraft.modules.SEANetDecoder(**seanet_kwargs)
- quantizer = qt.ResidualVectorQuantizer(dimension=32, bins=400, n_q=4)
- init_x = torch.randn(8, 32, 128)
- quantizer(init_x, 1) # initialize kmeans etc.
- compression_model = EncodecModel(
- encoder, decoder, quantizer,
- frame_rate=25, sample_rate=32000, channels=1).to(device)
- return compression_model.eval()
-
-
-def get_debug_lm_model(device='cpu'):
- """Instantiate a debug LM to be used for unit tests.
- """
- pattern = DelayedPatternProvider(n_q=4)
- dim = 16
- providers = {
- 'description': LUTConditioner(n_bins=128, dim=dim, output_dim=dim, tokenizer="whitespace"),
- }
- condition_provider = ConditioningProvider(providers)
- fuser = ConditionFuser(
- {'cross': ['description'], 'prepend': [],
- 'sum': [], 'input_interpolate': []})
- lm = LMModel(
- pattern, condition_provider, fuser,
- n_q=4, card=400, dim=dim, num_heads=4, custom=True, num_layers=2,
- cross_attention=True, causal=True)
- return lm.to(device).eval()
diff --git a/spaces/sub314xxl/zeroscope-XL/app.py b/spaces/sub314xxl/zeroscope-XL/app.py
deleted file mode 100644
index 4b0a3ed9afc923326cb6a8d06f6d3dfd8d6ba8a9..0000000000000000000000000000000000000000
--- a/spaces/sub314xxl/zeroscope-XL/app.py
+++ /dev/null
@@ -1,167 +0,0 @@
-import gradio as gr
-import numpy as np
-from PIL import Image
-import cv2
-from moviepy.editor import VideoFileClip
-from share_btn import community_icon_html, loading_icon_html, share_js
-import torch
-from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
-from diffusers.utils import export_to_video
-
-pipe_xl = DiffusionPipeline.from_pretrained("cerspense/zeroscope_v2_XL", torch_dtype=torch.float16, revision="refs/pr/17")
-pipe_xl.vae.enable_slicing()
-pipe_xl.scheduler = DPMSolverMultistepScheduler.from_config(pipe_xl.scheduler.config)
-pipe_xl.enable_model_cpu_offload()
-# pipe_xl.to("cuda")  # redundant once enable_model_cpu_offload() is active: offload manages device placement itself
-
-def convert_mp4_to_frames(video_path, duration=3):
- # Read the video file
- video = cv2.VideoCapture(video_path)
-
- # Get the frames per second (fps) of the video
- fps = video.get(cv2.CAP_PROP_FPS)
-
- # Calculate the number of frames to extract
- num_frames = int(fps * duration)
-
- frames = []
- frame_count = 0
-
- # Iterate through each frame
- while True:
- # Read a frame
- ret, frame = video.read()
-
- # If the frame was not successfully read or we have reached the desired duration, break the loop
- if not ret or frame_count == num_frames:
- break
-
- # Convert BGR to RGB
- frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
-
- # Append the frame to the list of frames
- frames.append(frame)
-
- frame_count += 1
-
- # Release the video object
- video.release()
-
- # Convert the list of frames to a numpy array
- frames = np.array(frames)
-
- return frames
-
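A quick sanity check of the helper above; the file name is only a placeholder.

```python
# Hypothetical clip: expect the RGB frames covering the first 3 seconds.
frames = convert_mp4_to_frames("example_clip.mp4", duration=3)
print(frames.shape)  # (num_frames, height, width, 3), dtype uint8
```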
-def infer(prompt, video_in, denoise_strength):
-
- negative_prompt = "text, watermark, copyright, blurry, nsfw"
-
- video = convert_mp4_to_frames(video_in, duration=3)
- video_resized = [Image.fromarray(frame).resize((1024, 576)) for frame in video]
- video_frames = pipe_xl(prompt, negative_prompt=negative_prompt, video=video_resized, strength=denoise_strength).frames
- video_path = export_to_video(video_frames, output_video_path="xl_result.mp4")
-
- return "xl_result.mp4", gr.Group.update(visible=True)
-
-css = """
-#col-container {max-width: 510px; margin-left: auto; margin-right: auto;}
-a {text-decoration-line: underline; font-weight: 600;}
-.animate-spin {
- animation: spin 1s linear infinite;
-}
-
-@keyframes spin {
- from {
- transform: rotate(0deg);
- }
- to {
- transform: rotate(360deg);
- }
-}
-
-#share-btn-container {
- display: flex;
- padding-left: 0.5rem !important;
- padding-right: 0.5rem !important;
- background-color: #000000;
- justify-content: center;
- align-items: center;
- border-radius: 9999px !important;
- max-width: 13rem;
-}
-
-#share-btn-container:hover {
- background-color: #060606;
-}
-
-#share-btn {
- all: initial;
- color: #ffffff;
- font-weight: 600;
- cursor:pointer;
- font-family: 'IBM Plex Sans', sans-serif;
- margin-left: 0.5rem !important;
- padding-top: 0.5rem !important;
- padding-bottom: 0.5rem !important;
- right:0;
-}
-
-#share-btn * {
- all: unset;
-}
-
-#share-btn-container div:nth-child(-n+2){
- width: auto !important;
- min-height: 0px !important;
-}
-
-#share-btn-container .wrap {
- display: none !important;
-}
-
-#share-btn-container.hidden {
- display: none!important;
-}
-img[src*='#center'] {
- display: block;
- margin: auto;
-}
-"""
-
-with gr.Blocks(css=css) as demo:
- with gr.Column(elem_id="col-container"):
- gr.Markdown(
- """
- # Zeroscope XL
-
- This space is specifically designed for upscaling content made from
- the zeroscope_v2_576w space using vid2vid.
- Remember to use the same prompt that was used to generate the original clip.
- For demo purposes, video length is limited to 3 seconds.
-
-
- [Duplicate this Space](https://huggingface.co/spaces/fffiloni/zeroscope-XL?duplicate=true)
-
- """
- )
-
- video_in = gr.Video(type="numpy", source="upload")
- prompt_in = gr.Textbox(label="Prompt", placeholder="This must be the same prompt you used for the original clip :)", elem_id="prompt-in")
- denoise_strength = gr.Slider(label="Denoise strength", minimum=0.6, maximum=0.9, step=0.01, value=0.66)
- #inference_steps = gr.Slider(label="Inference Steps", minimum=10, maximum=100, step=1, value=40, interactive=False)
- submit_btn = gr.Button("Submit")
- video_result = gr.Video(label="Video Output", elem_id="video-output")
-
- with gr.Group(elem_id="share-btn-container", visible=False) as share_group:
- community_icon = gr.HTML(community_icon_html)
- loading_icon = gr.HTML(loading_icon_html)
- share_button = gr.Button("Share to community", elem_id="share-btn")
-
- submit_btn.click(fn=infer,
- inputs=[prompt_in, video_in, denoise_strength],
- outputs=[video_result, share_group])
-
- share_button.click(None, [], [], _js=share_js)
-
-demo.queue(max_size=12).launch()
-
\ No newline at end of file
diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Avanset VCE Player 2.2.3 With Crack BEST.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Avanset VCE Player 2.2.3 With Crack BEST.md
deleted file mode 100644
index 35d8ec5e770be348aa2c8b886c4b4c6ed03e688a..0000000000000000000000000000000000000000
--- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Avanset VCE Player 2.2.3 With Crack BEST.md
+++ /dev/null
@@ -1,6 +0,0 @@
-Avanset VCE Player 2.2.3 With Crack
Download ✔ https://cinurl.com/2uEXBI
-
-Video vce exam simulator 2.2.3 crack download - THVideos. ... Avanset VCE Exam Simulator Pro 1.1.2 + Crack Full Download Mediafire 00:01:05 · Avanset VCE ... 4d29de3e1b
-
-
-
diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Quick Heal Mobile Security Product Key For Android Crack Free [WORK] 124.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Quick Heal Mobile Security Product Key For Android Crack Free [WORK] 124.md
deleted file mode 100644
index 86ad946a15b2c34d3a2981b36ae90ffd1572751d..0000000000000000000000000000000000000000
--- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Quick Heal Mobile Security Product Key For Android Crack Free [WORK] 124.md
+++ /dev/null
@@ -1,40 +0,0 @@
-
-Title: How to Get Quick Heal Mobile Security Product Key for Android for Free
-
-Article:
-
-```
-If you are looking for a way to protect your Android device from malware, phishing, and other online threats, you might be interested in Quick Heal Mobile Security. This is a comprehensive security app that offers features such as antivirus scanning, anti-theft, call and SMS blocking, privacy protection, and more.
-Quick Heal Mobile Security Product Key For Android Crack Free 124
Download Zip ⭐ https://cinurl.com/2uEXpt
-
-However, to enjoy the full benefits of Quick Heal Mobile Security, you need to activate it with a product key. A product key is a unique code that verifies your purchase and unlocks all the premium features of the app. Normally, you would have to buy a product key from the official website or a trusted retailer.
-
-But what if you don't want to spend money on a product key? Is there a way to get Quick Heal Mobile Security product key for Android for free? The answer is yes, but it comes with some risks and drawbacks. In this article, we will show you how to get Quick Heal Mobile Security product key for Android for free using a crack method.
-
-What is a crack method?
-
-A crack method is a technique that involves modifying or bypassing the software's security mechanisms to use it without paying or registering. In other words, it is a form of software piracy that violates the terms and conditions of the software developer.
-
-
-There are many websites and forums that claim to offer crack methods for various software products, including Quick Heal Mobile Security. These crack methods may involve downloading a modified version of the app, using a key generator program, or applying a patch file to the original app.
-
-How to use a crack method for Quick Heal Mobile Security?
-
-Before you proceed with any crack method, you should be aware of the potential consequences and risks involved. Using a crack method may expose your device to malware infections, legal issues, performance issues, and loss of data. Moreover, you may not receive any updates or technical support from the software developer.
-
-If you still want to use a crack method for Quick Heal Mobile Security, here are the general steps you need to follow:
-
-
-- Uninstall the original app from your device if you have it installed.
-- Find a reliable source that offers a crack method for Quick Heal Mobile Security. Be careful not to download any files from suspicious or unknown websites.
-- Follow the instructions provided by the source to download and install the cracked app or apply the patch file to the original app.
-- Launch the app and enter the product key generated by the key generator program or provided by the source.
-- Enjoy using Quick Heal Mobile Security with all its premium features unlocked.
-
-
-Conclusion
-
-Quick Heal Mobile Security is a great app that can help you protect your Android device from various online threats. However, to use it fully, you need to activate it with a product key. If you don't want to pay for a product key, you can try using a crack method to get one for free. However, this is not recommended as it may cause more harm than good to your device and your privacy. The best way to get Quick Heal Mobile Security product key for Android is to buy it from the official website or a trusted retailer.
-``` d5da3c52bf
-
-
\ No newline at end of file
diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Tenorshare UltData 7.7.3.0 Keygen [CracksNow] Crack.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Tenorshare UltData 7.7.3.0 Keygen [CracksNow] Crack.md
deleted file mode 100644
index d2fb422dd109ab8bd7870215cf181fde0a742521..0000000000000000000000000000000000000000
--- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Tenorshare UltData 7.7.3.0 Keygen [CracksNow] Crack.md
+++ /dev/null
@@ -1,6 +0,0 @@
-Tenorshare UltData 7.7.3.0 Keygen [CracksNow] crack
DOWNLOAD > https://cinurl.com/2uEYcL
-
- . . is a flexible, powerful software tool that is designed specifically for the Kodak EASYShare (ES), the EasyShare S, EasyShare M, EasyShare M2, ImageStation(X), IP100, ImageStation(X) IP200, ImageStation(X) IP300, ImageStation(X) IP400, ImageStation(X) IP500, ImageStation(X) IP600, ImageStation(X) IP700, IP 830, . Kodak Imaging Software . . The Kodak EASYShare SDK is a flexible toolkit that facilitates the development of new applications and . . . is a flexible, powerful software tool that is designed specifically for the Kodak EASYShare, Kodak Picture Publisher, Kodak Picture Publisher advanced, Kodak Picture Publisher easy, Kodak Picture Publisher simple, Kodak Picture Publisher cloud, Kodak Picture Publisher easy for Workflow, Kodak Picture Publisher easy for Workflow, Kodak Picture Publisher with s, Kodak Picture Publisher s, Kodak Picture Publisher central, Kodak Picture Publisher subscription, Kodak Picture Publisher subscription a, Kodak Picture Publisher subscription b, Kodak Picture Publisher subscription c, Kodak Picture Publisher subscription d, Kodak Picture Publisher subscription e, Kodak Picture Publisher subscription f, Kodak Picture Publisher subscription g, Kodak Picture Publisher subscription h, Kodak Picture Publisher subscription i, Kodak Picture Publisher subscription j, Kodak Picture Publisher subscription k, Kodak Picture Publisher subscription l, Kodak Picture Publisher subscription m, Kodak Picture Publisher subscription n, Kodak Picture Publisher subscription o, Kodak Picture Publisher subscription p, Kodak Picture Publisher subscription q, Kodak Picture Publisher subscription r, Kodak Picture Publisher subscription s, Kodak Picture Publisher subscription t, Kodak Picture Publisher subscription u, Kodak Picture Publisher subscription v, Kodak Picture Publisher subscription w, Kodak Picture Publisher subscription x, Kodak Picture Publisher subscription y, Kodak Picture Publisher subscription z, Kodak Picture Publisher simple, Kodak Picture Publisher advanced, Kodak Picture Publisher simple with s, Kodak Picture Publisher simple with s and Workflow, Kodak Picture Publisher s, Kodak Picture Publisher s and Workflow, Kodak Picture Publisher Cloud, Kodak Picture Publisher Cloud and Workflow, Kodak Picture Publisher s with Workflow, Kodak Picture Publisher s and Workflow and S, Kodak Picture Publisher s with Workflow and S and S, 4fefd39f24
-
-
-
diff --git a/spaces/syam417/rvc/infer_pack/transforms.py b/spaces/syam417/rvc/infer_pack/transforms.py
deleted file mode 100644
index a11f799e023864ff7082c1f49c0cc18351a13b47..0000000000000000000000000000000000000000
--- a/spaces/syam417/rvc/infer_pack/transforms.py
+++ /dev/null
@@ -1,209 +0,0 @@
-import torch
-from torch.nn import functional as F
-
-import numpy as np
-
-
-DEFAULT_MIN_BIN_WIDTH = 1e-3
-DEFAULT_MIN_BIN_HEIGHT = 1e-3
-DEFAULT_MIN_DERIVATIVE = 1e-3
-
-
-def piecewise_rational_quadratic_transform(
- inputs,
- unnormalized_widths,
- unnormalized_heights,
- unnormalized_derivatives,
- inverse=False,
- tails=None,
- tail_bound=1.0,
- min_bin_width=DEFAULT_MIN_BIN_WIDTH,
- min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
- min_derivative=DEFAULT_MIN_DERIVATIVE,
-):
- if tails is None:
- spline_fn = rational_quadratic_spline
- spline_kwargs = {}
- else:
- spline_fn = unconstrained_rational_quadratic_spline
- spline_kwargs = {"tails": tails, "tail_bound": tail_bound}
-
- outputs, logabsdet = spline_fn(
- inputs=inputs,
- unnormalized_widths=unnormalized_widths,
- unnormalized_heights=unnormalized_heights,
- unnormalized_derivatives=unnormalized_derivatives,
- inverse=inverse,
- min_bin_width=min_bin_width,
- min_bin_height=min_bin_height,
- min_derivative=min_derivative,
- **spline_kwargs
- )
- return outputs, logabsdet
-
-
-def searchsorted(bin_locations, inputs, eps=1e-6):
- bin_locations[..., -1] += eps
- return torch.sum(inputs[..., None] >= bin_locations, dim=-1) - 1
-
-
-def unconstrained_rational_quadratic_spline(
- inputs,
- unnormalized_widths,
- unnormalized_heights,
- unnormalized_derivatives,
- inverse=False,
- tails="linear",
- tail_bound=1.0,
- min_bin_width=DEFAULT_MIN_BIN_WIDTH,
- min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
- min_derivative=DEFAULT_MIN_DERIVATIVE,
-):
- inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound)
- outside_interval_mask = ~inside_interval_mask
-
- outputs = torch.zeros_like(inputs)
- logabsdet = torch.zeros_like(inputs)
-
- if tails == "linear":
- unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1))
- constant = np.log(np.exp(1 - min_derivative) - 1)
- unnormalized_derivatives[..., 0] = constant
- unnormalized_derivatives[..., -1] = constant
-
- outputs[outside_interval_mask] = inputs[outside_interval_mask]
- logabsdet[outside_interval_mask] = 0
- else:
- raise RuntimeError("{} tails are not implemented.".format(tails))
-
- (
- outputs[inside_interval_mask],
- logabsdet[inside_interval_mask],
- ) = rational_quadratic_spline(
- inputs=inputs[inside_interval_mask],
- unnormalized_widths=unnormalized_widths[inside_interval_mask, :],
- unnormalized_heights=unnormalized_heights[inside_interval_mask, :],
- unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :],
- inverse=inverse,
- left=-tail_bound,
- right=tail_bound,
- bottom=-tail_bound,
- top=tail_bound,
- min_bin_width=min_bin_width,
- min_bin_height=min_bin_height,
- min_derivative=min_derivative,
- )
-
- return outputs, logabsdet
-
-
-def rational_quadratic_spline(
- inputs,
- unnormalized_widths,
- unnormalized_heights,
- unnormalized_derivatives,
- inverse=False,
- left=0.0,
- right=1.0,
- bottom=0.0,
- top=1.0,
- min_bin_width=DEFAULT_MIN_BIN_WIDTH,
- min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
- min_derivative=DEFAULT_MIN_DERIVATIVE,
-):
- if torch.min(inputs) < left or torch.max(inputs) > right:
- raise ValueError("Input to a transform is not within its domain")
-
- num_bins = unnormalized_widths.shape[-1]
-
- if min_bin_width * num_bins > 1.0:
- raise ValueError("Minimal bin width too large for the number of bins")
- if min_bin_height * num_bins > 1.0:
- raise ValueError("Minimal bin height too large for the number of bins")
-
- widths = F.softmax(unnormalized_widths, dim=-1)
- widths = min_bin_width + (1 - min_bin_width * num_bins) * widths
- cumwidths = torch.cumsum(widths, dim=-1)
- cumwidths = F.pad(cumwidths, pad=(1, 0), mode="constant", value=0.0)
- cumwidths = (right - left) * cumwidths + left
- cumwidths[..., 0] = left
- cumwidths[..., -1] = right
- widths = cumwidths[..., 1:] - cumwidths[..., :-1]
-
- derivatives = min_derivative + F.softplus(unnormalized_derivatives)
-
- heights = F.softmax(unnormalized_heights, dim=-1)
- heights = min_bin_height + (1 - min_bin_height * num_bins) * heights
- cumheights = torch.cumsum(heights, dim=-1)
- cumheights = F.pad(cumheights, pad=(1, 0), mode="constant", value=0.0)
- cumheights = (top - bottom) * cumheights + bottom
- cumheights[..., 0] = bottom
- cumheights[..., -1] = top
- heights = cumheights[..., 1:] - cumheights[..., :-1]
-
- if inverse:
- bin_idx = searchsorted(cumheights, inputs)[..., None]
- else:
- bin_idx = searchsorted(cumwidths, inputs)[..., None]
-
- input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0]
- input_bin_widths = widths.gather(-1, bin_idx)[..., 0]
-
- input_cumheights = cumheights.gather(-1, bin_idx)[..., 0]
- delta = heights / widths
- input_delta = delta.gather(-1, bin_idx)[..., 0]
-
- input_derivatives = derivatives.gather(-1, bin_idx)[..., 0]
- input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0]
-
- input_heights = heights.gather(-1, bin_idx)[..., 0]
-
- if inverse:
- a = (inputs - input_cumheights) * (
- input_derivatives + input_derivatives_plus_one - 2 * input_delta
- ) + input_heights * (input_delta - input_derivatives)
- b = input_heights * input_derivatives - (inputs - input_cumheights) * (
- input_derivatives + input_derivatives_plus_one - 2 * input_delta
- )
- c = -input_delta * (inputs - input_cumheights)
-
- discriminant = b.pow(2) - 4 * a * c
- assert (discriminant >= 0).all()
-
- root = (2 * c) / (-b - torch.sqrt(discriminant))
- outputs = root * input_bin_widths + input_cumwidths
-
- theta_one_minus_theta = root * (1 - root)
- denominator = input_delta + (
- (input_derivatives + input_derivatives_plus_one - 2 * input_delta)
- * theta_one_minus_theta
- )
- derivative_numerator = input_delta.pow(2) * (
- input_derivatives_plus_one * root.pow(2)
- + 2 * input_delta * theta_one_minus_theta
- + input_derivatives * (1 - root).pow(2)
- )
- logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
-
- return outputs, -logabsdet
- else:
- theta = (inputs - input_cumwidths) / input_bin_widths
- theta_one_minus_theta = theta * (1 - theta)
-
- numerator = input_heights * (
- input_delta * theta.pow(2) + input_derivatives * theta_one_minus_theta
- )
- denominator = input_delta + (
- (input_derivatives + input_derivatives_plus_one - 2 * input_delta)
- * theta_one_minus_theta
- )
- outputs = input_cumheights + numerator / denominator
-
- derivative_numerator = input_delta.pow(2) * (
- input_derivatives_plus_one * theta.pow(2)
- + 2 * input_delta * theta_one_minus_theta
- + input_derivatives * (1 - theta).pow(2)
- )
- logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
-
- return outputs, logabsdet
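For orientation, a minimal shape check of the top-level transform above. With `tails='linear'`, the derivative tensor carries one entry fewer than the number of bins, since the two edge derivatives are padded in internally; the shapes here are illustrative only.

```python
import torch

batch, num_bins = 4, 10
inputs = torch.randn(batch)
unnormalized_widths = torch.randn(batch, num_bins)
unnormalized_heights = torch.randn(batch, num_bins)
unnormalized_derivatives = torch.randn(batch, num_bins - 1)  # edge derivatives padded inside

outputs, logabsdet = piecewise_rational_quadratic_transform(
    inputs,
    unnormalized_widths,
    unnormalized_heights,
    unnormalized_derivatives,
    inverse=False,
    tails="linear",
    tail_bound=5.0,
)
print(outputs.shape, logabsdet.shape)  # both torch.Size([4])
```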
diff --git a/spaces/syf2023/chatbot/handler.py b/spaces/syf2023/chatbot/handler.py
deleted file mode 100644
index 836c8adf22ae21cd2b79ae375a80bbcf7ca04b59..0000000000000000000000000000000000000000
--- a/spaces/syf2023/chatbot/handler.py
+++ /dev/null
@@ -1,30 +0,0 @@
-from typing import Dict, List, Any
-from transformers import pipeline
-import holidays
-
-
-class EndpointHandler:
- def __init__(self, path=""):
- self.pipeline = pipeline("text-classification", model=path)
- self.holidays = holidays.US()
-
- def __call__(self, data: Dict[str, Any]) -> List[Dict[str, Any]]:
- """
- data args:
- inputs (:obj: `str`)
- date (:obj: `str`)
- Return:
- A :obj:`list` | `dict`: will be serialized and returned
- """
- # get inputs
- inputs = data.pop("inputs", data)
- # get additional date field
- date = data.pop("date", None)
-
- # check if date exists and if it is a holiday
- if date is not None and date in self.holidays:
- return [{"label": "happy", "score": 1}]
-
- # run normal prediction
- prediction = self.pipeline(inputs)
- return prediction
\ No newline at end of file
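A hedged example of how this handler might be exercised locally. The model id is only a stand-in for whatever checkpoint the endpoint is actually deployed with, and the payload shape follows the docstring above.

```python
# Stand-in checkpoint for illustration; any text-classification model would do.
handler = EndpointHandler(path="distilbert-base-uncased-finetuned-sst-2-english")

# On a US holiday the date check short-circuits to a fixed "happy" label.
print(handler({"inputs": "Traffic was terrible.", "date": "2023-07-04"}))

# Otherwise the request falls through to the text-classification pipeline.
print(handler({"inputs": "This product works great."}))
```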
diff --git a/spaces/talhaty/Faceswapper/roop/ui.py b/spaces/talhaty/Faceswapper/roop/ui.py
deleted file mode 100644
index ba693dac116bd416b91518734fa550e9dfb95c7b..0000000000000000000000000000000000000000
--- a/spaces/talhaty/Faceswapper/roop/ui.py
+++ /dev/null
@@ -1,231 +0,0 @@
-import os
-import webbrowser
-import customtkinter as ctk
-from typing import Callable, Tuple
-import cv2
-from PIL import Image, ImageOps
-
-import roop.globals
-import roop.metadata
-from roop.face_analyser import get_one_face
-from roop.capturer import get_video_frame, get_video_frame_total
-from roop.predicter import predict_frame
-from roop.processors.frame.core import get_frame_processors_modules
-from roop.utilities import is_image, is_video, resolve_relative_path
-
-ROOT = None
-ROOT_HEIGHT = 700
-ROOT_WIDTH = 600
-
-PREVIEW = None
-PREVIEW_MAX_HEIGHT = 700
-PREVIEW_MAX_WIDTH = 1200
-
-RECENT_DIRECTORY_SOURCE = None
-RECENT_DIRECTORY_TARGET = None
-RECENT_DIRECTORY_OUTPUT = None
-
-preview_label = None
-preview_slider = None
-source_label = None
-target_label = None
-status_label = None
-
-
-def init(start: Callable[[], None], destroy: Callable[[], None]) -> ctk.CTk:
- global ROOT, PREVIEW
-
- ROOT = create_root(start, destroy)
- PREVIEW = create_preview(ROOT)
-
- return ROOT
-
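For context, `init` expects the processing and teardown callbacks and hands back the root window; a minimal sketch of that wiring is below (the callback bodies are placeholders, not roop's actual implementation).

```python
def on_start() -> None:
    print('processing would start here')  # placeholder for the real frame-processing entry point

def on_destroy() -> None:
    root.quit()                           # stop the Tk main loop

root = init(on_start, on_destroy)
root.mainloop()
```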
-
-def create_root(start: Callable[[], None], destroy: Callable[[], None]) -> ctk.CTk:
- global source_label, target_label, status_label
-
- ctk.deactivate_automatic_dpi_awareness()
- ctk.set_appearance_mode('system')
- ctk.set_default_color_theme(resolve_relative_path('ui.json'))
-
- root = ctk.CTk()
- root.minsize(ROOT_WIDTH, ROOT_HEIGHT)
- root.title(f'{roop.metadata.name} {roop.metadata.version}')
- root.configure()
- root.protocol('WM_DELETE_WINDOW', lambda: destroy())
-
- source_label = ctk.CTkLabel(root, text=None)
- source_label.place(relx=0.1, rely=0.1, relwidth=0.3, relheight=0.25)
-
- target_label = ctk.CTkLabel(root, text=None)
- target_label.place(relx=0.6, rely=0.1, relwidth=0.3, relheight=0.25)
-
- source_button = ctk.CTkButton(root, text='Select a face', cursor='hand2', command=lambda: select_source_path())
- source_button.place(relx=0.1, rely=0.4, relwidth=0.3, relheight=0.1)
-
- target_button = ctk.CTkButton(root, text='Select a target', cursor='hand2', command=lambda: select_target_path())
- target_button.place(relx=0.6, rely=0.4, relwidth=0.3, relheight=0.1)
-
- keep_fps_value = ctk.BooleanVar(value=roop.globals.keep_fps)
- keep_fps_checkbox = ctk.CTkSwitch(root, text='Keep fps', variable=keep_fps_value, cursor='hand2', command=lambda: setattr(roop.globals, 'keep_fps', not roop.globals.keep_fps))
- keep_fps_checkbox.place(relx=0.1, rely=0.6)
-
- keep_frames_value = ctk.BooleanVar(value=roop.globals.keep_frames)
- keep_frames_switch = ctk.CTkSwitch(root, text='Keep frames', variable=keep_frames_value, cursor='hand2', command=lambda: setattr(roop.globals, 'keep_frames', keep_frames_value.get()))
- keep_frames_switch.place(relx=0.1, rely=0.65)
-
- keep_audio_value = ctk.BooleanVar(value=roop.globals.keep_audio)
- keep_audio_switch = ctk.CTkSwitch(root, text='Keep audio', variable=keep_audio_value, cursor='hand2', command=lambda: setattr(roop.globals, 'keep_audio', keep_audio_value.get()))
- keep_audio_switch.place(relx=0.6, rely=0.6)
-
- many_faces_value = ctk.BooleanVar(value=roop.globals.many_faces)
- many_faces_switch = ctk.CTkSwitch(root, text='Many faces', variable=many_faces_value, cursor='hand2', command=lambda: setattr(roop.globals, 'many_faces', many_faces_value.get()))
- many_faces_switch.place(relx=0.6, rely=0.65)
-
- start_button = ctk.CTkButton(root, text='Start', cursor='hand2', command=lambda: select_output_path(start))
- start_button.place(relx=0.15, rely=0.75, relwidth=0.2, relheight=0.05)
-
- stop_button = ctk.CTkButton(root, text='Destroy', cursor='hand2', command=lambda: destroy())
- stop_button.place(relx=0.4, rely=0.75, relwidth=0.2, relheight=0.05)
-
- preview_button = ctk.CTkButton(root, text='Preview', cursor='hand2', command=lambda: toggle_preview())
- preview_button.place(relx=0.65, rely=0.75, relwidth=0.2, relheight=0.05)
-
- status_label = ctk.CTkLabel(root, text=None, justify='center')
- status_label.place(relx=0.1, rely=0.9, relwidth=0.8)
-
- donate_label = ctk.CTkLabel(root, text='^_^ Donate to project ^_^', justify='center', cursor='hand2')
- donate_label.place(relx=0.1, rely=0.95, relwidth=0.8)
- donate_label.configure(text_color=ctk.ThemeManager.theme.get('RoopDonate').get('text_color'))
- donate_label.bind('