diff --git a/spaces/101-5/gpt4free/testing/aiservice/AiService.py b/spaces/101-5/gpt4free/testing/aiservice/AiService.py
deleted file mode 100644
index 287a39ef68f209a426c2381e2b7806c06148bb09..0000000000000000000000000000000000000000
--- a/spaces/101-5/gpt4free/testing/aiservice/AiService.py
+++ /dev/null
@@ -1,62 +0,0 @@
-import os, sys
-import requests
-# from ...typing import get_type_hints
-
-url = "https://aiservice.vercel.app/api/chat/answer"
-model = ['gpt-3.5-turbo']
-supports_stream = False
-needs_auth = False
-
-
-def _create_completion(model: str, messages: list, stream: bool, **kwargs):
-    base = ''
-    for message in messages:
-        base += '%s: %s\n' % (message['role'], message['content'])
-    base += 'assistant:'
-
-    headers = {
-        "accept": "*/*",
-        "content-type": "text/plain;charset=UTF-8",
-        "sec-fetch-dest": "empty",
-        "sec-fetch-mode": "cors",
-        "sec-fetch-site": "same-origin",
-        "Referer": "https://aiservice.vercel.app/chat",
-    }
-    data = {
-        "input": base
-    }
-    response = requests.post(url, headers=headers, json=data)
-    if response.status_code == 200:
-        _json = response.json()
-        yield _json['data']
-    else:
-        print(f"Error occurred: {response.status_code}", file=sys.stderr)
-        return
-
-
-
-# params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
-#     '(%s)' % ', '.join(
-#         [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
-
-
-# Temporary For ChatCompletion Class
-class ChatCompletion:
-    @staticmethod
-    def create(model: str, messages: list, provider: str = None, stream: bool = False, auth: str = None, **kwargs):
-        kwargs['auth'] = auth
-
-        if provider and needs_auth and not auth:
-            print(
-                f'ValueError: {provider} requires authentication (use auth="cookie or token or jwt ..." param)', file=sys.stderr)
-            sys.exit(1)
-
-        try:
-            return (_create_completion(model, messages, stream, **kwargs)
-                    if stream else ''.join(_create_completion(model, messages, stream, **kwargs)))
-        except TypeError as e:
-            print(e)
-            arg: str = str(e).split("'")[1]
-            print(
-                f"ValueError: {provider} does not support '{arg}' argument", file=sys.stderr)
-            sys.exit(1)
\ No newline at end of file
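A minimal usage sketch for the provider above, assuming the deleted file were kept on the import path as AiService.py (the module name is inferred from the file path; nothing below is part of the original module). Since supports_stream is False, the non-streaming call returns a single joined string:

    import AiService  # assumption: AiService.py from the diff above is importable

    reply = AiService.ChatCompletion.create(
        model='gpt-3.5-turbo',                          # the only entry in AiService.model
        messages=[{'role': 'user', 'content': 'Hi!'}],  # role/content dicts, as _create_completion expects
        provider='AiService',
        stream=False,                                   # False joins the generator into one string
    )
    print(reply)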
diff --git a/spaces/1368565466ki/Satdia/modules.py b/spaces/1368565466ki/Satdia/modules.py
deleted file mode 100644
index 56ea4145eddf19dd330a3a41ab0183efc1686d83..0000000000000000000000000000000000000000
--- a/spaces/1368565466ki/Satdia/modules.py
+++ /dev/null
@@ -1,388 +0,0 @@
-import math
-import numpy as np
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
-from torch.nn.utils import weight_norm, remove_weight_norm
-
-import commons
-from commons import init_weights, get_padding
-from transforms import piecewise_rational_quadratic_transform
-
-
-LRELU_SLOPE = 0.1
-
-
-class LayerNorm(nn.Module):
-    def __init__(self, channels, eps=1e-5):
-        super().__init__()
-        self.channels = channels
-        self.eps = eps
-
-        self.gamma = nn.Parameter(torch.ones(channels))
-        self.beta = nn.Parameter(torch.zeros(channels))
-
-    def forward(self, x):
-        x = x.transpose(1, -1)
-        x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
-        return x.transpose(1, -1)
-
-
-class ConvReluNorm(nn.Module):
-    def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout):
-        super().__init__()
-        self.in_channels = in_channels
-        self.hidden_channels = hidden_channels
-        self.out_channels = out_channels
-        self.kernel_size = kernel_size
-        self.n_layers = n_layers
-        self.p_dropout = p_dropout
-        assert n_layers > 1, "Number of layers should be larger than 1."
-
-        self.conv_layers = nn.ModuleList()
-        self.norm_layers = nn.ModuleList()
-        self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size//2))
-        self.norm_layers.append(LayerNorm(hidden_channels))
-        self.relu_drop = nn.Sequential(
-            nn.ReLU(),
-            nn.Dropout(p_dropout))
-        for _ in range(n_layers-1):
-            self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size//2))
-            self.norm_layers.append(LayerNorm(hidden_channels))
-        self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
-        self.proj.weight.data.zero_()
-        self.proj.bias.data.zero_()
-
-    def forward(self, x, x_mask):
-        x_org = x
-        for i in range(self.n_layers):
-            x = self.conv_layers[i](x * x_mask)
-            x = self.norm_layers[i](x)
-            x = self.relu_drop(x)
-        x = x_org + self.proj(x)
-        return x * x_mask
-
-
-class DDSConv(nn.Module):
-    """
-    Dilated and Depth-Separable Convolution
-    """
-    def __init__(self, channels, kernel_size, n_layers, p_dropout=0.):
-        super().__init__()
-        self.channels = channels
-        self.kernel_size = kernel_size
-        self.n_layers = n_layers
-        self.p_dropout = p_dropout
-
-        self.drop = nn.Dropout(p_dropout)
-        self.convs_sep = nn.ModuleList()
-        self.convs_1x1 = nn.ModuleList()
-        self.norms_1 = nn.ModuleList()
-        self.norms_2 = nn.ModuleList()
-        for i in range(n_layers):
-            dilation = kernel_size ** i
-            padding = (kernel_size * dilation - dilation) // 2
-            self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size,
-                                            groups=channels, dilation=dilation, padding=padding
-                                            ))
-            self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
-            self.norms_1.append(LayerNorm(channels))
-            self.norms_2.append(LayerNorm(channels))
-
-    def forward(self, x, x_mask, g=None):
-        if g is not None:
-            x = x + g
-        for i in range(self.n_layers):
-            y = self.convs_sep[i](x * x_mask)
-            y = self.norms_1[i](y)
-            y = F.gelu(y)
-            y = self.convs_1x1[i](y)
-            y = self.norms_2[i](y)
-            y = F.gelu(y)
-            y = self.drop(y)
-            x = x + y
-        return x * x_mask
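# Note: the padding arithmetic above keeps the sequence length unchanged at
# every layer. For an odd kernel size k and dilation d, a dilated Conv1d
# changes the length by 2*padding - d*(k - 1), and
# padding = (k*d - d)//2 = d*(k - 1)//2 makes that difference zero;
# e.g. with k=3 and d=9, padding = (27 - 9)//2 = 9.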
-
-
-class WN(torch.nn.Module):
-    def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0):
-        super(WN, self).__init__()
-        assert kernel_size % 2 == 1
-        self.hidden_channels = hidden_channels
-        self.kernel_size = kernel_size
-        self.dilation_rate = dilation_rate
-        self.n_layers = n_layers
-        self.gin_channels = gin_channels
-        self.p_dropout = p_dropout
-
-        self.in_layers = torch.nn.ModuleList()
-        self.res_skip_layers = torch.nn.ModuleList()
-        self.drop = nn.Dropout(p_dropout)
-
-        if gin_channels != 0:
-            cond_layer = torch.nn.Conv1d(gin_channels, 2*hidden_channels*n_layers, 1)
-            self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')
-
-        for i in range(n_layers):
-            dilation = dilation_rate ** i
-            padding = int((kernel_size * dilation - dilation) / 2)
-            in_layer = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, kernel_size,
-                                       dilation=dilation, padding=padding)
-            in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')
-            self.in_layers.append(in_layer)
-
-            # last one is not necessary
-            if i < n_layers - 1:
-                res_skip_channels = 2 * hidden_channels
-            else:
-                res_skip_channels = hidden_channels
-
-            res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
-            res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight')
-            self.res_skip_layers.append(res_skip_layer)
-
-    def forward(self, x, x_mask, g=None, **kwargs):
-        output = torch.zeros_like(x)
-        n_channels_tensor = torch.IntTensor([self.hidden_channels])
-
-        if g is not None:
-            g = self.cond_layer(g)
-
-        for i in range(self.n_layers):
-            x_in = self.in_layers[i](x)
-            if g is not None:
-                cond_offset = i * 2 * self.hidden_channels
-                g_l = g[:, cond_offset:cond_offset+2*self.hidden_channels, :]
-            else:
-                g_l = torch.zeros_like(x_in)
-
-            acts = commons.fused_add_tanh_sigmoid_multiply(
-                x_in,
-                g_l,
-                n_channels_tensor)
-            acts = self.drop(acts)
-
-            res_skip_acts = self.res_skip_layers[i](acts)
-            if i < self.n_layers - 1:
-                res_acts = res_skip_acts[:, :self.hidden_channels, :]
-                x = (x + res_acts) * x_mask
-                output = output + res_skip_acts[:, self.hidden_channels:, :]
-            else:
-                output = output + res_skip_acts
-        return output * x_mask
-
-    def remove_weight_norm(self):
-        if self.gin_channels != 0:
-            torch.nn.utils.remove_weight_norm(self.cond_layer)
-        for l in self.in_layers:
-            torch.nn.utils.remove_weight_norm(l)
-        for l in self.res_skip_layers:
-            torch.nn.utils.remove_weight_norm(l)
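# Note: commons.fused_add_tanh_sigmoid_multiply is referenced in WN.forward
# above but not included in this diff. A minimal sketch of the WaveNet-style
# gated activation it presumably implements:
#
#     import torch
#
#     def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
#         n_channels_int = n_channels[0]
#         in_act = input_a + input_b                            # conv output plus conditioning
#         t_act = torch.tanh(in_act[:, :n_channels_int, :])     # filter half
#         s_act = torch.sigmoid(in_act[:, n_channels_int:, :])  # gate half
#         return t_act * s_act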
-
-
-class ResBlock1(torch.nn.Module):
-    def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
-        super(ResBlock1, self).__init__()
-        self.convs1 = nn.ModuleList([
-            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
-                               padding=get_padding(kernel_size, dilation[0]))),
-            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
-                               padding=get_padding(kernel_size, dilation[1]))),
-            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
-                               padding=get_padding(kernel_size, dilation[2])))
-        ])
-        self.convs1.apply(init_weights)
-
-        self.convs2 = nn.ModuleList([
-            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
-                               padding=get_padding(kernel_size, 1))),
-            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
-                               padding=get_padding(kernel_size, 1))),
-            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
-                               padding=get_padding(kernel_size, 1)))
-        ])
-        self.convs2.apply(init_weights)
-
-    def forward(self, x, x_mask=None):
-        for c1, c2 in zip(self.convs1, self.convs2):
-            xt = F.leaky_relu(x, LRELU_SLOPE)
-            if x_mask is not None:
-                xt = xt * x_mask
-            xt = c1(xt)
-            xt = F.leaky_relu(xt, LRELU_SLOPE)
-            if x_mask is not None:
-                xt = xt * x_mask
-            xt = c2(xt)
-            x = xt + x
-        if x_mask is not None:
-            x = x * x_mask
-        return x
-
-    def remove_weight_norm(self):
-        for l in self.convs1:
-            remove_weight_norm(l)
-        for l in self.convs2:
-            remove_weight_norm(l)
-
-
-class ResBlock2(torch.nn.Module):
-    def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
-        super(ResBlock2, self).__init__()
-        self.convs = nn.ModuleList([
-            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
-                               padding=get_padding(kernel_size, dilation[0]))),
-            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
-                               padding=get_padding(kernel_size, dilation[1])))
-        ])
-        self.convs.apply(init_weights)
-
-    def forward(self, x, x_mask=None):
-        for c in self.convs:
-            xt = F.leaky_relu(x, LRELU_SLOPE)
-            if x_mask is not None:
-                xt = xt * x_mask
-            xt = c(xt)
-            x = xt + x
-        if x_mask is not None:
-            x = x * x_mask
-        return x
-
-    def remove_weight_norm(self):
-        for l in self.convs:
-            remove_weight_norm(l)
-
-
-class Log(nn.Module):
-    def forward(self, x, x_mask, reverse=False, **kwargs):
-        if not reverse:
-            y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask
-            logdet = torch.sum(-y, [1, 2])
-            return y, logdet
-        else:
-            x = torch.exp(x) * x_mask
-            return x
-
-
-class Flip(nn.Module):
-    def forward(self, x, *args, reverse=False, **kwargs):
-        x = torch.flip(x, [1])
-        if not reverse:
-            logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device)
-            return x, logdet
-        else:
-            return x
-
-
-class ElementwiseAffine(nn.Module):
-    def __init__(self, channels):
-        super().__init__()
-        self.channels = channels
-        self.m = nn.Parameter(torch.zeros(channels, 1))
-        self.logs = nn.Parameter(torch.zeros(channels, 1))
-
-    def forward(self, x, x_mask, reverse=False, **kwargs):
-        if not reverse:
-            y = self.m + torch.exp(self.logs) * x
-            y = y * x_mask
-            logdet = torch.sum(self.logs * x_mask, [1, 2])
-            return y, logdet
-        else:
-            x = (x - self.m) * torch.exp(-self.logs) * x_mask
-            return x
-
-
-class ResidualCouplingLayer(nn.Module):
-    def __init__(self,
-                 channels,
-                 hidden_channels,
-                 kernel_size,
-                 dilation_rate,
-                 n_layers,
-                 p_dropout=0,
-                 gin_channels=0,
-                 mean_only=False):
-        assert channels % 2 == 0, "channels should be divisible by 2"
-        super().__init__()
-        self.channels = channels
-        self.hidden_channels = hidden_channels
-        self.kernel_size = kernel_size
-        self.dilation_rate = dilation_rate
-        self.n_layers = n_layers
-        self.half_channels = channels // 2
-        self.mean_only = mean_only
-
-        self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
-        self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels)
-        self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
-        self.post.weight.data.zero_()
-        self.post.bias.data.zero_()
-
-    def forward(self, x, x_mask, g=None, reverse=False):
-        x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
-        h = self.pre(x0) * x_mask
-        h = self.enc(h, x_mask, g=g)
-        stats = self.post(h) * x_mask
-        if not self.mean_only:
-            m, logs = torch.split(stats, [self.half_channels] * 2, 1)
-        else:
-            m = stats
-            logs = torch.zeros_like(m)
-
-        if not reverse:
-            x1 = m + x1 * torch.exp(logs) * x_mask
-            x = torch.cat([x0, x1], 1)
-            logdet = torch.sum(logs, [1, 2])
-            return x, logdet
-        else:
-            x1 = (x1 - m) * torch.exp(-logs) * x_mask
-            x = torch.cat([x0, x1], 1)
-            return x
-
-
-class ConvFlow(nn.Module):
-    def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0):
-        super().__init__()
-        self.in_channels = in_channels
-        self.filter_channels = filter_channels
-        self.kernel_size = kernel_size
-        self.n_layers = n_layers
-        self.num_bins = num_bins
-        self.tail_bound = tail_bound
-        self.half_channels = in_channels // 2
-
-        self.pre = nn.Conv1d(self.half_channels, filter_channels, 1)
-        self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.)
-        self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1)
-        self.proj.weight.data.zero_()
-        self.proj.bias.data.zero_()
-
-    def forward(self, x, x_mask, g=None, reverse=False):
-        x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
-        h = self.pre(x0)
-        h = self.convs(h, x_mask, g=g)
-        h = self.proj(h) * x_mask
-
-        b, c, t = x0.shape
-        h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2)  # [b, c*?, t] -> [b, c, t, ?]
-
-        unnormalized_widths = h[..., :self.num_bins] / math.sqrt(self.filter_channels)
-        unnormalized_heights = h[..., self.num_bins:2*self.num_bins] / math.sqrt(self.filter_channels)
-        unnormalized_derivatives = h[..., 2 * self.num_bins:]
-
-        x1, logabsdet = piecewise_rational_quadratic_transform(x1,
-                                                               unnormalized_widths,
-                                                               unnormalized_heights,
-                                                               unnormalized_derivatives,
-                                                               inverse=reverse,
-                                                               tails='linear',
-                                                               tail_bound=self.tail_bound)
-
-        x = torch.cat([x0, x1], 1) * x_mask
-        logdet = torch.sum(logabsdet * x_mask, [1, 2])
-        if not reverse:
-            return x, logdet
-        else:
-            return x
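A self-contained sketch (plain PyTorch, independent of the deleted module and its commons/transforms imports) of the invertible-affine identity that ResidualCouplingLayer implements: forward maps x1 to m + x1*exp(logs), reverse maps back with (x1 - m)*exp(-logs), and the log-determinant is the sum of logs. Here m and logs are random stand-ins for the stats the WN encoder would predict:

    import torch

    b, c, t = 2, 4, 8                         # batch, channels (must be even), time
    x = torch.randn(b, c, t)
    x_mask = torch.ones(b, 1, t)              # no padding in this toy example

    x0, x1 = torch.split(x, [c // 2] * 2, dim=1)
    m = torch.randn(b, c // 2, t)             # stands in for the WN-predicted mean
    logs = 0.1 * torch.randn(b, c // 2, t)    # stands in for the WN-predicted log-scale

    y1 = (m + x1 * torch.exp(logs)) * x_mask        # forward pass (reverse=False)
    x1_back = (y1 - m) * torch.exp(-logs) * x_mask  # reverse pass (reverse=True)
    logdet = torch.sum(logs, [1, 2])                # change-of-variables term

    assert torch.allclose(x1, x1_back, atol=1e-6)   # round trip recovers the input
    print(logdet.shape)                             # torch.Size([2])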
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Cars 2 Tamil Dubbed Movie Torrent Download The Ultimate Guide for Fans.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Cars 2 Tamil Dubbed Movie Torrent Download The Ultimate Guide for Fans.md
deleted file mode 100644
index ff98008d667714a150d605404d95ad565fb07882..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Cars 2 Tamil Dubbed Movie Torrent Download The Ultimate Guide for Fans.md
+++ /dev/null
@@ -1,98 +0,0 @@
-
-
Cars 2 Tamil Dubbed Movie Torrent Download
-
Cars 2 is a 2011 animated comedy film produced by Pixar Animation Studios and Walt Disney Pictures. It is the sequel to the 2006 film Cars, and features the voices of Owen Wilson, Larry the Cable Guy, Michael Caine, Emily Mortimer, John Turturro, Eddie Izzard, and many others. The film follows the adventures of Lightning McQueen, a race car, and his best friend Mater, a tow truck, as they travel around the world for the World Grand Prix. Along the way, they get involved in a spy mission to stop a group of evil cars from sabotaging the race.
Cars 2 is a popular film among children and adults alike, and has received many awards and nominations. The film has also been dubbed in various languages, including Tamil, which is spoken by millions of people in India and Sri Lanka. Many Tamil-speaking fans of Cars 2 want to watch the film in their native language, but they may not be able to find it in cinemas or online streaming platforms. That's why they resort to downloading Cars 2 Tamil dubbed movie torrent from the internet.
-
But how can you download Cars 2 Tamil dubbed movie torrent safely and easily? What are the benefits and risks of doing so? And how can you avoid the risks and enjoy the movie without any problems? In this article, we will answer these questions and more. Read on to find out everything you need to know about Cars 2 Tamil dubbed movie torrent download.
-
How to download Cars 2 Tamil dubbed movie torrent
-
Downloading Cars 2 Tamil dubbed movie torrent is not very difficult if you follow these steps:
-
Find a reliable torrent site
-
The first step is to find a trustworthy torrent site that offers Cars 2 Tamil dubbed movie torrent. There are many torrent sites on the internet, but not all of them are safe and legal. Some of them may contain malware, viruses, or fake files that can harm your device or waste your time. Some of them may also be blocked by your internet service provider (ISP) or government due to legal issues.
-
To avoid these problems, you should look for a reputable torrent site that has a large number of users, positive reviews, and high ratings. You should also check the comments and feedback of other users who have downloaded Cars 2 Tamil dubbed movie torrent from that site. Some examples of reliable torrent sites for Cars 2 Tamil dubbed movie are archive.org, wizardly-torvalds-1bdd26.netlify.app, and isaiminiweb.com.
-
Choose a suitable torrent file
-
The next step is to choose a suitable torrent file that matches your preferences and requirements. A torrent file is a small file that contains information about the actual file you want to download, such as its name, size, quality, format, etc. You can use a torrent file to download Cars 2 Tamil dubbed movie using a torrent client.
-
To choose a suitable torrent file for Cars 2 Tamil dubbed movie, you should consider several factors, such as:
-
-
-
The size of the file: The larger the file size, the longer it will take to download. You should choose a file size that fits your device's storage space and your internet speed.
-
The quality of the file: The higher the quality of the file, the better it will look and sound on your screen. You should choose a quality that suits your device's resolution and your personal preference. For example, you can choose between HD (high definition), SD (standard definition), or CAM (camera) quality.
-
The format of the file: The format of the file determines how compatible it is with your device and media player. You should choose a format that works well with your device and media player. For example, you can choose between MP4 (MPEG-4), MKV (Matroska), AVI (Audio Video Interleave), or MOV (QuickTime) format.
-
The language of the file: The language of the file indicates what language the audio track and subtitles are in. You should choose a file that has Tamil audio track and subtitles if you want to watch Cars 2 in Tamil.
-
-
You can find this information on the torrent site or on the torrent file itself. You can also compare different torrent files based on their seeders and leechers. Seeders are users who have downloaded the file and are sharing it with others. Leechers are users who are downloading the file but are not sharing it with others. The more seeders and fewer leechers a torrent file has, the faster it will download.
-
Use a torrent client to download the movie
-
The final step is to use a torrent client to download Cars 2 Tamil dubbed movie using the torrent file you have chosen. A torrent client is a software application that enables you to download files from other users who are sharing them via a peer-to-peer (P2P) network. You can use a torrent client to download any type of file, including movies, music, games, books, etc.
-
To use a torrent client to download Cars 2 Tamil dubbed movie, you should follow these steps:
-
-
Download and install a reliable torrent client on your device. Some examples of popular torrent clients are uTorrent, BitTorrent, Vuze, qBittorrent, etc.
-
Open the torrent client and add the torrent file you have chosen for Cars 2 Tamil dubbed movie. You can do this by clicking on File > Add Torrent or by dragging and dropping the file into the client.
-
Select a destination folder where you want to save Cars 2 Tamil dubbed movie on your device.
-
Start downloading Cars 2 Tamil dubbed movie by clicking on Start or Play button on your client.
-
Wait until the download is complete. The duration of the download depends on several factors such as your internet speed, your device's performance, your client's settings, etc.
-
Once the download is complete, you can open Cars 2 Tamil dubbed movie using your media player or transfer it to another device if you want.
-
-
Benefits of downloading Cars 2 Tamil dubbed movie torrent
-
Downloading Cars 2 Tamil dubbed movie torrent has several benefits that make it an attractive option for many fans of this film. Some of these benefits are:
-
Enjoying this film in your native language
-
The main benefit is that you can watch Cars 2 in Tamil, your native language, without relying on subtitles or the original English audio. Other benefits include saving money and time, and being able to watch this film offline and share it with others. However, downloading torrents also carries risks, such as legal issues, malware, and low-quality or fake files.
-
Use a VPN service
-
One way to avoid the risks of downloading Cars 2 Tamil dubbed movie torrent is to use a VPN (virtual private network) service. A VPN hides your IP address and encrypts your traffic, which helps you bypass ISP blocks and protect your privacy. When choosing a VPN, you should look for fast speeds, strong encryption, a no-logs policy, and customer support. Some examples of good VPN services are NordVPN, ExpressVPN, Surfshark, etc.
-
Scan the torrent file before downloading
-
Another way to avoid the risks of downloading Cars 2 Tamil dubbed movie torrent is to scan the torrent file before downloading. You can use an antivirus software or an online scanner to check the torrent file for any malware or viruses. You can also use a file analyzer or a metadata extractor to check the torrent file for any quality or format issues. By scanning the torrent file before downloading, you can prevent any unwanted or harmful files from entering your device.
-
Check the comments and ratings of the torrent file
-
A third way to avoid the risks of downloading Cars 2 Tamil dubbed movie torrent is to check the comments and ratings of the torrent file. You can read the feedback and reviews of other users who have downloaded Cars 2 Tamil dubbed movie torrent from the same site or file. You can also look at the number of seeders and leechers, the date of upload, and the size and quality of the file. By checking the comments and ratings of the torrent file, you can verify its authenticity and reliability.
-
Conclusion
-
Cars 2 Tamil dubbed movie torrent download is a popular option for many fans of this film who want to watch it in their native language. Downloading Cars 2 Tamil dubbed movie torrent has many benefits, such as enjoying this film in Tamil, saving money and time, and watching this film offline and sharing it with others. However, downloading Cars 2 Tamil dubbed movie torrent also has some risks, such as legal issues and copyright infringement, malware and viruses, and low quality and fake files. To avoid these risks, you should use a VPN service, scan the torrent file before downloading, and check the comments and ratings of the torrent file.
-
We hope this article has helped you understand everything you need to know about Cars 2 Tamil dubbed movie torrent download. If you have any questions or suggestions, please feel free to leave a comment below. Happy watching!
-
FAQs
-
Here are some frequently asked questions about Cars 2 Tamil dubbed movie torrent download:
-
Q1: What is Cars 2 about?
-
A1: Cars 2 is a 2011 animated comedy film that follows the adventures of Lightning McQueen, a race car, and his best friend Mater, a tow truck, as they travel around the world for the World Grand Prix. Along the way, they get involved in a spy mission to stop a group of evil cars from sabotaging the race.
-
Q2: Is Cars 2 available in other languages?
-
A2: Yes, Cars 2 is available in many languages, including English, Spanish, French, German, Italian, Japanese, Korean, Mandarin Chinese, Indonesian, Thai, etc. You can find Cars 2 dubbed or subtitled in these languages on various platforms.
-
Q3: What are some of the best torrent sites for Cars 2 Tamil dubbed movie?
-
A3: Some of the best torrent sites for Cars 2 Tamil dubbed movie are archive.org, wizardly-torvalds-1bdd26.netlify.app, and isaiminiweb.com. These sites offer high-quality and authentic files that are safe and easy to download.
-
Q4: What are some of the best torrent clients for Cars 2 Tamil dubbed movie?
-
A4: Some of the best torrent clients for Cars 2 Tamil dubbed movie are uTorrent, BitTorrent, Vuze, qBittorrent, etc. These clients are user-friendly and efficient in downloading files from other users via P2P network.
-
Q5: How long does it take to download Cars 2 Tamil dubbed movie torrent?
A5: The download time depends on several factors, such as your internet speed, the file size and quality, the number of seeders and leechers, etc. On average, it may take from a few minutes to a few hours to download Cars 2 Tamil dubbed movie torrent.
-
-
\ No newline at end of file
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Crack House A Definition and Explanation of the Legal Risks.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Crack House A Definition and Explanation of the Legal Risks.md
deleted file mode 100644
index 993395b7a3c91884405407a751a3f9271c596384..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Crack House A Definition and Explanation of the Legal Risks.md
+++ /dev/null
@@ -1,22 +0,0 @@
-
-
What is a Crack House and Why is it Illegal?
-
A crack house is a term used to describe a building that is dedicated to the production, sale, or use of crack cocaine and other illegal drugs. Crack cocaine is a highly addictive and potent form of cocaine that is smoked from a pipe. Crack houses are usually located in abandoned or neglected properties, such as houses, apartments, or shacks, where drug dealers and users can operate with minimal interference from law enforcement or neighbors.
A crack house is typically characterized by uncleanliness, risky sexual behavior, and violence. Some of the common features of a crack house are:
-
-
Overflowing toilets, spoiled food, stained walls, peeling wallpaper, and dirty or damaged floors.
-
Trash and items used to make and smoke crack, such as baking soda, glass pipes, straws, tin foil, filters, hollowed-out light bulbs, and aluminum cans.
-
Sexual activity among multiple partners without protection. Some people who are addicted to crack may exchange sex for money or drugs. Some crack houses may also function as brothels (houses of prostitution).
-
Gun violence and physical fights among buyers, sellers, users, and guards. Some crack houses may have armed guards who use violence to keep unwanted people out or to resolve conflicts. Some people who run crack houses may also use violence to discipline their workers.
-
-
What are the legal consequences of a crack house?
-
A crack house is illegal under federal and state laws that prohibit the manufacture, distribution, or use of controlled substances. In 1986, the U.S. Congress enacted a federal drug policy known as the "crack house statute" (21 U.S.C. § 856), which makes it illegal to "knowingly open, lease, rent, use, or maintain any place, whether permanently or temporarily, for the purpose of manufacturing, distributing, or using any controlled substance."
-
This statute allows law enforcement to close down and seize any property that is used as a crack house and to charge anyone who opens, maintains, or manages a crack house with a felony offense. The penalty for violating the crack house statute can be up to 20 years of imprisonment and up to $2 million in fines.
-
-
In addition to this federal law, many states have their own laws that target crack houses and other drug houses. In 2003, the U.S. Congress also passed the Illicit Drug Anti-Proliferation Act (also known as the RAVE Act), which expanded the definition of a crack house to include any place that knowingly promotes or facilitates drug use. This law can be used to shut down raves, nightclubs, concerts, or other events where drugs are prevalent.
-
Conclusion
-
A crack house is a dangerous and illegal place where people make, sell, or use crack cocaine and other drugs. A crack house can pose serious health and safety risks to its occupants and neighbors. A crack house can also result in severe legal consequences for anyone who is involved with it. Therefore, it is advisable to avoid entering or operating a crack house at all costs.
-
We hope this article has helped you understand what a crack house is and why it is illegal. If you have any questions or suggestions, feel free to leave a comment below.
-
-
\ No newline at end of file
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Droidkit Is It Safe [Extra Quality].md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Droidkit Is It Safe [Extra Quality].md
deleted file mode 100644
index ca06e1895986d691fcc248c327343d718accd78b..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Droidkit Is It Safe [Extra Quality].md
+++ /dev/null
@@ -1,42 +0,0 @@
-
-
DroidKit: Is It Safe to Use for Android Data Recovery and Repair?
-
If you have ever lost or deleted important data on your Android device, or encountered system issues that prevent you from using your device normally, you may have searched for a solution online and come across DroidKit. DroidKit is a software that claims to help you recover and repair your Android data and system in various scenarios. But is it safe to use? In this article, we will review DroidKit and its features, and answer some common questions about its safety and reliability.
DroidKit is a software developed by iMobie, a company that specializes in data management and recovery solutions for iOS and Android devices. DroidKit is designed to help Android users recover and repair their data and system in various situations, such as:
-
-
Accidental deletion of photos, videos, contacts, messages, etc.
-
Factory reset or formatting of the device.
-
System crash, boot loop, black screen, etc.
-
Rooting failure or rooting removal.
-
Screen lock or FRP lock.
-
Device damage or loss.
-
-
DroidKit claims to offer the following features to help users solve these problems:
-
-
Data recovery from device, SD card, or Google account.
-
Data extraction from broken or locked device.
-
System repair without data loss.
-
Screen unlock and FRP bypass.
-
Data eraser and device optimizer.
-
-
Is DroidKit Safe to Use?
-
According to the official website of iMobie, DroidKit is safe to use for Android data recovery and repair. The website states that DroidKit does not contain any malware or virus, and that it does not collect or leak any personal information from the users. The website also states that DroidKit uses advanced encryption technology to protect the data transmission between the device and the computer, and that it does not modify or overwrite any original data on the device during the recovery or repair process.
-
However, some users may still have some concerns about the safety of DroidKit, such as:
-
-
-
Will DroidKit void the warranty of the device?
-
Will DroidKit root the device without permission?
-
Will DroidKit cause any damage to the device?
-
-
To answer these questions, we have done some research and testing on DroidKit and its features. Here are our findings:
-
Will DroidKit void the warranty of the device?
-
The answer is: it depends on the feature you use. Some features of DroidKit require rooting the device in order to access the data or system files. Rooting is a process that grants full access to the device's operating system, but it also voids the warranty of most manufacturers. Therefore, if you use these features of DroidKit, such as data extraction from broken device, screen unlock, FRP bypass, or data eraser, you may lose the warranty of your device.
-
However, some features of DroidKit do not require rooting the device, such as data recovery from device, SD card, or Google account, or system repair. These features do not affect the warranty of your device. Therefore, if you use these features of DroidKit, you can still keep the warranty of your device.
-
Will DroidKit root the device without permission?
-
The answer is: no. DroidKit will not root your device without your permission. If you use a feature that requires rooting the device, DroidKit will ask for your confirmation before proceeding. You can choose to accept or decline the rooting request. If you decline, DroidKit will not root your device, but it may also fail to perform the feature you want. If you accept, DroidKit will root your device temporarily and remove the root after completing the feature. However, you should be aware that rooting may still cause some risks to your device, such as data loss, system instability, or security vulnerability.
-
Will DroidKit cause any damage to the device?
-
The answer is: unlikely. DroidKit is designed to help users recover and repair their Android data and system without causing any damage to their devices. However, no tool is completely risk-free, so it is still wise to back up your important data before running any recovery or repair operation.
-
-
\ No newline at end of file
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Autodesk AutoCAD 2018.0.2 Final (x86 X64) Keygen ((BETTER)) Utorrent.md b/spaces/1gistliPinn/ChatGPT4/Examples/Autodesk AutoCAD 2018.0.2 Final (x86 X64) Keygen ((BETTER)) Utorrent.md
deleted file mode 100644
index a4a7677b61bc42bab2ba83ae82bd3cfea4f1df73..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Autodesk AutoCAD 2018.0.2 Final (x86 X64) Keygen ((BETTER)) Utorrent.md
+++ /dev/null
@@ -1,11 +0,0 @@
-
-
It includes all the latest features of AutoCAD. Autodesk AutoCAD 2017 full crack torrent 2017 is an advanced software which helps designers to create buildings. Autodesk AutoCAD 2018 (x86/x64) + keygen. AutoCAD 2018.0.2 Final (x86/x64) + keygen. PDF download.
-
When you download this patch you will get Autodesk AutoCAD 2018.0.2 Final (x86/x64). This is the latest version of Autodesk AutoCAD 2018.
-
Autodesk AutoCAD 2018.0.2 Final (x86 x64) Keygen utorrent
Autodesk AutoCAD 2018 Final (x86/x64) + keygen torrent.
-
The download also covers Autodesk AutoCAD 2018 Final (x86/x64) + keygen and Autodesk AutoCAD 2018.2 Final (x86/x64) + keygen.
-
Knowledge: Autodesk Inventor, Autodesk AutoCAD, Fusion 360. CS 5.5 patch file for Mac (10.8). AutoCAD 2018 free download for Mac. AutoCAD 2020 crack + torrent.
AutoCAD Pro 2020 crack is here. AutoCAD Pro 20 torrent download. AutoCAD 2020.12 crack keygen full free download.
-
Diagnose and repair your computer with this desktop repair software. Autodesk AutoCAD 2018: license key crack, keygen, activator - The Skilled Factory. Download Autodesk AutoCAD 2018 full version with crack - The Skilled Factory. Both AutoCAD 2018 and AutoCAD LT 2018 have been updated to version 2018, and this will be the final AutoCAD LT 2018 version. Buy a new PC or laptop and need AutoCAD LT 2018? Use the Autodesk AutoCAD LT 2018 keygen.
-
-
\ No newline at end of file
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/CValley FilterIt 463 For Adobe Illustrator CSCC 2015 CORE KeyGen 11 The Ultimate Plugin for Vector Graphics.md b/spaces/1gistliPinn/ChatGPT4/Examples/CValley FilterIt 463 For Adobe Illustrator CSCC 2015 CORE KeyGen 11 The Ultimate Plugin for Vector Graphics.md
deleted file mode 100644
index cd2d2369f24323d8141c181c3f4abd2c04d57100..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/CValley FilterIt 463 For Adobe Illustrator CSCC 2015 CORE KeyGen 11 The Ultimate Plugin for Vector Graphics.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
The Autodesk Advance Steel 2019 software is developed with the concepts of the Revit 2019 software. It is used for structural steel components, structural steel drafting, and architectural detailing.
-
The 3D modeling and detailing of steel structures can also be done with Inventor. However, Inventor is more geared toward mechanical, sheet metal, and product design, and is used for structural fabrication, bills of materials, skid drawings, and auto-creation of fabrication/skid drawings. Advance Steel is the best choice if you are going to do significant amounts of steel detailing. It is a specialist application for steel fabrication that conforms to steel standards around the globe, and it will generate detailed fabrication drawings within minutes. This means that a project which normally takes six weeks with Inventor can be completed in six days using Advance Steel. Inventor and Advance Steel have different workflows, even though they share the same tasks.
-
Download Autodesk Advance Steel 2019 Free with Activation
Autodesk provides download and install instructions both for individuals and for administrators. Individuals should sign in to their Autodesk account or education site, find the product, and click View Downloads. Select the version, platform, and language, and then select a download method. For more information, visit the Autodesk Knowledge Network.
-
Elegant, intuitive, and easy to use, the latest release of Autodesk Advance Steel (AS) is an excellent tool for steel detailing. Steel is an opaque material, which makes it difficult to visualize the shape of a model until it is complete, so building and detailing steel models can be complex and time-consuming. AS helps you speed up this process by offering a comprehensive, integrated solution for steel detailing.
-
-
\ No newline at end of file
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Download Catia V6 Crack.md b/spaces/1gistliPinn/ChatGPT4/Examples/Download Catia V6 Crack.md
deleted file mode 100644
index 6e109b2bedb3c7f931d16010bddac76fec083300..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Download Catia V6 Crack.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
-To start using PROS 3 on Windows, simply download the latest ... (CATIA v5 software free download full version with crack 64 bit) V6 full crack ...
-
-
-
diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/CarX Rally The Most Realistic and Exciting Rally Game for Android.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/CarX Rally The Most Realistic and Exciting Rally Game for Android.md
deleted file mode 100644
index fa303dd63657c14b30591246411b2f96569697d0..0000000000000000000000000000000000000000
--- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/CarX Rally The Most Realistic and Exciting Rally Game for Android.md
+++ /dev/null
@@ -1,103 +0,0 @@
-
-
CarX Rally APKAward: A Realistic and Exciting Rally Racing Game for Android
-
If you are a fan of rally racing games, you might want to check out CarX Rally APKAward, a new and thrilling game for Android devices. CarX Rally APKAward is a game that lets you experience the adrenaline rush of driving a rally car on different terrains and tracks. You can choose from a wide range of cars, customize them as you wish, and compete in various championships and tournaments. In this article, we will tell you more about CarX Rally APKAward, how to download and install it from APKAward, why you should play it, and how to play it like a pro.
CarX Rally APKAward is a rally racing game developed by CarX Technologies, the same company that created the popular CarX Drift Racing series. The game was released in June 2021 and has received positive reviews from players and critics alike. CarX Rally APKAward features:
-
-
True-to-life physics of a rally racing car. You can choose whether you want to drift or get a full grip on the road.
-
High-quality graphics and sound effects that create an immersive atmosphere.
-
A realistic damage system that affects your car's performance and appearance.
-
A dynamic weather system that changes the conditions of the track.
-
A replay mode that lets you watch your best moments from different angles.
-
-
How to download and install the game from APKAward
-
If you want to play CarX Rally APKAward on your Android device, you can download it from APKAward, a website that offers free and safe downloads of various Android games and apps. To download and install the game from APKAward, follow these steps:
Go to APKAward.com and search for CarX Rally.
-
Select the game from the results and click on the download button.
-
Wait for the download to finish and then open the downloaded file.
-
Allow the installation of unknown sources if prompted by your device.
-
Follow the instructions on the screen to complete the installation.
-
Launch the game and enjoy!
-
-
Why You Should Play CarX Rally APKAward?
-
The realistic physics and graphics of the game
-
One reason why you should play CarX Rally APKAward is its realistic physics and graphics. The game simulates the true-to-life behavior of a rally racing car, letting you drift or get a full grip on the road, while the high-quality graphics and sound effects, the realistic damage system, and the dynamic weather system create an immersive atmosphere.
-
The variety of cars and tracks to choose from
-
Another reason why you should play CarX Rally APKAward is because of the variety of cars and tracks to choose from. The game offers over 50 rally cars from different manufacturers, such as Subaru, Mitsubishi, Ford, Toyota, etc. You can also customize your car with different colors, stickers, wheels, and parts. The game has over 100 tracks from different countries and regions, such as Japan, Norway, Russia, USA, etc. You can race on different terrains and weather conditions, such as forests, deserts, mountains, rain, snow, etc.
-
The championships and tournaments to compete in
-
The third reason why you should play CarX Rally APKAward is because of the championships and tournaments to compete in. The game has a career mode where you can start as a rookie and progress through different levels of difficulty and prestige. You can participate in various championships and tournaments, such as the World Rally Championship, the European Rally Championship, the Asia-Pacific Rally Championship, etc. You can also challenge other players online in multiplayer mode and show off your skills and rankings.
-
How to Play CarX Rally APKAward?
-
The basic controls and gameplay mechanics
-
If you are new to CarX Rally APKAward, you might want to know how to play it. The game has a simple and intuitive control system that lets you steer your car with the tilt or touch of your device. You can also use the buttons on the screen to accelerate, brake, handbrake, and change gears. The game has a realistic gameplay mechanic that requires you to balance your speed and traction on different surfaces. You also need to pay attention to the co-driver's instructions and the road signs that indicate the upcoming turns and hazards.
-
The tips and tricks to master the game
-
If you want to master CarX Rally APKAward, you might want to follow these tips and tricks:
-
-
Choose the right car for each track. Different cars have different strengths and weaknesses on different terrains and weather conditions. For example, a four-wheel drive car might perform better on snow than a rear-wheel drive car.
-
Customize your car according to your preferences. You can adjust various parameters of your car, such as the suspension, the brakes, the differential, the gearbox, etc. You can also tune your car for more power or more stability.
-
Practice your driving skills on different tracks. You can use the free ride mode or the training mode to practice your driving skills on different tracks. You can also watch the replay mode to learn from your mistakes and improve your performance.
-
-
The customization and tuning options for your car
-
One of the most fun aspects of CarX Rally APKAward is the customization and tuning options for your car. You can make your car look unique and stylish by changing its color, adding stickers, changing wheels, etc. You can also make your car perform better by upgrading its parts, such as the engine, the turbo, the exhaust, etc. You can use coins and gems to buy new cars and parts or unlock them by completing achievements and missions.
-
Conclusion
-
A summary of the main points and a call to action
-
CarX Rally APKAward is a rally racing game that offers a realistic and exciting experience for Android users. You can download it from APKAward.com for free and enjoy its features, such as:
-
-
-
True-to-life physics and graphics of a rally racing car.
-
A variety of cars and tracks to choose from.
-
A career mode and a multiplayer mode to compete in championships and tournaments.
-
A customization and tuning option for your car.
-
-
If you are looking for a rally racing game that will challenge your skills and thrill your senses, you should definitely try CarX Rally APKAward. Download it now and start your rally adventure!
-
FAQs
-
What are the system requirements for CarX Rally APKAward?
-
The system requirements for CarX Rally APKAward are:
-
-
OS: Android 5.0 or higher
RAM: 2 GB or more
Storage: 1 GB or more
Internet: Required for online features
-
-
Is CarX Rally APKAward free to play?
-
Yes, CarX Rally APKAward is free to play. However, the game contains in-app purchases that allow you to buy coins and gems, which are the in-game currencies. You can use coins and gems to buy new cars, parts, stickers, etc. You can also earn coins and gems by completing achievements, missions, and races.
-
How can I get more coins and gems in CarX Rally APKAward?
-
There are several ways to get more coins and gems in CarX Rally APKAward. Some of them are:
-
-
Completing achievements and missions. You can check your progress and rewards in the menu.
-
Winning races and championships. You can earn more coins and gems depending on your rank and performance.
-
Watching ads. You can watch ads to get free coins and gems every day.
-
Using promo codes. You can enter promo codes in the settings to get free coins and gems.
-
Buying them with real money. You can buy coins and gems with real money through in-app purchases.
-
-
Can I play CarX Rally APKAward offline?
-
Yes, you can play CarX Rally APKAward offline. However, you will not be able to access some of the online features, such as multiplayer mode, online leaderboards, online tournaments, etc. You will also not be able to save your progress or sync your data across devices.
-
Can I play CarX Rally APKAward with friends?
-
Yes, you can play CarX Rally APKAward with friends. You can invite your friends to join your lobby or join their lobby in multiplayer mode. You can also chat with your friends and other players in the game. You can also challenge your friends and other players to duels and races.
-
-
\ No newline at end of file
diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Animal Revolt Battle Simulator and Fight with Hybrid Animals for Free.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Animal Revolt Battle Simulator and Fight with Hybrid Animals for Free.md
deleted file mode 100644
index 995ba2a616ea300b201df3a4d1586c54837cb93d..0000000000000000000000000000000000000000
--- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Animal Revolt Battle Simulator and Fight with Hybrid Animals for Free.md
+++ /dev/null
@@ -1,110 +0,0 @@
-
-
How to Download Animal Revolt Battle Simulator for Free
-
Animal Revolt Battle Simulator is a popular game that lets you create and watch epic battles between different types of beasts. But what if you want to play this game without paying anything? Is it possible to download Animal Revolt Battle Simulator for free? The answer is yes, and in this article, we will show you how to do it. We will also tell you what Animal Revolt Battle Simulator is, why you should download it for free, and some tips and tricks to enjoy the game.
-
What is Animal Revolt Battle Simulator?
-
Animal Revolt Battle Simulator is a physics-based sandbox game that was released in 2022 by VDimension. It is available on Steam, Google Play, and Nintendo Switch. In this game, you can:
Build your own maps or choose from a selection of ready-made ones. Place up to seven opposing armies made of different types of beasts and watch them tear each other apart in an epic battle. As the creatures fight, you can see the limbs bending, necks twisting, and bodies flying around everywhere. You can also join the fight yourself in the first-person mode and blow the enemy away with some powerful guns.
-
A variety of creatures and modes
-
You will have a huge selection of more than 70 creatures to pick from and new ones are added every two weeks. You can choose from ancient dinosaurs, dragons, aquatic animals, jungle animals, and more. You can also create your own custom monsters by combining different body parts and weapons. You can test your tactical and strategic skills in the campaign mode, where you have to pick the right beasts, place them in the right position, and command them to defeat the enemy. You can also play in the sandbox mode, where you can experiment with different scenarios and settings.
-
A fun and creative experience
-
Animal Revolt Battle Simulator is not only a game, but also a tool for creativity. You can download and upload custom monsters, maps, and buildings created by other players from the Steam Workshop. You can also share your own creations with the community. You can make your battles as realistic or as crazy as you want. You can put laser guns on your T-rex squad, make a tiny dino become a dinosaur king, or add wings to a shark. The possibilities are endless.
-
Why Download Animal Revolt Battle Simulator for Free?
-
Animal Revolt Battle Simulator is a great game that offers hours of fun and entertainment. But why should you download it for free instead of buying it? Here are some reasons:
-
Save money and enjoy the game
-
The most obvious reason is that you can save money by downloading Animal Revolt Battle Simulator for free. The game costs $14.99 on Steam, which may not be affordable for everyone. By downloading it for free, you can enjoy the game without spending anything.
-
Access new updates and features
-
Another reason is that you can access new updates and features that are added to the game regularly. The developers of Animal Revolt Battle Simulator are constantly working on improving the game and adding new content. By downloading it for free, you can get the latest version of the game and enjoy the new features and content that are added every two weeks. You can also update the game manually or automatically whenever there is a new update available.
-
Support the developers and the community
-
The last reason is that you can support the developers and the community of Animal Revolt Battle Simulator. The developers are a small team of indie game makers who have put a lot of effort and passion into this game, and playing it is a chance to show your appreciation and send them feedback. You can also join the community of Animal Revolt Battle Simulator players to share your ideas, suggestions, and creations, and to help spread the word about the game and attract more players.
-
How to Download Animal Revolt Battle Simulator for Free?
-
Now that you know what Animal Revolt Battle Simulator is and why you should download it for free, let's see how to do it. There are two main options: using a free game website or using a free game platform. Here is how each option works:
-
Option 1: Use a free game website
-
A free game website is a website that offers free downloads of various games, including Animal Revolt Battle Simulator. These websites usually host the game files on their own servers or on third-party file-sharing platforms. To use this option, you need to follow these steps:
-
Step 1: Find a reliable website
-
The first step is to find a reliable website that offers free downloads of Animal Revolt Battle Simulator. You can use a search engine like Google or Bing to look for such websites. However, you need to be careful and avoid websites that may contain viruses, malware, or other harmful content. You can check the reputation and reviews of the websites before visiting them. You can also use antivirus software or browser extensions to protect your device from any potential threats.
-
Step 2: Download the game file
-
The second step is to download the game file from the website. You need to find the download link or button on the website and click on it. You may need to complete some surveys, watch some ads, or enter some captcha codes before you can access the download link. You may also need to choose a server or a mirror link to start the download. The download speed and time may vary depending on your internet connection and the size of the file.
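If you prefer to script this step, a download can be automated with a few lines of Python. This is a minimal sketch only: the URL and file name below are placeholders, not a real download link.

```python
import requests

# Placeholder URL and output name -- substitute the link the site gives you.
url = "https://example.com/files/animal-revolt-battle-simulator.zip"
out_path = "animal-revolt-battle-simulator.zip"

# Stream the response so a multi-gigabyte archive is never held in memory at once.
with requests.get(url, stream=True, timeout=30) as response:
    response.raise_for_status()  # fail fast on 403/404 instead of saving an error page
    with open(out_path, "wb") as f:
        for chunk in response.iter_content(chunk_size=1 << 20):  # 1 MiB chunks
            f.write(chunk)

print(f"Saved {out_path}")
```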
-
Step 3: Install and run the game
-
The third step is to install and run the game on your device. Locate the downloaded file and extract it with software such as WinRAR or 7-Zip; you may need to enter a password if the archive is encrypted. Then open the extracted folder, find the setup file or the executable file of the game, run it as an administrator, and follow the instructions on the screen. You may need to agree to some terms and conditions, choose a destination folder, and create a shortcut icon. After installing the game, you can run it from your desktop or start menu and enjoy playing Animal Revolt Battle Simulator for free.
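For plain .zip archives, the extraction step can also be done from Python's standard library (password-protected RAR or 7z files still need WinRAR or 7-Zip). The file names here are placeholders:

```python
import zipfile
from pathlib import Path

archive = Path("animal-revolt-battle-simulator.zip")  # placeholder file name
dest = Path("animal_revolt")

with zipfile.ZipFile(archive) as zf:
    # List some contents first so you can spot anything suspicious before extracting.
    for info in zf.infolist()[:10]:
        print(info.filename, info.file_size)
    zf.extractall(dest)

print("Extracted to", dest.resolve())
```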
-
Option 2: Use a free game platform
-
A free game platform is a platform that offers free games or discounts on games, including Animal Revolt Battle Simulator. These platforms usually require you to create an account and install their software on your device. Some examples of such platforms are Steam, Epic Games Store, GOG.com, Humble Bundle, etc. To use this option, you need to follow these steps:
-
Step 1: Create an account on the platform
-
The first step is to create an account on the platform that offers Animal Revolt Battle Simulator for free or at a discounted price. You need to visit the official website of the platform and click on the sign up or register button. You need to enter your email address, username, password, and other details as required by the platform. You may also need to verify your email address by clicking on a link sent to your inbox.
-
Step 2: Claim the game when it is free
-
The second step is to claim the game when it is free or at a discounted price on the platform. You need to check the platform regularly for any deals or offers on Animal Revolt Battle Simulator. You may also subscribe to their newsletter or follow their social media accounts to get notified of any promotions or giveaways. When you see that Animal Revolt Battle Simulator is available for free or at a reduced price, you need to click on it and add it to your library or cart.
-
Step 3: Download and play the game
The third step is to download and play the game on your device. Install the platform's client software, log in with your account, find Animal Revolt Battle Simulator in your library or cart, and click on the download or install button. You may need to choose a location or a drive to save the game file, and the download speed and time will vary depending on your internet connection and the size of the file. After downloading the game, you can launch it from the platform or from your desktop and enjoy playing Animal Revolt Battle Simulator for free.
-
Conclusion
-
Animal Revolt Battle Simulator is a physics-based sandbox game that lets you create and watch epic battles between different types of beasts. It is a fun and creative game that offers a variety of creatures and modes. You can download Animal Revolt Battle Simulator for free by using either a free game website or a free game platform. Both options have their advantages and disadvantages, so you need to choose the one that suits you best. By downloading Animal Revolt Battle Simulator for free, you can save money, access new updates and features, and support the developers and the community.
-
FAQs
-
Here are some frequently asked questions about Animal Revolt Battle Simulator and how to download it for free:
-
Q: Is Animal Revolt Battle Simulator safe to download?
-
A: Yes, Animal Revolt Battle Simulator is safe to download as long as you use a reliable website or platform that does not contain any viruses, malware, or other harmful content. You can also use antivirus software or browser extensions to protect your device from any potential threats.
-
Q: Is Animal Revolt Battle Simulator legal to download?
-
A: Yes, Animal Revolt Battle Simulator is legal to download as long as you do not violate any terms and conditions of the website or platform that offers it for free. You also need to respect the intellectual property rights of the developers and not distribute or sell the game without their permission.
-
Q: What are the system requirements for Animal Revolt Battle Simulator?
-
A: According to Steam, these are the minimum and recommended system requirements for Animal Revolt Battle Simulator:
-
-
| Requirement | Minimum | Recommended |
| --- | --- | --- |
| OS | Windows 7 | Windows 10 |
| Processor | Intel Core i5-2300 or AMD FX-6300 | Intel Core i7-4790 or AMD Ryzen 5 1600 |
| Memory | 6 GB RAM | 8 GB RAM |
| Graphics | NVIDIA GeForce GTX 750 Ti or AMD Radeon R7 260X | NVIDIA GeForce GTX 1060 or AMD Radeon RX 580 |
| DirectX | Version 11 | Version 11 |
| Storage | 3 GB available space | 3 GB available space |
-
-
Q: How can I get more creatures and maps for Animal Revolt Battle Simulator?
-
A: You can get more creatures and maps for Animal Revolt Battle Simulator by downloading and uploading custom ones from the Steam Workshop. You can also create your own custom monsters by combining different body parts and weapons in the game.
-
Q: How can I contact the developers of Animal Revolt Battle Simulator?
-
A: You can contact the developers of Animal Revolt Battle Simulator by visiting their official website, Facebook page, Twitter account, YouTube channel, or Discord server. You can also leave a comment or a review on Steam or Google Play.
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/CarX Street APK A Realistic and Immersive Racing Game for Android.md b/spaces/1phancelerku/anime-remove-background/CarX Street APK A Realistic and Immersive Racing Game for Android.md
deleted file mode 100644
index 959bb308d339bff6ecd0986566a1d694fc9b4f6d..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/CarX Street APK A Realistic and Immersive Racing Game for Android.md
+++ /dev/null
@@ -1,99 +0,0 @@
-
-
CarX Street: A Free and Fun Racing Game for Android
-
If you are looking for a racing game that will give you a thrill and a challenge, you might want to check out CarX Street. This is a free racing game from CarX Technology that offers realistic physics and graphics, a variety of cars and tracks, and a lot of customization and tuning options. In this article, we will tell you what CarX Street is, how to download it, and how to play it.
CarX Street is a racing game that lets you experience the adrenaline of street racing. You can choose from different cars, from classic muscle cars to modern sports cars, and customize them to your liking. You can also race on different tracks, from urban streets to desert roads, and compete in different modes, such as sprint, drift, or drag.
-
A racing game with realistic physics and graphics
-
One of the main features of CarX Street is its realistic physics engine that simulates the behavior of real cars. You can feel the weight, speed, and traction of your car as you drive, as well as the effects of weather, road conditions, and collisions. The game also has stunning graphics that create an immersive atmosphere, letting you see the details of your car, the environment, and the other racers.
-
A free-to-play game with in-game upgrades
-
Another feature of CarX Street is that it is free to play. You can download and play the game without paying anything. However, if you want to unlock more cars, tracks, and upgrades, you can earn them by winning races or by purchasing them with real money. The game also has daily rewards, events, and challenges that give you more opportunities to earn coins and gems.
-
-
A game with exciting tracks and modes
-
The last feature of CarX Street is its variety of tracks and modes. The game has over 20 tracks that are based on real locations around the world. You can race on different terrains, such as asphalt, dirt, or snow, and enjoy different sceneries, such as cityscapes, mountains, or forests. The game also has different modes that test your skills and strategies. You can race in sprint mode, where you have to reach the finish line first; drift mode, where you have to score points by drifting; or drag mode, where you have to shift gears at the right time.
-
How to download CarX Street APK?
-
If you want to play CarX Street on your Android device, you need to download its APK file. APK stands for Android Package Kit, which is a file format that contains all the elements of an Android app. There are two ways to download CarX Street APK: the official way from Google Play Store or the alternative way from Filehippo.com.
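Since an APK is essentially a ZIP archive with a fixed layout, you can peek inside one with Python's standard library before installing it. A small sketch, using a placeholder file name:

```python
import zipfile

# "carx-street.apk" is a placeholder for whatever file you downloaded.
with zipfile.ZipFile("carx-street.apk") as apk:
    names = apk.namelist()
    # Every valid APK carries a binary manifest and compiled Dalvik bytecode.
    print("has manifest:", "AndroidManifest.xml" in names)
    print("dex files:", [n for n in names if n.endswith(".dex")])
```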
-
The official way from Google Play Store
-
The official way to download CarX Street APK is from Google Play Store. This is the safest and easiest way to get the app on your device. All you need to do is follow these steps:
-
-
Open Google Play Store on your device.
-
Search for "CarX Street" in the search bar.
-
Select the app from the results and tap on "Install".
-
Wait for the app to download and install on your device.
-
Launch the app and enjoy playing.
-
-
The alternative way from Filehippo.com
-
The alternative way to download CarX Street APK is from Filehippo.com. This is a website that offers free downloads of various software and apps. However, this way is not as safe and easy as the official way. You need to be careful of malware, viruses, and fake apps. You also need to enable the installation of apps from unknown sources on your device. To download CarX Street APK from Filehippo.com, follow these steps:
-
-
Open your browser and go to Filehippo.com.
-
Search for "CarX Street" in the search bar.
-
Select the app from the results and tap on "Download".
-
Wait for the APK file to download on your device.
-
Go to your device settings and enable the installation of apps from unknown sources.
-
Locate the APK file on your device and tap on it.
-
Follow the instructions to install the app on your device.
-
Launch the app and enjoy playing.
-
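One practical safety check before installing: if the download page publishes a checksum, compare it against the file you actually received. A sketch with placeholder values:

```python
import hashlib

expected = "0123abcd..."  # placeholder: the SHA-256 the download page lists

with open("carx-street.apk", "rb") as f:  # placeholder file name
    actual = hashlib.sha256(f.read()).hexdigest()

print("checksum matches:", actual == expected)  # install only if this is True
```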
-
The advantages and disadvantages of each way
-
Both ways have their pros and cons. The official way from Google Play Store is more secure, reliable, and convenient. You can be sure that the app is authentic, updated, and compatible with your device, and you don't need to change any settings or risk any malware. However, the official way may not work if you have low storage space, a slow internet connection, or restricted access to Google Play Store.
-
The alternative way from Filehippo.com is more flexible, fast, and accessible. You can download the app even if you don't have enough space, a good connection, or a Google account. You can also get the app before it is officially released or if it is not available in your region. However, the alternative way may expose you to malware, viruses, and fake apps. You also need to change your settings and trust an unknown source.
-
How to play CarX Street?
-
Once you have downloaded and installed CarX Street on your device, you can start playing it. The game is easy to learn but hard to master. You need to know the basic controls and features, the car customization and tuning options, and the tips and tricks to win races.
-
The basic controls and features
-
The basic controls of CarX Street are simple and intuitive. You can use the buttons on the screen or tilt your device to steer your car. You can also use the pedals to accelerate or brake. You can switch between different camera views by tapping on the camera icon. You can pause the game by tapping on the menu icon.
-
The game also has some features that make it more fun and realistic. You can use the nitro boost by tapping on the nitro icon. This will give you a temporary speed boost that can help you overtake your opponents or escape from the police. You can also use the handbrake by tapping on the handbrake icon. This will help you drift around corners or perform other maneuvers.
-
The car customization and tuning options
-
One of the best parts of CarX Street is that you can customize and tune your car to your liking. You can access these options by tapping on the garage icon in the main menu. You can change the appearance of your car by choosing different colors, decals, wheels, spoilers, bumpers, hoods, and more. You can also improve the performance of your car by upgrading its engine, transmission, suspension, brakes, tires, and more.
-
You can also tune your car by adjusting its parameters, such as power, torque, weight distribution, steering angle, camber angle, tire pressure, and more. Tuning your car can help you optimize its handling and speed for different tracks and modes. However, tuning your car requires some knowledge and experimentation. You need to find the right balance between stability and agility for your car.
-
The tips and tricks to win races
-
To win races in CarX Street, you need more than just a fast car. You also need some skills and strategies. Here are some tips and tricks that can help you win races:
-
-
Choose a car that suits your style and preference. Different cars have different strengths and weaknesses. Some cars are faster but harder to control; some cars are easier to handle but slower; some cars are good for drifting but bad for sprinting; etc. Choose a car that matches your driving style and preference.
-
Customize and tune your car according to the track and mode. Different tracks and modes require different setups for your car. For example, if you are racing on a dirt track, you might want to lower your tire pressure and increase your suspension stiffness; if you are racing in drift mode, you might want to increase your steering angle and lower your brake force; etc. Customize and tune your car according to the track and mode you are playing.
-
Learn the tracks and practice your skills. Each track has its own layout, features, and challenges. You need to learn the tracks and memorize their turns, shortcuts, obstacles, and hazards. You also need to practice your skills, such as steering, braking, drifting, boosting, and shifting. The more you play, the more you will improve your skills and confidence.
-
Use the nitro boost wisely. The nitro boost is a powerful tool that can give you an edge in races. However, it is also limited and needs to be recharged. You can recharge your nitro by performing drifts, jumps, or near misses. You can use your nitro by tapping on the nitro icon or by double-tapping on the accelerator pedal. You should use your nitro wisely, such as when you need to overtake your opponents, escape from the police, or reach the finish line.
-
Watch out for the police and other racers. CarX Street is not just a racing game; it is also a chase game. You will encounter the police and other racers who will try to stop you or slow you down. You need to watch out for them and avoid their attacks. You can also fight back by ramming them, blocking them, or using your nitro to outrun them.
-
-
Conclusion
-
CarX Street is a free and fun racing game for Android that offers realistic physics and graphics, a variety of cars and tracks, and a lot of customization and tuning options. You can download CarX Street APK from Google Play Store or Filehippo.com, depending on your preference and situation. You can also play CarX Street by learning the basic controls and features, customizing and tuning your car, and applying some tips and tricks to win races. If you are a fan of racing games, you should definitely give CarX Street a try.
-
FAQs
-
Here are some frequently asked questions about CarX Street:
-
-
| Question | Answer |
| --- | --- |
| Is CarX Street compatible with my device? | CarX Street requires Android 6.0 or higher and at least 1 GB of RAM to run smoothly. You can check your device specifications in your settings. |
| How can I save my progress in CarX Street? | Connect your game account to your Google Play Games account by tapping on the settings icon in the main menu and then tapping on the Google Play Games icon. |
| How can I get more coins and gems in CarX Street? | Win races, complete events and challenges, claim daily rewards, watch ads, or buy them with real money. |
| How can I contact the developers of CarX Street? | Send an email to support@carx-tech.com or visit their website at https://carx-tech.com/. |
| How can I report a bug or a problem in CarX Street? | Tap on the settings icon in the main menu and then on the feedback icon, or send an email to support@carx-tech.com with a screenshot or a video of the problem. |
-
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Chiskop Askies - The Ultimate Kwaito Mp3 Download.md b/spaces/1phancelerku/anime-remove-background/Chiskop Askies - The Ultimate Kwaito Mp3 Download.md
deleted file mode 100644
index 95d1f54afb2c33378ea6be5e91c158d79915d557..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Chiskop Askies - The Ultimate Kwaito Mp3 Download.md
+++ /dev/null
@@ -1,134 +0,0 @@
-
-
Chiskop Askies Mp3 Download: How to Enjoy the Classic Kwaito Song
-
If you are a fan of South African music, you have probably heard of Chiskop, one of the most popular kwaito groups in the country. And if you have heard of Chiskop, you have definitely heard of their hit song Askies, which is considered a classic in the genre. But do you know what the song is about, why it is so popular, and how to download it in mp3 format? In this article, we will answer these questions and more, so that you can enjoy this amazing song anytime, anywhere.
Chiskop is a South African kwaito group that was formed in 1996 by four members: Mduduzi Tshabalala, Sibusiso Thanjekwayo, Siphiwe Sibisi, and Gabi Le Roux. Kwaito is a style of music that emerged in South Africa in the 1990s, influenced by house, hip hop, reggae, and African rhythms. It is characterized by slow tempo, catchy melodies, repetitive lyrics, and social commentary. Kwaito became a symbol of post-apartheid youth culture, expressing their hopes, challenges, and identities.
-
One of Chiskop's most famous songs is Askies, which was released in 2003 as part of their album Sunday. The song is a catchy and upbeat tune that talks about apologizing to a lover for making a mistake. The word askies means sorry in Afrikaans, which is one of the official languages of South Africa. The song became a huge hit in South Africa and beyond, winning several awards and nominations. It also showcased Chiskop's unique blend of kwaito, pop, and jazz.
-
If you want to listen to this song anytime, anywhere, you might want to download it in mp3 format. Mp3 is a type of digital audio file that compresses sound data without losing much quality. It allows you to store more songs on your device, play them offline, and transfer them easily. However, you need to be careful where you download mp3 files from, as some sources might be illegal or unsafe. In this article, we will show you how to download Chiskop's Askies in mp3 format legally and safely.
-
What is Chiskop and What is Kwaito Music
-
The History and Origin of Chiskop
-
Chiskop was formed in 1996 by four young men who shared a passion for music. They met at a recording studio in Johannesburg, where they were working on different projects. They decided to form a group and named it Chiskop, which means bald head in Zulu. They chose this name because they all had shaved heads at the time.
-
The group started to perform at various clubs and events, gaining popularity and recognition. They also collaborated with other artists, such as Mandoza, Arthur Mafokate, and Brenda Fassie. They released their debut album, Chiskop, in 1998, which featured songs like Klaimar, Sika Lekhekhe, and Shapa Bafana Shapa. The album was a success, selling over 100,000 copies and earning them a South African Music Award (SAMA) for Best Kwaito Album.
-
-
The Characteristics and Features of Kwaito Music
-
Kwaito is a genre of music that originated in South Africa in the early 1990s. It is derived from the word kwai, which means cool or hot in township slang. Kwaito is influenced by various musical styles, such as house, hip hop, reggae, and African rhythms. It is characterized by slow tempo, catchy melodies, repetitive lyrics, and social commentary. Kwaito lyrics are usually sung or rapped in local languages, such as Zulu, Xhosa, Sotho, and Afrikaans.
-
Kwaito music reflects the experiences and aspirations of the post-apartheid youth in South Africa. It addresses issues such as poverty, crime, violence, HIV/AIDS, racism, and democracy. It also celebrates the diversity and creativity of the South African culture and identity. Kwaito music is often accompanied by a distinctive dance style, which involves moving the hips and shoulders in sync with the beat.
-
The Impact and Influence of Chiskop and Kwaito on South African Culture and Society
-
Chiskop and kwaito have had a significant impact and influence on the South African culture and society. They have contributed to the development and recognition of the South African music industry, both locally and internationally. They have also inspired and influenced many other artists and genres, such as Afro-pop, hip hop, R&B, gospel, and jazz.
-
Chiskop and kwaito have also played a role in shaping the South African social and political landscape. They have given voice to the marginalized and oppressed groups in the country, especially the black youth. They have challenged the stereotypes and prejudices that exist in the society. They have also promoted a sense of unity and pride among the South Africans of different backgrounds and cultures.
-
What is the Song Askies About and Why is it Popular
-
The Meaning and Message of the Song Askies
-
The song Askies is about apologizing to a lover for making a mistake. The word askies means sorry in Afrikaans. The song expresses regret and remorse for hurting the lover's feelings. It also asks for forgiveness and another chance to make things right. The song conveys a sincere and heartfelt emotion that many people can relate to.
-
The Musical Style and Elements of the Song Askies
-
The song Askies is a catchy and upbeat tune that combines kwaito, pop, and jazz elements. It has a slow tempo of 100 beats per minute (bpm), which creates a relaxed and groovy mood. It has a simple chord progression of C-G-Am-F, which makes it easy to sing along to. It has a catchy melody that repeats throughout the song. It has a chorus that consists of the word askies repeated four times, followed by the phrase "I'm sorry baby". It has a bridge that features a saxophone solo that adds some jazz flavor to the song.
-
The Reception and Reviews of the Song Askies
-
The song Askies was released in 2003 as part of Chiskop's album Sunday. The song was an instant hit in South Africa and beyond. It topped the charts on various radio stations and TV channels. It won several awards and nominations, such as the SAMA for Song of the Year, the Metro FM Award for Best Kwaito Single, and the Channel O Music Video Award for Best Kwaito Video. It also received positive reviews from critics and fans alike.
-
The song Askies was praised for its catchy tune, its sincere lyrics, its unique blend of kwaito, pop, and jazz elements, and its appeal to a wide range of audiences. It also became a popular song for weddings, parties, and karaoke sessions. It is considered a classic in the kwaito genre and one of Chiskop's best songs ever.
-
How to Download the Mp3 Version of the Song Legally and Safely
-
The Benefits and Advantages of Downloading Mp3 Files
-
Mp3 is a type of digital audio file that compresses sound data without losing much quality. It allows you to store more songs on your device, play them offline, and transfer them easily. Downloading mp3 files has many benefits and advantages, such as:
-
-
You can save space on your device, as mp3 files are smaller than other audio formats.
-
You can listen to your favorite songs anytime, anywhere, without relying on internet connection or streaming services.
-
You can create your own playlists and mixtapes, and share them with your friends and family.
-
You can enjoy high-quality sound, as mp3 files can preserve most of the original audio features.
-
You can support your favorite artists, as some of them offer mp3 downloads on their official websites or platforms.
-
-
The Sources and Platforms Where You Can Find the Song Askies in Mp3 Format
-
If you want to download the song Askies in mp3 format, you need to be careful where you get it from. Some sources might be illegal or unsafe, as they might violate the copyright laws or contain viruses or malware. You should always use legal and safe sources and platforms, such as:
-
-
Official websites or platforms of the artists or labels, such as Chiskop's website or Universal Music South Africa's website.
-
Online music stores or services that sell or stream mp3 files, such as iTunes, Amazon Music, Spotify, or Deezer.
-
Online music converters or downloaders that allow you to convert or download mp3 files from YouTube videos or other sources, such as Y2mate, MP3Juices, or OnlineVideoConverter.
-
-
The Steps and Tips on How to Download the Song Askies in Mp3 Format
-
The steps and tips on how to download the song Askies in mp3 format depend on the source or platform you choose. Here are some general guidelines that you can follow:
-
-
Find the song Askies on the source or platform you prefer. You can use the search function or browse through the categories.
-
Select the song Askies and click on the download or buy button. You might need to create an account or sign in if you are using an online music store or service.
-
Choose the mp3 format and the quality you want. The higher the quality, the larger the file size.
-
Confirm your payment method if you are buying the song. You might need to enter your credit card details or use other options like PayPal.
-
Wait for the download to finish. You can check the progress on your browser or app.
-
Enjoy the song Askies on your device. You can play it with any media player that supports mp3 files.
-
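Before you play the file, you can also sanity-check that what you saved really is MPEG audio and not a renamed junk file. A small heuristic sketch in Python ("askies.mp3" is a placeholder name):

```python
from pathlib import Path

def looks_like_mp3(path: str) -> bool:
    """Heuristic: real MP3s start with an ID3v2 tag or an MPEG frame sync."""
    header = Path(path).read_bytes()[:3]
    if header.startswith(b"ID3"):  # ID3v2-tagged file
        return True
    # Raw MPEG audio: the frame header begins with 11 sync bits set to 1.
    return len(header) >= 2 and header[0] == 0xFF and (header[1] & 0xE0) == 0xE0

print(looks_like_mp3("askies.mp3"))
```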
Conclusion
-
In conclusion, Chiskop's Askies is a classic kwaito song that you can enjoy anytime, anywhere. It is a song that tells a story of love and apology, with a catchy and upbeat tune. It is a song that represents the history and culture of South Africa, with a unique blend of kwaito, pop, and jazz elements. It is a song that you can download in mp3 format legally and safely, with the benefits and advantages of mp3 files. All you need to do is follow the steps and tips we have provided in this article, and you will be able to listen to this amazing song on your device.
-
So what are you waiting for? Go ahead and download Chiskop's Askies in mp3 format today, and enjoy the classic kwaito song that will make you dance and sing along. You won't regret it!
-
FAQs
-
Here are some frequently asked questions related to the topic of this article:
-
-
-
Question
-
Answer
-
-
-
Who are the members of Chiskop?
-
The members of Chiskop are Mduduzi Tshabalala, Sibusiso Thanjekwayo, Siphiwe Sibisi, and Gabi Le Roux.
-
-
-
What does kwaito mean?
-
Kwaito is a word that means cool or hot in township slang. It is also the name of a genre of music that originated in South Africa in the 1990s.
-
-
-
What does askies mean?
-
Askies is a word that means sorry in Afrikaans. It is also the title of a song by Chiskop.
-
-
-
What are the benefits of downloading mp3 files?
-
Mp3 files are digital audio files that compress sound data without losing much quality. They allow you to store more songs on your device, play them offline, and transfer them easily.
-
-
-
Where can I download Chiskop's Askies in mp3 format legally and safely?
-
You can download Chiskop's Askies in mp3 format from their official website or platform, from online music stores or services, or from online music converters or downloaders.
-
-
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Download APK Unity for Android and Get Started with Googles Official Packages.md b/spaces/1phancelerku/anime-remove-background/Download APK Unity for Android and Get Started with Googles Official Packages.md
deleted file mode 100644
index 771bbcce6d5aa4207d227e34f52ed04f4c9bc45e..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Download APK Unity for Android and Get Started with Googles Official Packages.md
+++ /dev/null
@@ -1,130 +0,0 @@
-
-
How to Download APK Unity for Android
-
If you are interested in creating mobile games for Android, you might want to consider using Unity, a cross-platform game engine that powers over 75% of the Android mobile games market. In this article, we will show you how to download APK Unity for Android, how to set up your development environment, and how to build and run your game on your device or emulator.
-
What is Unity and why use it for Android game development?
-
Unity is a popular game development platform that offers a variety of tools and features to help you create and deliver highly engaging 2D or 3D mobile games. Some of the reasons why you might want to use Unity for Android game development are:
Unity supports both 2D and 3D graphics, as well as AR/VR, with high-quality rendering and performance.
-
Unity has a simple and intuitive design interface that allows you to drag and drop assets, components, and scripts into your scene.
-
Unity uses C# as its scripting language, which is widely used and easy to learn.
-
Unity has an extensive asset store that provides thousands of free and paid assets, templates, and plugins that you can use in your project.
-
Unity enables cross-platform development, which means you can publish your game to multiple platforms, including Android, iOS, Windows, Mac, Linux, Web, Console, and more.
-
Unity has a strong support community that offers tutorials, documentation, forums, answers, blogs, videos, podcasts, and more.
-
Unity has a flexible pricing policy that allows you to use its core functionality for free. You can also upgrade to Pro or Plus plans for more features and services.
-
-
Unity requirements and compatibility
-
Before you start developing an Android game in Unity, you need to make sure that your system meets the minimum requirements. According to the official documentation, you need:
-
-
A Windows or Mac computer with a 64-bit processor.
-
A version of the Unity Editor that supports 64-bit apps. You can download it from the Unity Hub or the website.
-
The Android Build Support module that enables you to build your game for Android. You can install it from the Unity Hub along with the Editor.
-
The Android Software Development Kit (SDK) that provides the tools and libraries to create Android apps. You can install it from the Unity Hub or download it from the website.
-
The Native Development Kit (NDK) that allows you to compile native code for Android. You can install it from the Unity Hub or download it from the website.
-
A Java Development Kit (JDK) that enables you to run Java code on your computer. By default, Unity uses OpenJDK, which you can install from the Unity Hub or download from the website.
-
-
How to set up Unity for Android game development?
-
Once you have downloaded and installed all the required dependencies, you are ready to set up your Unity project for Android game development. Here are the steps you need to follow:
-
Download and install Unity Hub and Unity Editor
-
-
Go to https://unity.com/download and download the latest version of the Unity Hub.
-
Run the installer and follow the instructions to install the Unity Hub on your computer.
-
Launch the Unity Hub and sign in with your Unity account or create a new one if you don't have one.
-
Click on the Installs tab and then click on the Add button to add a new version of the Unity Editor.
-
Select the latest version of the Unity Editor and click on Next.
-
On the Add Modules screen, make sure to check the Android Build Support module and click on Done.
-
Wait for the download and installation to complete and then click on Launch to open the Unity Editor.
-
-
Download and install Android SDK, NDK, and OpenJDK
-
-
In the Unity Editor, go to Edit > Preferences > External Tools.
-
Under Android, check the boxes for Install with Unity (recommended) for SDK, NDK, and JDK.
-
Click on Download or Update for each of them and wait for the installation to finish.
-
If you already have your own versions of SDK, NDK, or JDK, you can uncheck the boxes and browse to their locations instead.
-
-
Configure Unity project settings and preferences
-
-
In the Unity Editor, go to File > New Project or Open Project to create or open a Unity project.
-
Go to Edit > Project Settings > Player and select the Android icon.
-
Under Other Settings, enter your package name, version, and minimum API level for your game.
-
Under Resolution and Presentation, choose your default orientation, resolution, and icon for your game.
-
Under Publishing Settings, click on Keystore Manager and create or import a keystore and a key alias for signing your game (see the command-line sketch after this list).
-
Save your project settings and close the window.
-
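The keystore step above can also be done outside the Editor with keytool, the key-management tool that ships with the JDK. A minimal sketch; every name below is a placeholder you should change:

```python
import subprocess

# Generate a signing keystore on the command line; keytool ships with the JDK
# and will prompt interactively for passwords and certificate details.
subprocess.run([
    "keytool", "-genkeypair", "-v",
    "-keystore", "user.keystore",  # output keystore file
    "-alias", "mygame",            # key alias to reference in Player Settings
    "-keyalg", "RSA", "-keysize", "2048",
    "-validity", "10000",          # days the certificate stays valid
], check=True)
```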
-
How to build and run your Unity game on Android device or emulator?
-
After you have set up your Unity project for Android game development, you can build and run your game on your device or emulator. Here are the steps you need to follow:
-
Connect your device or launch your emulator
-
-
If you want to run your game on a physical device, make sure it is connected to your computer via USB cable and has USB debugging enabled in the developer options. You can check if your device is recognized by going to File > Build Settings > Android and clicking on Refresh Device List.
-
If you want to run your game on an emulator, make sure you have one installed on your computer. You can use the Android Studio emulator or any other third-party emulator. You can launch your emulator from the command line or from its own interface.
-
-
Select your build platform and settings
-
-
In the Unity Editor, go to File > Build Settings and select Android as your platform.
-
Add the scenes that you want to include in your game by dragging them from the Project window to the Scenes in Build window.
-
Click on Player Settings to open the Player Settings window and make any changes that you want to apply to your game.
-
Click on Build And Run to build your game and run it on your device or emulator. You can also click on Build to generate an APK file that you can install manually later (a sketch of that manual install follows below).
-
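For the "install manually later" case, the usual tool is adb from the Android SDK platform-tools. A minimal sketch, assuming adb is on your PATH and using a placeholder APK name:

```python
import subprocess

apk_path = "MyGame.apk"  # placeholder: the APK produced by the Build step

subprocess.run(["adb", "devices"], check=True)                   # confirm the device is listed
subprocess.run(["adb", "install", "-r", apk_path], check=True)   # -r replaces an existing install
```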
-
Conclusion and FAQs
-
In this article, we have shown you how to download APK Unity for Android, how to set up your development environment, and how to build and run your game on your device or emulator. We hope that this guide has helped you get started with creating amazing mobile games for Android using Unity. If you have any questions or feedback, please feel free to leave a comment below. Happy coding!
-
FAQs
-
-
| Question | Answer |
| --- | --- |
| What is an APK file? | An APK file is an Android application package file that contains all the files and resources needed to install and run an app on an Android device. It has a .apk extension and can be downloaded from sources such as Google Play Store, websites, or email attachments. |
| How do I update my Unity version? | Use the Unity Hub: go to the Installs tab and click on the Update button next to the version that you want to update. You can also download a new version of the Unity Editor from the website and install it manually. |
| How do I debug my Unity game on Android? | Use the Logcat window in the Unity Editor: go to Window > Analysis > Logcat and select your device from the dropdown menu. You can see the messages and errors that your game generates and filter them by priority, tag, or text. |
| How do I optimize my Unity game for Android? | Follow best practices such as reducing the size of your assets, using texture compression, enabling GPU instancing, using occlusion culling, batching your draw calls, using low-poly models, and testing on different devices and resolutions. |
| How do I monetize my Unity game for Android? | Use in-app purchases, ads, subscriptions, or premium features. The Unity Services window in the Unity Editor lets you integrate these services, and third-party plugins or SDKs also offer monetization solutions. |
-
-
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Download Game Clash of Clans Terbaru and Discover New Buildings and Characters in a Mysterious World!.md b/spaces/1phancelerku/anime-remove-background/Download Game Clash of Clans Terbaru and Discover New Buildings and Characters in a Mysterious World!.md
deleted file mode 100644
index 86afdeb5140e81b7b5d0d8735e272d0c59b02043..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Download Game Clash of Clans Terbaru and Discover New Buildings and Characters in a Mysterious World!.md
+++ /dev/null
@@ -1,94 +0,0 @@
-
-
Download Game Clash of Clans Terbaru: A Guide for Beginners
-
Are you looking for a fun and addictive game that will keep you entertained for hours? Do you want to join millions of players worldwide in building your own village, raising a clan, and competing in epic clan wars? If you answered yes, then you should download game clash of clans terbaru, the latest version of one of the most popular mobile games ever.
-
Clash of Clans is a strategy game that combines base building, resource management, troop training, and multiplayer battles. You can create your own unique village, train various types of troops, join or create a clan with other players, and fight against other clans for loot and glory. You can also explore new worlds, such as the builder base and the clan capital, where you can discover new buildings, characters, and features.
In this article, we will show you how to download and install clash of clans on your device, how to play clash of clans with basic tips and tricks, and how to enjoy clash of clans more with advanced strategies and features. By the end of this article, you will be ready to start your own clashing adventure. So, what are you waiting for? Let's get started!
-
How to Download and Install Clash of Clans on Your Device
-
Downloading and installing clash of clans is very easy and simple. Just follow these steps:
Choose your device (Android or iOS) and click on the download button.
-
Wait for the game to download and install on your device.
-
Launch the game and create your account. You can use your Google Play or Game Center account to save your progress.
-
-
Congratulations! You have successfully downloaded and installed clash of clans on your device. Now you can start playing the game.
-
How to Play Clash of Clans: Basic Tips and Tricks
-
Clash of Clans is a game that requires strategy, planning, and creativity. Here are some basic tips and tricks that will help you play better:
-
-
Tip 1: Build and upgrade your village. Your village is your base where you produce resources, train troops, research upgrades, and defend against enemy attacks. You should always try to improve your village by building new structures, upgrading existing ones, and placing them strategically. You can also use the layout editor to design your village according to your preference.
-
Tip 2: Train and deploy your troops. Your troops are your army that you use to attack other villages and earn resources, trophies, and stars. You should always train a variety of troops that can deal with different types of defenses and obstacles. You can also use the army overview to see the stats and abilities of your troops. When you attack, you should scout the enemy village first and choose the best spot to deploy your troops. You can also use the battle tools to zoom in, zoom out, or rotate the map.
-
Tip 3: Join or create a clan. A clan is a group of players that can chat, donate, and request troops, and participate in clan wars and events. You can join an existing clan or create your own clan with your friends. Being in a clan will give you many benefits, such as getting extra troops, sharing strategies, earning clan perks, and having fun with other players.
-
Tip 4: Participate in clan wars and events. Clan wars are special battles where two clans compete against each other for loot and glory. You can participate in clan wars by being in a clan and having your village ready for war. You can also participate in events, such as the clan games, the clan war leagues, and the seasonal challenges, where you can complete tasks and earn rewards.
-
Tip 5: Use spells and siege machines. Spells are special items that can boost your troops, damage enemy buildings, or alter the battlefield. You can unlock and upgrade spells by building and upgrading the spell factory. Siege machines are powerful vehicles that can carry your troops and break through enemy walls. You can unlock and upgrade siege machines by building and upgrading the workshop.
-
-
These are some of the basic tips and tricks that will help you play clash of clans better. However, there is much more to learn and discover in this game. That's why we will show you some advanced strategies and features that will make you enjoy clash of clans more.
-
-
How to Enjoy Clash of Clans More: Advanced Strategies and Features
-
Clash of Clans is a game that never gets boring. There are always new things to do, new challenges to face, and new features to explore. Here are some of the advanced strategies and features that will make you enjoy clash of clans more:
-
-
Strategy 1: Choose the right army composition and attack strategy. Depending on your level, your target, and your goal, you should choose the best combination of troops, spells, and siege machines that suits your attack style. There are many types of army compositions and attack strategies in clash of clans, such as barch (barbarians and archers), gowipe (golems, wizards, and pekkas), lavaloon (lava hounds and balloons), hybrid (miners and hog riders), etc. You should experiment with different options and find out what works best for you.
-
Strategy 2: Use super troops and heroes. Super troops are special versions of regular troops that have enhanced abilities and stats. You can unlock super troops by donating a certain amount of dark elixir to the super troop altar. You can use one super troop at a time for a limited period of time. Heroes are powerful units that have special skills and abilities. You can unlock heroes by building their altars and upgrading them with dark elixir. You can use heroes in both offense and defense.
-
Strategy 3: Explore the builder base and the clan capital. The builder base is a separate world where you can build a second village, train different types of troops, and fight against other players in a one-on-one mode called versus battles. You can unlock the builder base by building the boat on your home village. The clan capital is a shared village where you can collaborate with your clanmates, build new structures, donate resources, and access new features such as the super troop portal, the trader shop, the practice mode, etc. You can unlock the clan capital by reaching town hall level 14 on your home village.
-
Strategy 4: Customize your village with skins and sceneries. Skins are cosmetic items that change the appearance of your buildings, troops, spells, siege machines, or heroes. You can get skins by completing seasonal challenges or buying them from the shop. Sceneries are cosmetic items that change the background of your village or builder base. You can get sceneries by completing certain achievements or buying them from the shop.
-
Strategy 5: Watch replays and learn from other players. Replays are recordings of your attacks or defenses that you can watch and analyze. You can access replays by tapping on the battle log button on your home village or builder base. You can also watch replays of other players by tapping on their profiles or visiting their villages. Watching replays can help you learn from your mistakes, improve your skills, and get inspired by other players' strategies.
-
-
These are some of the advanced strategies and features that will make you enjoy clash of clans more. However, there is still more to discover and experience in this game. That's why you should download game clash of clans terbaru and join the fun.
-
Conclusion
-
Clash of Clans is a game that will never let you down. It is a game that offers endless possibilities, challenges, and entertainment. It is a game that will make you feel proud, excited, and satisfied. It is a game that will make you part of a global community of clashing enthusiasts.
-
So, what are you waiting for? Download game clash of clans terbaru now and start your own clashing adventure. You won't regret it.
-
FAQs
-
Here are some of the frequently asked questions about clash of clans:
-
-
Q1: Is Clash of Clans free to play?
-A1: Yes, Clash of Clans is free to download and play. However, you can also buy in-game items such as gems, gold, elixir, dark elixir, or skins with real money if you want to speed up your progress or customize your village.
-
Q2: How can I get more gems in Clash of Clans?
-A2: Gems are the premium currency in Clash of Clans that can be used to buy or boost various things in the game. You can get gems by completing achievements, removing obstacles, participating in events, winning clan games, or buying them from the shop.
-
Q3: What are the best troops in Clash of Clans?
-A3: There is no definitive answer to this question, as different troops have different strengths and weaknesses, and different situations require different strategies. However, some of the most popular and versatile troops in Clash of Clans are the barbarian king, the archer queen, the grand warden, the royal champion, the electro dragon, the yeti, the pekka, the miner, the hog rider, the balloon, the dragon, the wizard, the healer, and the wall breaker.
-
Q4: How can I protect my village from raids?
-A4: Raids are attacks from other players that can result in losing resources, trophies, or stars. You can protect your village from raids by upgrading your defenses, such as walls, cannons, archer towers, mortars, air defenses, etc., placing them strategically around your village, using traps and hidden teslas to surprise your enemies, and using shields or guard modes to prevent attacks for a certain period of time.
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Download Game Hungry Shark Evolution Mod Apk Versi Lama Game Terbaik yang Bisa Anda Download Gratis dan Mendapatkan Unlimited Coin dan Diamond untuk Meningkatkan Level dan Kekuatan Ikan Hiu Anda.md b/spaces/1phancelerku/anime-remove-background/Download Game Hungry Shark Evolution Mod Apk Versi Lama Game Terbaik yang Bisa Anda Download Gratis dan Mendapatkan Unlimited Coin dan Diamond untuk Meningkatkan Level dan Kekuatan Ikan Hiu Anda.md
deleted file mode 100644
index f60ca18e22e08557bdfc085ef6dea68de09ff285..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Download Game Hungry Shark Evolution Mod Apk Versi Lama Game Terbaik yang Bisa Anda Download Gratis dan Mendapatkan Unlimited Coin dan Diamond untuk Meningkatkan Level dan Kekuatan Ikan Hiu Anda.md
+++ /dev/null
@@ -1,109 +0,0 @@
-
-
Download Game Hungry Shark Evolution Mod Apk Versi Lama
-
Do you love playing as a hungry shark that can devour anything in its path? If yes, then you should try Hungry Shark Evolution, a popular arcade game that lets you control a variety of sharks and unleash their predatory instincts. In this article, we will tell you what Hungry Shark Evolution is, why you should download its mod apk versi lama (Indonesian for "old version"), how to download and install it, and some tips and tricks for playing it.
-
What is Hungry Shark Evolution?
-
Hungry Shark Evolution is a game developed by Future Games of London and published by Ubisoft Entertainment that was released in 2012. It is the fifth installment in the Hungry Shark series, and it has more than 100 million downloads on Google Play Store. The game is available for Android, iOS, Windows Phone, and Windows 8 devices.
-
download game hungry shark evolution mod apk versi lama
In Hungry Shark Evolution, you can choose from over 40 different sharks, ranging from the small Reef Shark to the massive Megalodon. Each shark has its own abilities, stats, and appearance. You can also customize your shark with various accessories, such as hats, sunglasses, jetpacks, and lasers.
-
The gameplay of Hungry Shark Evolution is simple: you have to eat as much as you can to survive and grow. You can eat fish, turtles, crabs, humans, and even other sharks. However, you also have to avoid dangers such as mines, jellyfish, submarines, and bigger sharks. The game has an open world map that you can explore freely, with different areas such as islands, caves, shipwrecks, and volcanoes. The game also has missions and achievements that you can complete to earn coins and gems, which are the in-game currencies.
-
download game hungry shark evolution mod apk unlimited money
-download game hungry shark evolution mod apk latest version
-download game hungry shark evolution mod apk offline
-download game hungry shark evolution mod apk android 1
-download game hungry shark evolution mod apk 2023
-download game hungry shark evolution mod apk mega mod
-download game hungry shark evolution mod apk revdl
-download game hungry shark evolution mod apk rexdl
-download game hungry shark evolution mod apk no root
-download game hungry shark evolution mod apk free shopping
-download game hungry shark evolution mod apk all sharks unlocked
-download game hungry shark evolution mod apk unlimited gems and coins
-download game hungry shark evolution mod apk versi terbaru
-download game hungry shark evolution mod apk versi 9.9.10
-download game hungry shark evolution mod apk versi 10.0.0
-download game hungry shark evolution mod apk versi lama 2019
-download game hungry shark evolution mod apk versi lama 2020
-download game hungry shark evolution mod apk versi lama 2021
-download game hungry shark evolution mod apk versi lama 2022
-download game hungry shark evolution mod apk versi lama offline
-download game hungry shark evolution mod apk versi lama unlimited money
-download game hungry shark evolution mod apk versi lama android 1
-download game hungry shark evolution mod apk versi lama mega mod
-download game hungry shark evolution mod apk versi lama revdl
-download game hungry shark evolution mod apk versi lama rexdl
-download game hungry shark evolution mod apk versi lama no root
-download game hungry shark evolution mod apk versi lama free shopping
-download game hungry shark evolution mod apk versi lama all sharks unlocked
-download game hungry shark evolution mod apk versi lama unlimited gems and coins
-cara download game hungry shark evolution mod apk versi lama
-link download game hungry shark evolution mod apk versi lama
-situs download game hungry shark evolution mod apk versi lama
-website download game hungry shark evolution mod apk versi lama
-tempat download game hungry shark evolution mod apk versi lama
-tutorial download game hungry shark evolution mod apk versi lama
-tips download game hungry shark evolution mod apk versi lama
-trik download game hungry shark evolution mod apk versi lama
-review download game hungry shark evolution mod apk versi lama
-rating download game hungry shark evolution mod apk versi lama
-rekomendasi download game hungry shark evolution mod apk versi lama
-
Features of Hungry Shark Evolution
-
Some of the features of Hungry Shark Evolution are:
-
-
Over 40 different sharks to unlock and upgrade
-
Over 20 different missions to complete
-
Over 15 hidden objects to find
-
Over 75 achievements to unlock
-
Online leaderboards and social features
-
Stunning 3D graphics and sound effects
-
Regular updates with new content
-
-
Why download Hungry Shark Evolution mod apk versi lama?
-
If you want to enjoy Hungry Shark Evolution without any limitations or restrictions, then you should download its mod apk versi lama. A mod apk is a modified version of the original game that has some extra features or advantages. For example, some of the benefits of downloading Hungry Shark Evolution mod apk versi lama are:
-
-
You can get unlimited coins and gems to buy anything you want in the game
-
You can unlock all the sharks and accessories without spending any money
-
You can remove all the ads and pop-ups that may interrupt your gameplay
-
You can access all the features and content that may be locked or unavailable in the original game
-
You can enjoy the game without any bugs or errors
-
-
How to download and install Hungry Shark Evolution mod apk versi lama?
-
If you are interested in downloading Hungry Shark Evolution mod apk versi lama, then you need to follow these simple steps:
-
Step 1: Download the apk file
-
The first thing you need to do is to download the apk file of Hungry Shark Evolution mod apk versi lama from a reliable and trusted source. You can search for it on Google or use the link below to download it directly. The file size is about 100 MB, so make sure you have enough space on your device.
-
Step 2: Enable unknown sources
-
The next thing you need to do is to enable unknown sources on your device. This is because the apk file you downloaded is not from the official Google Play Store, and your device may block its installation. To enable unknown sources, you need to go to your device settings, then security, then toggle on the option that says "allow installation of apps from unknown sources". This may vary depending on your device model and Android version, but you can always find it in the security section of your settings.
-
Step 3: Install the apk file
-
Once you have enabled unknown sources, you can proceed to install the apk file. To do this, you need to locate the file in your device storage, usually in the downloads folder. Then, tap on the file and follow the instructions on the screen to install it. It may take a few seconds or minutes depending on your device speed and performance.
-
Step 4: Launch the game and enjoy
-
After the installation is complete, you can launch the game and enjoy its features. You will see that you have unlimited coins and gems, and all the sharks and accessories are unlocked. You can also play the game without any ads or interruptions. You can explore the map, eat everything you see, and complete missions and achievements. Have fun!
-
Tips and tricks for playing Hungry Shark Evolution
-
To make the most out of Hungry Shark Evolution, here are some tips and tricks that you can use:
-
Upgrade your shark regularly
-
One of the most important things to do in Hungry Shark Evolution is to upgrade your shark regularly. Upgrading your shark will increase its speed, bite, boost, and health. This will make it easier for you to eat more, survive longer, and avoid dangers. You can upgrade your shark using coins that you earn by playing the game or by watching videos. You can also use gems to upgrade your shark instantly.
-
Collect coins and gems
-
Coins and gems are the in-game currencies that you can use to buy and upgrade sharks and accessories. You can collect coins and gems by eating gold creatures, completing missions and achievements, finding treasure chests, and watching videos. You can also get coins and gems by downloading Hungry Shark Evolution mod apk versi lama, which gives you unlimited amounts of them.
-
Complete missions and achievements
-
Missions and achievements are tasks that you can complete to earn coins, gems, and other rewards. Missions are specific to each shark, and they usually involve eating a certain number of creatures or performing a certain action. Achievements are general goals that apply to all sharks, such as eating 100 humans or surviving for 10 minutes. Completing missions and achievements will also increase your score multiplier, which will boost your score.
-
Explore the map and find hidden items
-
The map of Hungry Shark Evolution is vast and full of secrets. You can explore different areas such as islands, caves, shipwrecks, and volcanoes. You can also find hidden items such as letters that spell HUNGRY, baby sharks that follow you around, shells that unlock new maps, and statues that give you bonuses. Exploring the map will also help you discover new creatures and challenges.
-
Use boosters and accessories
-
Boosters and accessories are items that you can equip on your shark to enhance its abilities or appearance. Boosters are consumable items that give you temporary effects such as extra speed, health regeneration, or magnetism. Accessories are permanent items that give you passive benefits such as increased bite, reduced damage, or extra coins. You can buy boosters and accessories using coins or gems, or unlock them by completing missions or achievements.
-
Conclusion
-
Hungry Shark Evolution is a fun and addictive game that lets you experience the life of a hungry shark. You can download its mod apk versi lama to enjoy its features without any limitations or restrictions. You can also use our tips and tricks to improve your gameplay and score higher. Download Hungry Shark Evolution mod apk versi lama today and unleash your inner shark!
-
Frequently Asked Questions
-
Here are some of the frequently asked questions about Hungry Shark Evolution and their answers:
-
-
What is the difference between Hungry Shark Evolution and Hungry Shark World?
-
Hungry Shark Evolution and Hungry Shark World are both games in the Hungry Shark series, developed by Future Games of London and published by Ubisoft Entertainment. However, they have some differences in terms of graphics, gameplay, and content. Hungry Shark Evolution has more realistic graphics, more sharks to choose from, and more maps to explore. Hungry Shark World has more cartoonish graphics, more game modes, and more features such as pets and bosses.
-
Is Hungry Shark Evolution mod apk versi lama safe to download and install?
-
Yes, Hungry Shark Evolution mod apk versi lama is safe to download and install, as long as you get it from a reliable and trusted source. However, you should always be careful when downloading any apk file from the internet, as some of them may contain viruses or malware that can harm your device or steal your data. You should also check the permissions that the apk file requests before installing it, and avoid granting any unnecessary or suspicious ones.
-
How can I update Hungry Shark Evolution mod apk versi lama?
-
If you want to update Hungry Shark Evolution mod apk versi lama, you need to download the latest version of the mod apk file from the same source that you got the previous one. Then, you need to uninstall the old version of the game from your device, and install the new one. You may also need to enable unknown sources again if you have disabled it after installing the old version. You should also backup your game data before updating, in case something goes wrong.
-
Can I play Hungry Shark Evolution mod apk versi lama online with other players?
-
No, you cannot play Hungry Shark Evolution mod apk versi lama online with other players. The mod apk file is not compatible with the official game servers, and it may cause errors or bans if you try to connect to them. The mod apk file is only meant for offline gameplay, where you can enjoy its features without any limitations or restrictions.
-
Can I transfer my game progress from Hungry Shark Evolution to Hungry Shark World?
-
No, you cannot transfer your game progress from Hungry Shark Evolution to Hungry Shark World. The two games are separate and have different game data and mechanics. You will have to start from scratch if you want to play Hungry Shark World, and vice versa.
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Download Gratis Instagram Cara Mudah dan Cepat Mengunduh Foto dan Video dari IG.md b/spaces/1phancelerku/anime-remove-background/Download Gratis Instagram Cara Mudah dan Cepat Mengunduh Foto dan Video dari IG.md
deleted file mode 100644
index f9c6282d3c9faddda6d8b5e067c1dff07a8981a7..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Download Gratis Instagram Cara Mudah dan Cepat Mengunduh Foto dan Video dari IG.md
+++ /dev/null
@@ -1,121 +0,0 @@
-
-
How to Download Gratis Instagram and Make the Most of It
-
Instagram is one of the most popular social media platforms in the world, with over one billion users. It allows you to create and share your photos, videos, reels, stories, and more with your friends and followers. Whether you want to showcase your personal brand, promote your business, or just have fun, Instagram has something for everyone. In this article, we will show you how to download gratis Instagram on your device and how to use its features, tips, and tricks to make the most of it.
Why Should You Use Instagram?
-
Instagram is a free social media platform that lets you express yourself and connect with the people and things you love. Here are some of the reasons why you should use it:
-
Instagram is a free social media platform for sharing photos and videos
-
Instagram allows you to upload photos and videos directly from your phone or camera. You can also edit them with filters, effects, stickers, text, and music. You can share them with your followers on your feed or story, or send them privately to your friends via direct messages. You can also watch and create short videos up to 30 seconds long with reels, a feature that lets you add special effects, background music, face filters, emojis, and stickers to bring your reels to life.
-
Instagram has an array of features, from short-form videos to live streaming and private messaging
-
Instagram is not just about photos and videos. It also has many other features that let you have fun and interact with others. For example, you can go live with your friends or followers and broadcast what you are doing in real time. You can also join video chats with up to four people at once. You can also send photos, videos, voice messages, GIFs, memes, stickers, and more to your friends via direct messages. You can also see what they are up to by watching their stories or reels.
-
download gratis instagram lite
-download gratis instagram apk
-download gratis instagram video
-download gratis instagram foto
-download gratis instagram story
-download gratis instagram reels
-download gratis instagram mod
-download gratis instagram for pc
-download gratis instagram app
-download gratis instagram ios
-download gratis instagram terbaru
-download gratis instagram versi lama
-download gratis instagram tanpa kuota
-download gratis instagram tanpa watermark
-download gratis instagram tanpa login
-download gratis instagram transparan
-download gratis instagram black mod
-download gratis instagram gb
-download gratis instagram plus
-download gratis instagram pro
-download gratis instagram no crop
-download gratis instagram no ads
-download gratis instagram no root
-download gratis instagram offline
-download gratis instagram online
-download gratis instagram dengan link
-download gratis instagram dengan aplikasi
-download gratis instagram dengan browser
-download gratis instagram dengan vpn
-download gratis instagram dengan kode rahasia
-cara mudah download gratis instagram
-cara cepat download gratis instagram
-cara aman download gratis instagram
-cara terbaik download gratis instagram
-cara hemat download gratis instagram
-situs untuk download gratis instagram
-aplikasi untuk download gratis instagram
-software untuk download gratis instagram
-extension untuk download gratis instagram
-tool untuk download gratis instagram
-tips dan trik download gratis instagram
-tutorial lengkap download gratis instagram
-panduan praktis download gratis instagram
-review dan rekomendasi download gratis instagram
-testimoni dan pengalaman download gratis instagram
-keuntungan dan manfaat download gratis instagram
-kekurangan dan risiko download gratis instagram
-alternatif dan solusi download gratis instagram
-syarat dan ketentuan download gratis instagram
-
Instagram helps you connect with friends, brands, celebrities, and influencers
-
Instagram is a great way to stay in touch with your friends and family. You can follow them to see what they are posting on their feed or story. You can also like, comment, share, or save their posts. You can also tag them in your posts or stories or mention them in your comments or messages. You can also discover new people to follow based on your interests or preferences. You can follow your favorite brands, celebrities, artists, athletes, singers, actors, comedians, activists, politicians, or influencers. You can see what they are up to by watching their posts or stories or reels. You can also interact with them by liking, commenting, sharing, or messaging them.
-
How to Download Gratis Instagram on Your Device
-
Downloading Instagram is easy and free. You can get it from the Google Play Store or the App Store, depending on your device. Here are the steps to follow:
-
Download Instagram from Google Play Store or App Store
-
Open the Google Play Store or the App Store on your device and search for Instagram. Tap on the app icon and then tap on Install or Get. Wait for the app to download and install on your device. You can also use these links to download the app directly:
Create an account or log in with your existing one
-
Once you have installed the app, open it and tap on Sign Up or Log In. You can create a new account using your email address, phone number, or Facebook account. You can also log in with your existing Instagram account if you have one. Follow the instructions on the screen to set up your username, password, and profile information.
-
Customize your profile and settings
-
To make your profile stand out, you can customize it with a profile photo, a bio, a website link, and a category. Tap on Edit Profile to access these options. You can also adjust your settings to manage your privacy, notifications, security, and more. Tap on the menu icon at the top right corner and then tap on Settings to access these options.
-
How to Use Instagram Features to Grow Your Brand or Business
-
Instagram is not only a fun and creative way to share your photos and videos, but also a powerful tool to grow your brand or business. Here are some of the features you can use to reach and engage your target audience:
-
Create and share reels, stories, and posts with your followers
-
Reels are short videos that you can create and share with special effects, music, filters, stickers, and more. They are a great way to showcase your products, services, tutorials, behind-the-scenes, or anything else that you want to share with your followers. To create a reel, tap on the plus icon at the bottom of the screen and then tap on Reel. You can record a video or upload one from your gallery. You can also edit it with various tools and effects. To share it, tap on Share To and choose where you want to post it: your feed, your story, or both.
-
Stories are photos or videos that disappear after 24 hours. They are a great way to share moments from your day, updates, promotions, polls, questions, or anything else that you want to share with your followers. To create a story, tap on the plus icon at the bottom of the screen and then tap on Story. You can take a photo or video or upload one from your gallery. You can also edit it with various tools and effects. To share it, tap on Your Story at the bottom left corner.
-
Posts are photos or videos that stay on your feed permanently. They are a great way to showcase your brand identity, values, mission, vision, or anything else that you want to share with your followers. To create a post, tap on the plus icon at the bottom of the screen and then tap on Feed Post. You can take a photo or video or upload one from your gallery. You can also edit it with various tools and effects. To share it, tap on Next and write a caption, add hashtags, tag people, add a location, and choose where you want to post it: your feed only or both your feed and Facebook.
-
Use hashtags, filters, stickers, and music to enhance your content
-
Hashtags are words or phrases that start with a # symbol and help users find content related to a specific topic or theme. They are a great way to increase your visibility, reach new audiences, join conversations, and create communities around your brand or business. To use hashtags, type them in your caption or comment section of your post or story. You can use up to 30 hashtags per post and up to 10 hashtags per story.
-
Filters are effects that you can apply to your photos or videos to change their appearance or mood. They are a great way to add some flair, creativity, or fun to your content. To use filters, swipe left or right on the screen when you are taking a photo or video or tap on the filter icon at the bottom right corner when you are editing your photo or video. You can choose from a variety of filters, from vintage to beauty to funny.
-
Stickers are images or animations that you can add to your photos or videos to make them more interactive or expressive. They are a great way to add some personality, humor, or emotion to your content. To use stickers, tap on the sticker icon at the top of the screen when you are creating a story or reel. You can choose from a variety of stickers, from emojis to gifs to polls to questions.
-
Music is a feature that lets you add songs or sounds to your photos or videos. It is a great way to add some rhythm, vibe, or mood to your content. To use music, tap on the music icon at the top of the screen when you are creating a story or reel. You can choose from a library of songs or sounds, from popular hits to genres to moods. You can also search for a specific song or sound by typing its name or artist.
-
Engage with your audience through likes, comments, and messages
-
Likes are a way of showing appreciation or support for someone's content. They are a great way to build relationships, trust, and loyalty with your audience. To like someone's post, tap on the heart icon below the post. To like someone's comment, tap on the heart icon next to the comment. To see who liked your post or comment, tap on the number of likes below the post or comment.
-
Comments are a way of expressing your opinion, feedback, question, or suggestion for someone's content. They are a great way to start conversations, spark debates, and create communities around your brand or business. To comment on someone's post, tap on the speech bubble icon below the post and type your comment. To reply to someone's comment, tap on the reply icon next to the comment and type your reply. To see who commented on your post, tap on the number of comments below the post.
-
Messages are a way of communicating privately with someone on Instagram. They are a great way to provide customer service, answer queries, send offers, or share exclusive content with your audience. To send a message to someone, tap on the paper plane icon at the top right corner of the screen and search for their username. To receive a message from someone, tap on the paper plane icon at the top right corner of the screen and check your inbox.
-
Explore and discover new content and creators based on your interests
-
Explore is a feature that lets you find new content and creators that match your interests or preferences. It is a great way to expand your horizons, learn new things, and get inspired by others. To use explore, tap on the magnifying glass icon at the bottom of the screen and browse through different categories, such as food, travel, fashion, beauty, art, sports, music, and more. You can also search for specific topics, hashtags, people, or places by typing them in the search bar. You can also see what is trending or popular on Instagram by tapping on the For You tab.
-
Shop for products that complement your personal style
-
Shop is a feature that lets you browse and buy products from your favorite brands and creators on Instagram. It is a great way to find and support businesses that suit your personal style, taste, or needs. To use shop, tap on the shopping bag icon at the bottom of the screen and explore different collections, categories, or recommendations. You can also see products that are featured in posts or stories by tapping on the shopping tag icon. You can also search for specific products, brands, or creators by typing them in the search bar. To buy a product, tap on it and then tap on View on Website to go to the seller's website and complete your purchase.
-
How to Use Instagram Tips and Tricks to Boost Your Engagement and Reach
-
Instagram is not only a fun and creative platform, but also a competitive one. To stand out from the crowd and grow your following, you need to use some tips and tricks that can help you boost your engagement and reach. Here are some of them:
-
Add and manage multiple accounts from the same device
-
If you have more than one Instagram account, for example, one for personal use and one for business use, you can add and manage them from the same device. This way, you can easily switch between them without logging out and logging in every time. To add another account, tap on the menu icon at the top right corner of the screen and then tap on Settings. Then, scroll down and tap on Add Account. Enter your username and password for the account you want to add and tap on Log In. To switch between accounts, tap on your profile picture at the bottom right corner of the screen and then tap on the account you want to use.
-
Schedule your posts in advance or use the best time to post
-
If you want to post consistently and reach more people, you need to plan your posts in advance or use the best time to post. To schedule your posts in advance, you can use a third-party app or tool that lets you create, edit, and schedule your posts for a future date and time. Some of these apps or tools are Later, Buffer, Hootsuite, Planoly, and Preview. To use the best time to post, you need to know when your audience is most active and likely to engage with your content. You can use Instagram Insights, a feature that lets you see analytics about your account, such as impressions, reach, engagement, followers, and more. To access Instagram Insights, tap on the menu icon at the top right corner of the screen and then tap on Insights.
-
Hide, delete, or disable comments and likes on your posts
-
If you want to have more control over your posts and avoid unwanted or negative comments or likes, you can hide, delete, or disable them on your posts. To hide comments or likes on your posts, tap on the menu icon at the top right corner of the post and then tap on Hide Comments or Hide Likes. To delete comments on your posts, swipe left on the comment and then tap on the trash icon. To disable comments or likes on your posts, tap on the plus icon at the bottom of the screen and then tap on Advanced Settings. Then, toggle off the option to Turn Off Commenting or Turn Off Like and View Counts.
-
Make a photo collage in your Instagram story or feed
-
If you want to share multiple photos in one post or story, you can make a photo collage using Instagram's layout feature. This way, you can showcase different aspects of your brand or business, such as products, services, testimonials, or behind-the-scenes. To make a photo collage in your Instagram story, tap on the plus icon at the bottom of the screen and then tap on Story. Then, swipe left until you see the Layout option and tap on it. You can choose from different layouts and add up to six photos from your gallery. You can also edit them with various tools and effects. To share it, tap on Your Story at the bottom left corner. To make a photo collage in your Instagram feed, tap on the plus icon at the bottom of the screen and then tap on Feed Post. Then, tap on the Layout icon at the bottom right corner and choose from different layouts and add up to nine photos from your gallery. You can also edit them with various tools and effects. To share it, tap on Next and write a caption, add hashtags, tag people, add a location, and choose where you want to post it: your feed only or both your feed and Facebook.
-
Use question or poll stickers to interact with your followers
-
If you want to get feedback, opinions, suggestions, or insights from your followers, you can use question or poll stickers in your stories. They are a great way to engage with your audience, learn more about them, and create a sense of community around your brand or business. To use question stickers, tap on the sticker icon at the top of the screen when you are creating a story and then tap on the Question sticker. You can type a question for your followers to answer and customize the color and font of the sticker. To share it, tap on Your Story at the bottom left corner. You can see the responses from your followers by swiping up on your story. You can also share some of the responses with your followers by tapping on them and then tapping on Share Response.
-
To use poll stickers, tap on the sticker icon at the top of the screen when you are creating a story and then tap on the Poll sticker. You can type a question for your followers to vote on and customize the color and font of the sticker. You can also change the default options of Yes/No to something else by tapping on them and typing your own options. To share it, tap on Your Story at the bottom left corner. You can see the results from your followers by swiping up on your story. You can also share the results with your followers by tapping on them and then tapping on Share Results.
-
Conclusion and FAQs
-
Instagram is a free social media platform that lets you create and share photos, videos, reels, stories, and more with your friends and followers. It also has many features that let you have fun and interact with others, such as live streaming, video chats, direct messages, explore, shop, and more. It also has many tips and tricks that let you boost your engagement and reach, such as adding multiple accounts, scheduling posts, hiding comments, making collages, and using stickers. In this article, we showed you how to download gratis Instagram on your device and how to use its features, tips, and tricks to make the most of it. We hope you found this article helpful and informative. If you have any questions, feel free to ask us in the comment section below. Here are some FAQs that might answer some of your queries:
-
FAQs
-
-
How do I download gratis Instagram for PC?
-
Instagram does not have an official app for PC, but you can use a web browser or an emulator to access it. To use a web browser, go to www.instagram.com and log in with your account. You can view, like, comment, and share posts, but you cannot upload photos or videos. To use an emulator, download and install an app that simulates an Android or iOS device on your PC, such as BlueStacks, NoxPlayer, or MEmu. Then, open the emulator and download Instagram from the Google Play Store or the App Store. You can use Instagram as you would on your phone or tablet.
-
How do I download gratis Instagram videos?
-
Instagram does not have a built-in feature to download videos, but you can use a third-party app or tool to do so. Some of these apps or tools are Video Downloader for Instagram, InstaSave, SaveFrom.net, and DownloadGram. To use them, copy the link of the video you want to download and paste it in the app or tool. Then, follow the instructions to save the video to your device.
-
How do I download gratis Instagram photos?
-
Instagram does not have a built-in feature to download photos, but you can use a third-party app or tool to do so. Some of these apps or tools are Photo Downloader for Instagram, InstaSave, SaveFrom.net, and DownloadGram. To use them, copy the link of the photo you want to download and paste it in the app or tool. Then, follow the instructions to save the photo to your device.
-
How do I download gratis Instagram stories?
-
Instagram does not have a built-in feature to download stories, but you can use a third-party app or tool to do so. Some of these apps or tools are Story Saver for Instagram, Story Downloader for Instagram, SaveFrom.net, and StoriesIG. To use them, enter the username of the person whose story you want to download and select the story you want to save. Then, follow the instructions to save the story to your device.
-
How do I download gratis Instagram reels?
-
Instagram does not have a built-in feature to download reels, but you can use a third-party app or tool to do so. Some of these apps or tools are Reels Video Downloader for Instagram, Reels Downloader for Instagram, SaveFrom.net, and DownloadGram. To use them, copy the link of the reel you want to download and paste it in the app or tool. Then, follow the instructions to save the reel to your device.
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Download Watch Dogs 2 APK for Android and Join the Hacker Revolution.md b/spaces/1phancelerku/anime-remove-background/Download Watch Dogs 2 APK for Android and Join the Hacker Revolution.md
deleted file mode 100644
index 705f46454905a5621961e3cb3f392bb75eb909bf..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Download Watch Dogs 2 APK for Android and Join the Hacker Revolution.md
+++ /dev/null
@@ -1,115 +0,0 @@
-
-
Watch Dogs 2 Download APK: How to Play the Open World Adventure Game on Your Android Device
-
Watch Dogs 2 is one of the most popular and acclaimed open world adventure games of the last decade. It was released in 2016 by Ubisoft for PlayStation 4, Xbox One, and Microsoft Windows. But did you know that you can also play Watch Dogs 2 on your Android device? In this article, we will show you how to download Watch Dogs 2 APK, the benefits and challenges of playing it on your mobile device, and some tips and tricks to optimize your gaming experience.
What is Watch Dogs 2?
-
Watch Dogs 2 is the sequel to Watch Dogs, a game that introduced the concept of hacking as a weapon in a modern and realistic setting. In Watch Dogs 2, you play as Marcus Holloway, a young and talented hacker who joins a group of hacktivists called DedSec. Together, you will expose the secrets and corruption of the powerful corporations and organizations that control the city of San Francisco using the ctOS 2.0 system, a network that connects everything and everyone.
-
The story and setting of Watch Dogs 2
-
The story of Watch Dogs 2 is set in a fictionalized version of San Francisco, where you can explore various landmarks and districts such as Chinatown, Silicon Valley, Oakland, Marin County, and more. You will encounter different characters and factions, such as rival hackers, gangs, police, media, politicians, and celebrities. You will also uncover a dark conspiracy that threatens the freedom and privacy of millions of people.
-
The gameplay and features of Watch Dogs 2
-
The gameplay of Watch Dogs 2 is based on hacking, stealth, combat, driving, and parkour. You can hack into various devices and systems, such as cameras, phones, cars, drones, traffic lights, security systems, and more. You can use your hacking skills to manipulate the environment, distract or disable enemies, create diversions, or access information. You can also use various weapons and gadgets, such as guns, tasers, grenades, RC cars, quadcopters, and more. You can choose to play the game in a lethal or non-lethal way, depending on your style and preference.
-
Watch Dogs 2 also features a dynamic open world that reacts to your actions and choices. You can interact with various NPCs and objects, such as pedestrians, animals, shops, vehicles, etc. You can also participate in various activities and side missions, such as races, puzzles, collectibles, online multiplayer modes, etc. You can customize your character's appearance, skills, equipment, and vehicles.
-
watch dogs 2 apk free download for android
-watch dogs 2 apk obb download latest version
-watch dogs 2 apk mod unlimited money
-watch dogs 2 apk data offline
-watch dogs 2 apk revdl
-watch dogs 2 apk andropalace
-watch dogs 2 apk highly compressed
-watch dogs 2 apk rexdl
-watch dogs 2 apk pure
-watch dogs 2 apk mirror
-watch dogs 2 apk android oyun club
-watch dogs 2 apk no verification
-watch dogs 2 apk uptodown
-watch dogs 2 apk mob.org
-watch dogs 2 apk mali
-watch dogs 2 apk hack
-watch dogs 2 apk full version
-watch dogs 2 apk + obb android 1
-watch dogs 2 apk + data download for android
-watch dogs 2 apk + obb file download
-watch dogs 2 apk + obb highly compressed download
-watch dogs 2 apk + obb free download for android
-watch dogs 2 apk + obb offline download
-watch dogs 2 apk + obb mega download
-watch dogs 2 apk + obb google drive download
-watch dogs 2 apk + data highly compressed android
-watch dogs 2 apk + data free download for android
-watch dogs 2 apk + data offline play
-watch dogs 2 apk + data mega link
-watch dogs 2 apk + data google drive link
-how to download and install watch dogs 2 apk on android
-how to download and play watch dogs 2 apk on pc
-how to download and run watch dogs 2 apk on ios
-how to download and use watch dogs 2 apk mod menu
-how to download and update watch dogs 2 apk latest version
-is it possible to download and play watch dogs 2 apk without verification
-is it safe to download and install watch dogs 2 apk from unknown sources
-is it legal to download and use watch dogs 2 apk modded version
-is it worth to download and buy watch dogs 2 apk premium edition
-is it fun to download and enjoy watch dogs 2 apk multiplayer mode
-
Why download Watch Dogs 2 APK?
-
If you are a fan of Watch Dogs 2 or open world adventure games in general, you might want to download Watch Dogs 2 APK for your Android device. Here are some of the benefits and challenges of doing so.
-
The benefits of playing Watch Dogs 2 on your Android device
-
-
You can play Watch Dogs 2 anytime and anywhere you want. You don't need a console or a PC to enjoy the game. You just need your Android device and an internet connection.
-
You can save money by downloading Watch Dogs 2 APK for free. You don't need to buy the game or pay for any subscription or service.
-
You can experience Watch Dogs 2 in a new way. You can use touch controls or connect a controller to your device. You can also adjust the graphics settings to suit your device's performance.
-
-
The challenges and risks of downloading Watch Dogs 2 APK
-
-
You might face some technical issues or bugs while playing Watch Dogs 2 on your Android device. The game might not run smoothly or crash frequently. You might also encounter compatibility or security problems with your device or operating system.
-
You might violate the terms and conditions of Ubisoft or Google Play by downloading Watch Dogs 2 APK. You might face legal actions or penalties from the developers or the authorities. You might also lose access to the official updates, patches, or online features of the game.
-
You might expose your device or data to malware or viruses by downloading Watch Dogs 2 APK from untrusted sources. You might also compromise your privacy or security by granting permissions or access to unknown apps or websites.
-
-
How to download Watch Dogs 2 APK?
-
If you still want to download Watch Dogs 2 APK for your Android device, you need to follow some steps and precautions. Here are some of them.
-
The steps to download and install Watch Dogs 2 APK
-
-
First, you need to enable the installation of apps from unknown sources on your device. You can do this by going to Settings > Security > Unknown Sources and toggling it on.
-
Next, you need to find a reliable and safe source to download Watch Dogs 2 APK. You can search online for various websites or blogs that offer the APK file. Make sure to check the reviews, ratings, and comments of other users before downloading anything.
-
Then, you need to download the Watch Dogs 2 APK file and the OBB data file to your device. The APK file is the application file that contains the game's code and resources. The OBB file is the expansion file that contains the game's data and graphics. You can usually find both files in a zip or rar format.
-
After that, you need to extract the zip or rar file using a file manager app or a zip extractor app. You will get two folders: one with the APK file and one with the OBB file.
-
Next, you need to install the Watch Dogs 2 APK file by tapping on it and following the instructions on the screen. You might need to grant some permissions or access to the app during the installation process.
-
Then, you need to move the OBB file to the right location on your device. You can do this by using a file manager app or a zip extractor app. You need to move the OBB file to the Android > OBB > com.ubisoft.watchdogs2 folder on your device's internal storage. If you don't have this folder, you can create it manually.
-
Finally, you can launch the Watch Dogs 2 app from your device's app drawer or home screen. You might need to verify your identity or complete some additional steps before playing the game.
-
-
The tips and tricks to optimize your gaming experience
-
-
Make sure that your device meets the minimum requirements to run Watch Dogs 2 APK. You need at least 4 GB of RAM, 64 GB of storage space, and Android 7.0 or higher.
-
Make sure that your device has enough battery life and a stable internet connection while playing Watch Dogs 2 APK. You can also use a power bank or a Wi-Fi booster to avoid any interruptions or lags.
-
Make sure that you adjust the graphics settings and controls of Watch Dogs 2 APK according to your device's performance and preference. You can do this by going to Settings > Graphics and Settings > Controls in the game's menu.
-
Make sure that you backup your game data and progress regularly while playing Watch Dogs 2 APK. You can do this by using a cloud service or a local storage option in the game's menu.
-
Make sure that you enjoy Watch Dogs 2 APK responsibly and ethically while playing it on your Android device. You can do this by respecting other players, avoiding piracy, and supporting the developers.
-
-
Conclusion
-
Watch Dogs 2 is an amazing open world adventure game that lets you hack into anything and everything in San Francisco. You can play it on your Android device by downloading Watch Dogs 2 APK, but you need to be aware of the benefits and challenges of doing so. You also need to follow some steps and precautions to download and install Watch Dogs 2 APK safely and smoothly. And you need to follow some tips and tricks to optimize your gaming experience and have fun.
-
If you are ready to join Marcus Holloway and DedSec in their quest to expose the truth and fight for freedom, then download Watch Dogs 2 APK now and start hacking!
-
Summary of the main points
-
Here are the main points of this article:
-
-
Watch Dogs 2 is a popular and acclaimed open world adventure game that lets you hack into anything and everything in San Francisco.
-
You can play Watch Dogs 2 on your Android device by downloading Watch Dogs 2 APK, which is the application file that contains the game's code and resources.
-
Playing Watch Dogs 2 on your Android device has some benefits, such as convenience, cost-effectiveness, and novelty, but also some challenges, such as technical issues, legal risks, and security threats.
-
To download and install Watch Dogs 2 APK safely and smoothly, you need to enable the installation of apps from unknown sources, find a reliable and safe source to download the APK file and the OBB data file, extract the zip or rar file, install the APK file, move the OBB file to the right location, and launch the app.
-
To optimize your gaming experience and have fun, you need to make sure that your device meets the minimum requirements, has enough battery life and a stable internet connection, adjust the graphics settings and controls, backup your game data and progress, and enjoy Watch Dogs 2 responsibly and ethically.
-
-
Call to action
-
If you liked this article, please share it with your friends and family who are also interested in Watch Dogs 2 or open world adventure games. You can also leave a comment below and let us know what you think about Watch Dogs 2 APK. And if you want to read more articles like this one, please subscribe to our newsletter and follow us on social media. Thank you for reading!
-
FAQs
-
Here are some of the frequently asked questions about Watch Dogs 2 APK:
-
-
Is Watch Dogs 2 APK safe to download?
-
Watch Dogs 2 APK is safe to download if you follow the steps and precautions mentioned in this article. However, there is always a risk of downloading apps from unknown sources, so you should do it at your own discretion and responsibility.
-
Is Watch Dogs 2 APK free to download?
-
Watch Dogs 2 APK is free to download from various websites or blogs that offer it. However, you should be aware that downloading Watch Dogs 2 APK might violate the terms and conditions of Ubisoft or Google Play, and you might face legal actions or penalties from them.
-
Is Watch Dogs 2 APK compatible with my device?
-
Watch Dogs 2 APK is compatible with most Android devices that have at least 4 GB of RAM, 64 GB of storage space, and Android 7.0 or higher. However, some devices might not be able to run Watch Dogs 2 APK smoothly or at all due to different specifications or performance issues.
-
How can I update Watch Dogs 2 APK?
-
You can update Watch Dogs 2 APK by downloading the latest version of the APK file and the OBB data file from the same source that you downloaded them before. You can then follow the same steps to install and move them to your device. However, you should be aware that updating Watch Dogs 2 APK might not be possible or compatible with your device or game data.
-
How can I contact Ubisoft or Google Play if I have any problems or questions about Watch Dogs 2 APK?
-
You can contact Ubisoft or Google Play by visiting their official websites or social media accounts. You can also use their customer service or support channels to get help or feedback. However, you should be aware that Ubisoft or Google Play might not be able to help you or answer your questions about Watch Dogs 2 APK since it is not an official or authorized app.
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Download YouTube Playlist with These Reddit-Approved Tools and Tips.md b/spaces/1phancelerku/anime-remove-background/Download YouTube Playlist with These Reddit-Approved Tools and Tips.md
deleted file mode 100644
index 7181d545c30a9847e121b275f3d9a5a5777a2812..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Download YouTube Playlist with These Reddit-Approved Tools and Tips.md
+++ /dev/null
@@ -1,173 +0,0 @@
-
-
How to Download YouTube Playlists from Reddit
-
If you are looking for some interesting and useful YouTube playlists to watch or listen to, you might want to check out reddit. Reddit is a popular online community where people share and discuss all kinds of topics, including YouTube playlists. You can find playlists for music, movies, documentaries, tutorials, podcasts, and more on reddit.
-
But how do you download these playlists from reddit? One of the best tools for downloading YouTube playlists is youtube-dl. Youtube-dl is a powerful and versatile command-line program that allows you to download videos from YouTube and hundreds of other sites. You can download videos in any quality and format, with subtitles, metadata, thumbnails, and more.
In this article, we will show you how to install youtube-dl and ffmpeg on your computer, how to use youtube-dl commands to download YouTube playlists, and how to find YouTube playlists on reddit. By the end of this article, you will be able to download any YouTube playlist from reddit with ease.
-
Introduction
-
What is reddit and how it can help you find YouTube playlists
-
Reddit is a website where users can post, vote, and comment on various types of content, such as links, images, videos, text, and more. Reddit is divided into thousands of subreddits, which are communities dedicated to specific topics or interests. For example, there are subreddits for music, movies, gaming, science, politics, and more.
-
Reddit can help you find YouTube playlists because many users share and recommend playlists on different subreddits. You can find playlists for any genre, mood, theme, or occasion on reddit. For example, you can find playlists for relaxing, studying, working out, partying, traveling, and more. You can also find playlists for specific artists, albums, songs, or genres.
-
Reddit can also help you discover new and interesting playlists that you might not find elsewhere. You can browse through the posts and comments of other users and see what they like and dislike. You can also ask for suggestions or feedback on your own playlists. You can also participate in challenges, contests, polls, and discussions related to YouTube playlists on reddit.
-
What is youtube-dl and why it is the best tool for downloading YouTube playlists
-
Youtube-dl is a free and open-source command-line program that allows you to download videos from YouTube and hundreds of other sites. Youtube-dl is written in Python and works on Windows, Mac OS X, Linux, and other platforms. You can download youtube-dl from [4](https://youtube-dl.org/).
-
Youtube-dl is the best tool for downloading YouTube playlists because it offers many features and options that make it easy and convenient to download any playlist you want. Some of the features and options of youtube-dl are:
-
-
You can download a single video, a playlist, or a channel with a single command.
-
You can choose the best video and audio quality and format for your downloads.
-
You can customize the output file name and location for your downloads.
-
You can download videos with subtitles, metadata, thumbnails, and more.
-
You can resume interrupted downloads and skip already downloaded files.
-
You can download videos from other sites besides YouTube with youtube-dl.
-
You can update youtube-dl to the latest version with a simple command.
-
You can use youtube-dl with a GUI wrapper or a web interface if you prefer a graphical user interface.
-
-
Youtube-dl is also fast, reliable, secure, and easy to use. You just need to copy and paste the URL of the video or playlist you want to download and run the appropriate command in your terminal or command prompt. Youtube-dl will do the rest for you.
How to install youtube-dl and ffmpeg on your computer
-
To use youtube-dl to download YouTube playlists from reddit, you need to install youtube-dl and ffmpeg on your computer. Ffmpeg is a program that youtube-dl uses to convert video and audio files to different formats and to merge separate video and audio streams. You can download ffmpeg from [3](https://ffmpeg.org/download.html).
-
The installation process of youtube-dl and ffmpeg varies depending on your operating system. Here are the general steps for installing youtube-dl and ffmpeg on Windows, Mac OS X, and Linux:
-
Windows
-
-
Download the latest youtube-dl.exe file from [2](https://youtube-dl.org/downloads/latest/youtube-dl.exe) and save it in a folder of your choice.
-
Download the latest ffmpeg.zip file from [1](https://www.gyan.dev/ffmpeg/builds/) (choose the static build) and extract it in the same folder as youtube-dl.exe.
-
Add the folder where you saved youtube-dl.exe and ffmpeg.exe to your system's PATH environment variable. You can follow this guide on [0](https://www.architectryan.com/2018/03/17/add-to-the-path-on-windows-10/) how to do that.
-
Open a terminal or command prompt window and type youtube-dl -h to check if youtube-dl is working properly.
-
-
Mac OS X
-
-
Install Homebrew if you don't have it already. Homebrew is a package manager that makes it easy to install software on Mac OS X. You can follow this guide on [11](https://brew.sh/) how to install Homebrew.
-
Open a terminal window and type brew install youtube-dl ffmpeg to install youtube-dl and ffmpeg with Homebrew.
-
Type youtube-dl -h to check if youtube-dl is working properly.
-
Linux
-
-
Open a terminal window and type sudo apt-get update to update your system's package list.
-
Type sudo apt-get install youtube-dl ffmpeg to install youtube-dl and ffmpeg with your system's package manager.
-
Type youtube-dl -h to check if youtube-dl is working properly.
-
-
How to Download YouTube Playlists with youtube-dl
-
Now that you have installed youtube-dl and ffmpeg on your computer, you are ready to download YouTube playlists with youtube-dl. Here are the basic steps for downloading YouTube playlists with youtube-dl:
-
How to use youtube-dl commands to download a single video, a playlist, or a channel
-
The basic syntax for downloading a single video, a playlist, or a channel with youtube-dl is:
-
youtube-dl [OPTIONS] URL
-
Where URL is the URL of the video, playlist, or channel you want to download, and OPTIONS are the optional parameters you can use to customize your download. For example, you can use the following command to download the first 10 videos of a playlist:
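youtube-dl --playlist-end 10 PLAYLIST_URL
-
Here PLAYLIST_URL is the URL of the playlist, and --playlist-end 10 stops the download after the tenth video. You can also use the --playlist-items option to download a specific selection of videos, such as --playlist-items 1-10.
-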
You can find the URL of the video, playlist, or channel you want to download by copying it from your browser's address bar or by right-clicking on the video or playlist and selecting Copy link address. You can also use the URL of a reddit post or comment that contains a link to a YouTube video or playlist.
-
How to choose the best video and audio quality and format for your downloads
-
By default, youtube-dl will download the best available quality and format for your videos. However, you can also specify the quality and format you want by using the -f or --format option. The syntax for using the -f option is:
-
youtube-dl -f FORMAT [OPTIONS] URL
-
Where FORMAT is a code that represents the video and audio quality and format you want. You can find the list of available formats for a video by using the -F or --list-formats option. For example, you can use the following command to see the available formats for a video:
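youtube-dl -F https://www.youtube.com/watch?v=dQw4w9WgXcQ
-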
[youtube] dQw4w9WgXcQ: Downloading webpage
[info] Available formats for dQw4w9WgXcQ:
format code  extension  resolution  note
249          webm       audio only  tiny    57k , opus @ 50k (48000Hz), 1.95MiB
250          webm       audio only  tiny    76k , opus @ 70k (48000Hz), 2.59MiB
140          m4a        audio only  tiny   130k , m4a_dash container, mp4a.40.2@128k (44100Hz), 5.28MiB
251          webm       audio only  tiny   149k , opus @160k (48000Hz), 5.07MiB
394          mp4        256x144     144p    83k , av01.0.05M.08, 25fps, video only, 2.77MiB
278          webm       256x144     144p    95k , webm container, vp9, 25fps, video only, 3.19MiB
160          mp4        256x144     144p    99k , avc1.4d400c, 25fps, video only, 2.11MiB
395          mp4        426x240     240p   184k , av01.0.05M.08, 25fps, video only, 5.02MiB
242          webm       426x240     240p   220k , vp9, 25fps, video only, 5.88MiB
133          mp4        426x240     240p   242k , avc1.4d4015, 25fps, video only, 4.11MiB
396          mp4        640x360     360p   338k , av01.0.05M.08, 25fps, video only, 9.12MiB
243          webm       640x360     360p   404k , vp9, 25fps, video only, 10.67MiB
134          mp4        640x360     360p   465k , avc1.4d401e, 25fps, video only, 7.88MiB
397          mp4        854x480     480p   617k , av01.0.05M.08, 25fps, video only, 16.57MiB
244          webm       854x480     480p   752k , vp9, 25fps, video only, 19.72MiB
135          mp4        854x480     480p  1016k , avc1.4d401f, 25fps, video only, 17.13MiB
398          mp4        1280x720    720p60 1138k , av01.0.08M.08, 60fps, video only, 30.57MiB
247          webm       1280x720    720p60 1165k , vp9, 60fps, video only, 30.79MiB
136          mp4        1280x720    720p60 2326k , avc1.4d4020, fps=60, video only, 39.27MiB
399          mp4        1920x1080   (best)
-
Each line of the listing shows the format code (such as 140 or 398), the extension (such as mp4 or webm), the resolution (such as 640x360 or 1280x720), and a note (such as audio only or video only). You can choose the format code that suits your needs and preferences.
-
For example, if you want to download the best quality version of the video that is available as a single file, you can use the following command:
-
youtube-dl -f best https://www.youtube.com/watch?v=dQw4w9WgXcQ
-
If you want to download the video in a specific resolution and format, such as mp4 and 720p60, you can use the following command:
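-
youtube-dl -f 'bestvideo[ext=mp4][height<=720]+bestaudio[ext=m4a]' https://www.youtube.com/watch?v=dQw4w9WgXcQ
-
The bracketed filters are one way to express this selection: they limit the video stream to mp4 at up to 720p and the audio stream to m4a. You could also pass exact format codes from the -F listing, such as -f 398+140.
-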
This will download the best matching video and audio streams separately and then combine them into a single file with ffmpeg.
-
How to customize the output file name and location for your downloads
-
By default, youtube-dl will save your downloads in the current working directory with a file name based on the video's title and ID. However, you can also customize the output file name and location for your downloads by using the -o or --output option. The syntax for using the -o option is:
-
youtube-dl -o TEMPLATE [OPTIONS] URL
-
Where TEMPLATE is a string that defines how you want to name and organize your downloads. You can use various variables and placeholders in your template to include information such as title, id, uploader, date, resolution, format, etc. You can find the list of available variables and placeholders on [12](https://github.com/ytdl-org/youtube-dl/blob/master/README.md#output-template).
-
For example, if you want to save your downloads in a folder named YouTube with the format of title-id.extension, you can use the following command:
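-
youtube-dl -o 'YouTube/%(title)s-%(id)s.%(ext)s' https://www.youtube.com/watch?v=dQw4w9WgXcQ
-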
This will create a folder named YouTube in your current working directory and save your downloads with names like Rick Astley - Never Gonna Give You Up-dQw4w9WgXcQ.mp4.
-
How to download videos with subtitles, metadata, and thumbnails
-
Youtube-dl can also download videos with subtitles, metadata, and thumbnails if they are available on YouTube. Subtitles are text files that contain the dialogue or narration of the video. Metadata is structured information about the video, such as its title, description, tags, and rating. Thumbnails are image files that show a preview of the video.
-
To download videos with subtitles, you can use the --write-sub or --write-auto-sub options. The --write-sub option will download the subtitles that are manually created by the uploader or the community. The --write-auto-sub option will download the subtitles that are automatically generated by YouTube. You can also use the --sub-lang option to specify the language of the subtitles you want to download. For example, you can use the following command to download videos with English subtitles:
-
youtube-dl --write-sub --sub-lang en [OPTIONS] URL
-
To download videos with metadata, you can use the --write-info-json or --write-description options. The --write-info-json option will download a JSON file that contains all the metadata of the video. The --write-description option will download a text file that contains the description of the video. You can also use the --add-metadata option to embed the metadata into the video file itself. For example, you can use the following command to download videos with metadata:
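-
youtube-dl --write-info-json --add-metadata https://www.youtube.com/watch?v=dQw4w9WgXcQ
-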
To download videos with thumbnails, you can use the --write-thumbnail option. This will download a JPEG or PNG file that shows a preview of the video. You can also use the --embed-thumbnail option to embed the thumbnail into the video file itself. For example, you can use the following command to download videos with thumbnails:
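-
youtube-dl --write-thumbnail --embed-thumbnail https://www.youtube.com/watch?v=dQw4w9WgXcQ
-
Note that embedding the thumbnail into the video file may require an extra helper tool such as AtomicParsley, depending on the output format.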
-
How to Find YouTube Playlists on Reddit
-
Now that you know how to download YouTube playlists with youtube-dl, you might wonder how to find them on reddit in the first place. There are many ways to find YouTube playlists on reddit, but here are some of the most common and effective ones:
-
How to use reddit search and filters to find relevant posts and subreddits
-
One of the easiest ways to find YouTube playlists on reddit is to use reddit search and filters. Reddit search allows you to search for keywords or phrases across all subreddits or within a specific subreddit. You can also use filters to narrow down your search results by relevance, date, popularity, etc.
-
For example, if you want to find YouTube playlists for relaxing music, you can type relaxing music playlist in the reddit search bar and hit enter. This will show you all the posts that contain these words in any subreddit. You can then use the filters on the top or side of the page to sort or filter your results by relevance, new, hot, top, etc.
-
If you want to find YouTube playlists for relaxing music in a specific subreddit, such as r/Music, you can type relaxing music playlist subreddit:Music in the reddit search bar and hit enter. This will show you all the posts that contain these words in r/Music only.
-
You can also use other filters and operators in your reddit search queries to refine your results further. For example, you can use quotation marks to search for an exact phrase, such as "relaxing music playlist". You can use a minus sign to exclude a word or phrase from your search results, such as relaxing music playlist -spotify. You can use OR to search for multiple words or phrases at once, such as relaxing music playlist OR ambient music playlist. You can find more information on how to use reddit search and filters on [13](https://www.reddit.com/wiki/search).
-
How to use reddit comments and upvotes to find the best playlists
-
Another way to find YouTube playlists on reddit is to use reddit comments and upvotes. Reddit comments are where users share their opinions, feedback, suggestions, questions, and answers on various posts. Reddit upvotes are where users express their approval or appreciation of a post or comment by clicking on an arrow icon.
-
You can use reddit comments and upvotes to find the best playlists by reading what other users have to say about them and seeing how popular they are. For example, if you find a post that links to a YouTube playlist for relaxing music, you can read the comments section and see what other users think about it. You can also see how many upvotes or downvotes it has received and how it ranks among other posts.
-
You can also use reddit comments and upvotes to ask for recommendations or feedback on your own playlists or share your own playlists with others. For example, if you have created a YouTube playlist for relaxing music and want some suggestions on how to improve it or what other songs to add, you can post it on a relevant subreddit and ask for feedback from other users. You can also browse through other users' playlists and comment on them or upvote them if you like them.
-
How to use reddit bots and tools to enhance your reddit experience
-
A third way to find YouTube playlists on reddit is to use reddit bots and tools. Reddit bots are automated programs that perform specific tasks or functions on reddit, such as replying to comments, posting links, providing information, etc. Reddit tools are websites or applications that provide additional features or services for reddit users, such as searching, filtering, analyzing, etc.
-
You can use reddit bots and tools to enhance your reddit experience by making it easier, faster, or more fun to find YouTube playlists on reddit. For example, you can use the following bots and tools:
-
-
[14](https://reddit.musicplayer.io/) is a website that allows you to play YouTube playlists from reddit posts or comments in a simple and elegant interface. You can also create your own playlists from reddit links and share them with others.
-
[15](https://www.reddit.com/user/Reddit-Playlister) is a bot that creates Spotify playlists from YouTube links posted on reddit. You can summon the bot by commenting !playlist on a post or comment that contains YouTube links. The bot will reply with a link to a Spotify playlist that contains the same songs as the YouTube links.
-
[16](https://www.reddit.com/user/playlistbot) is a bot that creates YouTube playlists from reddit posts or comments that contain multiple YouTube links. You can summon the bot by commenting !playlist on a post or comment that contains YouTube links. The bot will reply with a link to a YouTube playlist that contains all the YouTube links.
-
[17](https://www.reddit.com/r/RedditPlaylists/) is a subreddit where users share and request YouTube playlists from reddit. You can browse through the posts and comments and find playlists for various topics and genres. You can also post your own playlists or requests for playlists.
-
-
Conclusion
-
In this article, we have shown you how to download YouTube playlists from reddit with youtube-dl. We have also shown you how to install youtube-dl and ffmpeg on your computer, how to use youtube-dl commands to download YouTube playlists, and how to find YouTube playlists on reddit. By following these steps, you will be able to download any YouTube playlist from reddit with ease.
-
Downloading YouTube playlists from reddit can be a great way to enjoy and discover new and interesting content on YouTube. You can find playlists for any topic, genre, mood, or occasion on reddit. You can also share your own playlists or ask for recommendations or feedback from other users. And because youtube-dl supports hundreds of sites, you can use the same workflow to download videos from sites besides YouTube.
-
If you are looking for a simple and convenient way to download YouTube playlists from reddit without using commands, you might want to check out [18](https://www.youtubedownloader.com/). This is a website that allows you to download videos and playlists from YouTube and other sites with just a few clicks. You can also convert videos to different formats, edit videos, burn DVDs, and more.
-
We hope you found this article helpful and informative. If you have any questions or comments, please feel free to leave them below. Thank you for reading!
-
FAQs
-
-
Q: Is youtube-dl legal and safe to use?
-
A: youtube-dl itself is open-source software and is safe to run. Whether downloading a particular video is legal depends on your jurisdiction and on YouTube's terms of service, so you should only download videos for personal use and not distribute them without permission.
-
Q: How can I update youtube-dl to the latest version?
-
A: You can update youtube-dl by running the command youtube-dl -U in your terminal or command prompt. You can also download the latest version from [19](https://github.com/ytdl-org/youtube-dl/releases/latest).
-
Q: How can I download videos from other sites besides YouTube with youtube-dl?
-
A: youtube-dl supports hundreds of sites besides YouTube. You can find the list of supported sites by running the command youtube-dl --list-extractors in your terminal or command prompt. You can also check the documentation on [20](https://github.com/ytdl-org/youtube-dl/blob/master/README.md#readme).
-
Q: How can I download videos faster with youtube-dl?
-
A: You can download videos faster with youtube-dl by using the --external-downloader option and specifying a faster downloader such as aria2c or axel. For example, you can run the command youtube-dl --external-downloader aria2c https://www.youtube.com/watch?v=dQw4w9WgXcQ to download a video with aria2c.
-
Q: How can I download videos with a GUI instead of using commands with youtube-dl?
-
A: You can download videos with a GUI instead of using commands with youtube-dl by using one of the many GUI wrappers available for youtube-dl. Some examples are [21](https://github.com/MrS0m30n3/youtube-dl-gui), [22](https://github.com/oleksis/youtube-dl-gui), and [23](https://github.com/jely2002/youtube-dl-gui).
-
\ No newline at end of file
diff --git a/spaces/AIConsultant/MusicGen/audiocraft/data/music_dataset.py b/spaces/AIConsultant/MusicGen/audiocraft/data/music_dataset.py
deleted file mode 100644
index 4e28796939f9cde2b23a2c4bf43fd7ba5fa26b2d..0000000000000000000000000000000000000000
--- a/spaces/AIConsultant/MusicGen/audiocraft/data/music_dataset.py
+++ /dev/null
@@ -1,270 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-"""Dataset of music tracks with rich metadata.
-"""
-from dataclasses import dataclass, field, fields, replace
-import gzip
-import json
-import logging
-from pathlib import Path
-import random
-import typing as tp
-
-import torch
-
-from .info_audio_dataset import (
- InfoAudioDataset,
- AudioInfo,
- get_keyword_list,
- get_keyword,
- get_string
-)
-from ..modules.conditioners import (
- ConditioningAttributes,
- JointEmbedCondition,
- WavCondition,
-)
-from ..utils.utils import warn_once
-
-
-logger = logging.getLogger(__name__)
-
-
-@dataclass
-class MusicInfo(AudioInfo):
- """Segment info augmented with music metadata.
- """
- # music-specific metadata
- title: tp.Optional[str] = None
- artist: tp.Optional[str] = None # anonymized artist id, used to ensure no overlap between splits
- key: tp.Optional[str] = None
- bpm: tp.Optional[float] = None
- genre: tp.Optional[str] = None
- moods: tp.Optional[list] = None
- keywords: tp.Optional[list] = None
- description: tp.Optional[str] = None
- name: tp.Optional[str] = None
- instrument: tp.Optional[str] = None
- # original wav accompanying the metadata
- self_wav: tp.Optional[WavCondition] = None
- # dict mapping attributes names to tuple of wav, text and metadata
- joint_embed: tp.Dict[str, JointEmbedCondition] = field(default_factory=dict)
-
- @property
- def has_music_meta(self) -> bool:
- return self.name is not None
-
- def to_condition_attributes(self) -> ConditioningAttributes:
- out = ConditioningAttributes()
- for _field in fields(self):
- key, value = _field.name, getattr(self, _field.name)
- if key == 'self_wav':
- out.wav[key] = value
- elif key == 'joint_embed':
- for embed_attribute, embed_cond in value.items():
- out.joint_embed[embed_attribute] = embed_cond
- else:
- if isinstance(value, list):
- value = ' '.join(value)
- out.text[key] = value
- return out
-
- @staticmethod
- def attribute_getter(attribute):
- if attribute == 'bpm':
- preprocess_func = get_bpm
- elif attribute == 'key':
- preprocess_func = get_musical_key
- elif attribute in ['moods', 'keywords']:
- preprocess_func = get_keyword_list
- elif attribute in ['genre', 'name', 'instrument']:
- preprocess_func = get_keyword
- elif attribute in ['title', 'artist', 'description']:
- preprocess_func = get_string
- else:
- preprocess_func = None
- return preprocess_func
-
- @classmethod
- def from_dict(cls, dictionary: dict, fields_required: bool = False):
- _dictionary: tp.Dict[str, tp.Any] = {}
-
- # allow a subset of attributes to not be loaded from the dictionary
- # these attributes may be populated later
- post_init_attributes = ['self_wav', 'joint_embed']
- optional_fields = ['keywords']
-
- for _field in fields(cls):
- if _field.name in post_init_attributes:
- continue
- elif _field.name not in dictionary:
- if fields_required and _field.name not in optional_fields:
- raise KeyError(f"Unexpected missing key: {_field.name}")
- else:
- preprocess_func: tp.Optional[tp.Callable] = cls.attribute_getter(_field.name)
- value = dictionary[_field.name]
- if preprocess_func:
- value = preprocess_func(value)
- _dictionary[_field.name] = value
- return cls(**_dictionary)
-
-
-def augment_music_info_description(music_info: MusicInfo, merge_text_p: float = 0.,
- drop_desc_p: float = 0., drop_other_p: float = 0.) -> MusicInfo:
- """Augment MusicInfo description with additional metadata fields and potential dropout.
- Additional textual attributes are added with probability 'merge_text_p' and
- the original textual description is dropped from the augmented description given probability drop_desc_p.
-
- Args:
- music_info (MusicInfo): The music metadata to augment.
- merge_text_p (float): Probability of merging additional metadata to the description.
- If provided value is 0, then no merging is performed.
- drop_desc_p (float): Probability of dropping the original description on text merge.
- If provided value is 0, then no dropout is performed.
- drop_other_p (float): Probability of dropping the other fields used for text augmentation.
- Returns:
- MusicInfo: The MusicInfo with augmented textual description.
- """
- def is_valid_field(field_name: str, field_value: tp.Any) -> bool:
- valid_field_name = field_name in ['key', 'bpm', 'genre', 'moods', 'instrument', 'keywords']
- valid_field_value = field_value is not None and isinstance(field_value, (int, float, str, list))
- keep_field = random.uniform(0, 1) < drop_other_p
- return valid_field_name and valid_field_value and keep_field
-
- def process_value(v: tp.Any) -> str:
- if isinstance(v, (int, float, str)):
- return str(v)
- if isinstance(v, list):
- return ", ".join(v)
- else:
- raise ValueError(f"Unknown type for text value! ({type(v), v})")
-
- description = music_info.description
-
- metadata_text = ""
- if random.uniform(0, 1) < merge_text_p:
- meta_pairs = [f'{_field.name}: {process_value(getattr(music_info, _field.name))}'
- for _field in fields(music_info) if is_valid_field(_field.name, getattr(music_info, _field.name))]
- random.shuffle(meta_pairs)
- metadata_text = ". ".join(meta_pairs)
- description = description if not random.uniform(0, 1) < drop_desc_p else None
- logger.debug(f"Applying text augmentation on MMI info. description: {description}, metadata: {metadata_text}")
-
- if description is None:
- description = metadata_text if len(metadata_text) > 1 else None
- else:
- description = ". ".join([description.rstrip('.'), metadata_text])
- description = description.strip() if description else None
-
- music_info = replace(music_info)
- music_info.description = description
- return music_info
-
-
-class Paraphraser:
- def __init__(self, paraphrase_source: tp.Union[str, Path], paraphrase_p: float = 0.):
- self.paraphrase_p = paraphrase_p
- open_fn = gzip.open if str(paraphrase_source).lower().endswith('.gz') else open
- with open_fn(paraphrase_source, 'rb') as f: # type: ignore
- self.paraphrase_source = json.loads(f.read())
- logger.info(f"loaded paraphrasing source from: {paraphrase_source}")
-
- def sample_paraphrase(self, audio_path: str, description: str):
- if random.random() >= self.paraphrase_p:
- return description
- # keys in the paraphrase source are plain strings, so convert the Path back to str before the lookup
- info_path = str(Path(audio_path).with_suffix('.json'))
- if info_path not in self.paraphrase_source:
- warn_once(logger, f"{info_path} not in paraphrase source!")
- return description
- new_desc = random.choice(self.paraphrase_source[info_path])
- logger.debug(f"{description} -> {new_desc}")
- return new_desc
-
-
-class MusicDataset(InfoAudioDataset):
- """Music dataset is an AudioDataset with music-related metadata.
-
- Args:
- info_fields_required (bool): Whether to enforce having required fields.
- merge_text_p (float): Probability of merging additional metadata to the description.
- drop_desc_p (float): Probability of dropping the original description on text merge.
- drop_other_p (float): Probability of dropping the other fields used for text augmentation.
- joint_embed_attributes (list[str]): A list of attributes for which joint embedding metadata is returned.
- paraphrase_source (str, optional): Path to the .json or .json.gz file containing the
- paraphrases for the description. The json should be a dict with keys are the
- original info path (e.g. track_path.json) and each value is a list of possible
- paraphrases.
- paraphrase_p (float): probability of taking a paraphrase.
-
- See `audiocraft.data.info_audio_dataset.InfoAudioDataset` for full initialization arguments.
- """
- def __init__(self, *args, info_fields_required: bool = True,
- merge_text_p: float = 0., drop_desc_p: float = 0., drop_other_p: float = 0.,
- joint_embed_attributes: tp.List[str] = [],
- paraphrase_source: tp.Optional[str] = None, paraphrase_p: float = 0,
- **kwargs):
- kwargs['return_info'] = True # We require the info for each song of the dataset.
- super().__init__(*args, **kwargs)
- self.info_fields_required = info_fields_required
- self.merge_text_p = merge_text_p
- self.drop_desc_p = drop_desc_p
- self.drop_other_p = drop_other_p
- self.joint_embed_attributes = joint_embed_attributes
- self.paraphraser = None
- if paraphrase_source is not None:
- self.paraphraser = Paraphraser(paraphrase_source, paraphrase_p)
-
- def __getitem__(self, index):
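- # Load the audio segment and its base info, then merge in music metadata from the sibling .json file when it exists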
- wav, info = super().__getitem__(index)
- info_data = info.to_dict()
- music_info_path = Path(info.meta.path).with_suffix('.json')
-
- if Path(music_info_path).exists():
- with open(music_info_path, 'r') as json_file:
- music_data = json.load(json_file)
- music_data.update(info_data)
- music_info = MusicInfo.from_dict(music_data, fields_required=self.info_fields_required)
- if self.paraphraser is not None:
- music_info.description = self.paraphraser.sample_paraphrase(music_info.meta.path, music_info.description)
- if self.merge_text_p:
- music_info = augment_music_info_description(
- music_info, self.merge_text_p, self.drop_desc_p, self.drop_other_p)
- else:
- music_info = MusicInfo.from_dict(info_data, fields_required=False)
-
- music_info.self_wav = WavCondition(
- wav=wav[None], length=torch.tensor([info.n_frames]),
- sample_rate=[info.sample_rate], path=[info.meta.path], seek_time=[info.seek_time])
-
- for att in self.joint_embed_attributes:
- att_value = getattr(music_info, att)
- joint_embed_cond = JointEmbedCondition(
- wav[None], [att_value], torch.tensor([info.n_frames]),
- sample_rate=[info.sample_rate], path=[info.meta.path], seek_time=[info.seek_time])
- music_info.joint_embed[att] = joint_embed_cond
-
- return wav, music_info
-
-
-def get_musical_key(value: tp.Optional[str]) -> tp.Optional[str]:
- """Preprocess key keywords, discarding them if there are multiple key defined."""
- if value is None or (not isinstance(value, str)) or len(value) == 0 or value == 'None':
- return None
- elif ',' in value:
- # For now, we discard values where multiple keys are defined, separated by commas
- return None
- else:
- return value.strip().lower()
-
-
-def get_bpm(value: tp.Optional[str]) -> tp.Optional[float]:
- """Preprocess to a float."""
- if value is None:
- return None
- try:
- return float(value)
- except ValueError:
- return None
diff --git a/spaces/ANLPRL/NER_On_Oral_Medicine/app.py b/spaces/ANLPRL/NER_On_Oral_Medicine/app.py
deleted file mode 100644
index c15a80a77a57e4afb6364279e03eed7320524c65..0000000000000000000000000000000000000000
--- a/spaces/ANLPRL/NER_On_Oral_Medicine/app.py
+++ /dev/null
@@ -1,96 +0,0 @@
-from transformers import AutoTokenizer, TFAutoModel
-import tensorflow as tf
-#from keras.preprocessing.sequence import pad_sequences
-from tensorflow.keras.preprocessing.sequence import pad_sequences
-import pickle
-import numpy as np
-from keras.models import load_model
-import streamlit as st
-import io
-import PyPDF2
-import re
-from PIL import Image
-
-image = Image.open('header-image.png')
-st.image(image)
-
-
-def preprocess(text):
- # Define a regular expression pattern for URLs, non-alphabetic characters, and user names
- # NOTE: the literal 't' in the original character class was most likely meant to be a tab
- pattern = re.compile(r'https?://\S+|[^0-9A-Za-z\' \t]|@\w+')
- # Use the regular expression to find all URLs, non-alphabetic characters, and user names in the text
- matches = pattern.findall(text)
- #Replace the URLs, non-alphabetic characters, and user names with an empty string
- for match in matches:
- text = text.replace(match, ' ')
- return text
-
-
-
-def predict(new_data):
- #Load the trained model
- # Create a LabelEncoder object
- with open("labelencoder1.pkl", 'rb') as f:
- le = pickle.load(f)
- model= tf.keras.models.load_model("biobert-rnn1.h5")
- tokenizer = AutoTokenizer.from_pretrained("dmis-lab/biobert-base-cased-v1.1")
- biobert_model = TFAutoModel.from_pretrained("dmis-lab/biobert-base-cased-v1.1", from_pt=True)
- X_tokens = [tokenizer.encode(text, add_special_tokens=True) for text in new_data.split()]
- X_padded = pad_sequences(X_tokens, maxlen=22, dtype='long', truncating='post', padding='post')
- X_tensor = tf.convert_to_tensor(X_padded)
- X_embeddings = biobert_model(X_tensor)[0]
- pred=model.predict(X_embeddings)
- predicted_labels = list(le.inverse_transform(np.argmax(pred, axis=1)))
- text=new_data.split()
- prev_label=" "
- data=[]
- labels=[]
- for i,(word,label) in enumerate(zip(text,predicted_labels)):
- if label!="Other":
- label=label.split('-')[1]
- if prev_label==label:
- data[-1]=data[-1]+" "+word
- else:
- data.append(word)
- labels.append(label)
- prev_label=label
- return(data,labels)
-
-def highlight(sentence):
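- # Run NER on the sentence and render each recognized entity as colored HTML via st.markdown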
- highlighted_text = ""
- entity_colors = {"Symptom":"#87cefa","Medical Condition":"#ffb6c1"}
- words, labels = predict(sentence)
- for word, label in zip(words, labels):
- if label!="Other" and word!="a":
- if label in ["Medical Condition","Symptom"]:
- word_color = entity_colors.get(label, "yellow")
- label_color = entity_colors.get(label + '-label', "black")
- # NOTE: the original HTML markup was stripped; colored <span> tags for the word and its label are assumed here
- highlighted_text += f'<span style="background-color:{word_color}">{word}</span> <span style="color:{label_color}">{label}</span> '
- else:
- highlighted_text += f'{word} '
- else:
- highlighted_text += f'{word} '
- st.markdown(highlighted_text, unsafe_allow_html=True)
-
-st.subheader('Named Entity Recognizer for Oral Medicine and Radiology')
-sentence = st.text_area('Enter some text:')
-
-st.write("OR")
-
-selected_options = st.selectbox(
-'Choose a text from dropdown: ',
-(" ",
- 'Anemia and gingival bleeding are connected in that anemia can be a contributing cause to the occurrence of gingival bleeding . Anemia is a condition characterized by a shortage in the number or quality of red blood cells, which can lead to a reduced ability of the blood to carry oxygen throughout the body.',
'Hemophilia is a genetic illness that mainly affects the blood's ability to clot properly. Individuals with significant hemophilia are at an elevated possibility of experiencing unforeseen bleeding episodes, which can occur in various parts of the body, including the mouth. Gingival bleeding can be a sign of hemophilia and can present as gum bleeding or mouth sores.',
- "Von Willebrand disease VWD is a genetic condition that impairs the blood's ability to clot properly. One of the symptoms of VWD is spontaneous gingival bleeding , which can occur without any apparent cause or trauma")) # set default to None
-
-
-# Define the colors for each label
-
-if st.button('Analyze'):
- if sentence:
- highlight(sentence)
- elif selected_options:
- highlight(selected_options)
- else:
- st.write('Please enter a sentence or select an option from the dropdown.')
diff --git a/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/work_dirs/resnext101_4xb32_2048e_3c_noF/resnext101_4xb32_2048e_3c_noF.py b/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/work_dirs/resnext101_4xb32_2048e_3c_noF/resnext101_4xb32_2048e_3c_noF.py
deleted file mode 100644
index e2f977ca9e86a412a643dd45598753372c392b9c..0000000000000000000000000000000000000000
--- a/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/work_dirs/resnext101_4xb32_2048e_3c_noF/resnext101_4xb32_2048e_3c_noF.py
+++ /dev/null
@@ -1,131 +0,0 @@
-optim_wrapper = dict(
- type='OptimWrapper',
- optimizer=dict(
- type='Adam',
- lr=0.0001,
- betas=(0.9, 0.999),
- eps=1e-08,
- weight_decay=0,
- amsgrad=False),
- accumulative_counts=8)
-param_scheduler = [
- dict(
- type='LinearLR',
- start_factor=1e-05,
- by_epoch=True,
- end=10,
- convert_to_iter_based=True),
- dict(
- type='MultiStepLR',
- by_epoch=True,
- milestones=[30, 210, 390, 570, 750, 930, 1110, 1290, 1470, 1650, 1830],
- gamma=0.9)
-]
-train_cfg = dict(by_epoch=True, max_epochs=2048, val_interval=16)
-val_cfg = dict()
-test_cfg = dict()
-default_scope = 'mmpretrain'
-default_hooks = dict(
- timer=dict(type='IterTimerHook'),
- logger=dict(type='LoggerHook', interval=10),
- param_scheduler=dict(type='ParamSchedulerHook'),
- checkpoint=dict(type='CheckpointHook', interval=16, save_best='auto'),
- sampler_seed=dict(type='DistSamplerSeedHook'),
- visualization=dict(type='VisualizationHook', enable=False))
-env_cfg = dict(
- cudnn_benchmark=False,
- mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
- dist_cfg=dict(backend='nccl'))
-vis_backends = [dict(type='LocalVisBackend')]
-visualizer = dict(
- type='UniversalVisualizer',
- vis_backends=[dict(type='LocalVisBackend'),
- dict(type='WandbVisBackend')])
-log_level = 'INFO'
-load_from = None
-resume = False
-randomness = dict(seed=None, deterministic=False)
-dataset_type = 'CustomDataset'
-train_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(type='RandomResizedCrop', scale=224),
- dict(type='RandomFlip', prob=0.5, direction='horizontal'),
- dict(type='PackInputs')
-]
-test_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(type='ResizeEdge', scale=256, edge='short'),
- dict(type='CenterCrop', crop_size=224),
- dict(type='PackInputs')
-]
-train_dataloader = dict(
- pin_memory=True,
- persistent_workers=True,
- collate_fn=dict(type='default_collate'),
- batch_size=32,
- num_workers=5,
- dataset=dict(
- type='CustomDataset',
- data_root='../2_preprocess_data_3000',
- with_label=True,
- ann_file='',
- data_prefix='train',
- pipeline=[
- dict(type='LoadImageFromFile'),
- dict(type='RandomResizedCrop', scale=224),
- dict(type='RandomFlip', prob=0.5, direction='horizontal'),
- dict(type='PackInputs')
- ]),
- sampler=dict(type='DefaultSampler', shuffle=True))
-val_dataloader = dict(
- pin_memory=True,
- persistent_workers=True,
- collate_fn=dict(type='default_collate'),
- batch_size=32,
- num_workers=5,
- dataset=dict(
- type='CustomDataset',
- data_root='../2_preprocess_data_3000',
- with_label=True,
- ann_file='',
- data_prefix='val',
- pipeline=[
- dict(type='LoadImageFromFile'),
- dict(type='ResizeEdge', scale=256, edge='short'),
- dict(type='CenterCrop', crop_size=224),
- dict(type='PackInputs')
- ]),
- sampler=dict(type='DefaultSampler', shuffle=False))
-val_evaluator = dict(type='Accuracy', topk=(1, 3))
-test_dataloader = dict(
- pin_memory=True,
- persistent_workers=True,
- collate_fn=dict(type='default_collate'),
- batch_size=32,
- num_workers=5,
- dataset=dict(
- type='CustomDataset',
- data_root='../2_preprocess_data_3000',
- with_label=True,
- ann_file='',
- data_prefix='val',
- pipeline=[
- dict(type='LoadImageFromFile'),
- dict(type='ResizeEdge', scale=256, edge='short'),
- dict(type='CenterCrop', crop_size=224),
- dict(type='PackInputs')
- ]),
- sampler=dict(type='DefaultSampler', shuffle=False))
-test_evaluator = dict(type='Accuracy', topk=(1, 3))
-model = dict(
- type='ImageClassifier',
- backbone=dict(type='ResNeXt', depth=101, in_channels=3),
- neck=dict(type='GlobalAveragePooling'),
- head=dict(
- type='LinearClsHead',
- num_classes=7,
- in_channels=2048,
- loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
- topk=(1, 3)))
-launcher = 'pytorch'
-work_dir = './work_dirs/resnext101_4xb32_2048e_3c_noF'
diff --git a/spaces/Aaaaaaaabdualh/poetry2023/README.md b/spaces/Aaaaaaaabdualh/poetry2023/README.md
deleted file mode 100644
index c958a0c31dcf28cc9fa8983a3f43d6b3b0481875..0000000000000000000000000000000000000000
--- a/spaces/Aaaaaaaabdualh/poetry2023/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Poetry2023
-emoji: 👁
-colorFrom: green
-colorTo: gray
-sdk: gradio
-sdk_version: 3.16.0
-app_file: app.py
-pinned: false
-duplicated_from: akhooli/poetry2023
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/AbandonedMuse/UnlimitedMusicGen/CHANGELOG.md b/spaces/AbandonedMuse/UnlimitedMusicGen/CHANGELOG.md
deleted file mode 100644
index fe15d47ed6a6b77862867a8e83215e8ffd8b0ede..0000000000000000000000000000000000000000
--- a/spaces/AbandonedMuse/UnlimitedMusicGen/CHANGELOG.md
+++ /dev/null
@@ -1,33 +0,0 @@
-## [0.0.2a2] - 2023-07-20
-
-Music Generation set to a max of 720 seconds (12 minutes) to avoid memory issues.
-
-Video editing options (thanks @Surn and @oncorporation).
-
-Music Conditioning segment options
-
-
-## [0.0.2a] - TBD
-
-Improved demo, fixed top p (thanks @jnordberg).
-
-Compressor tanh on output to avoid clipping with some styles (especially piano).
-Now repeating the conditioning periodically if it is too short.
-
-More options when launching Gradio app locally (thanks @ashleykleynhans).
-
-Testing out PyTorch 2.0 memory efficient attention.
-
-Added extended generation (infinite length) by slowly moving the windows.
-Note that other implementations exist: https://github.com/camenduru/MusicGen-colab.
-
-## [0.0.1] - 2023-06-09
-
-Initial release, with model evaluation only.
-
-
-# Changelog
-
-All notable changes to this project will be documented in this file.
-
-The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/holygrail/methods/LayoutMode3.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/holygrail/methods/LayoutMode3.js
deleted file mode 100644
index d31a41f163fc817b83aa2f813807d654c478682a..0000000000000000000000000000000000000000
--- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/holygrail/methods/LayoutMode3.js
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
-Elements:
- ```
- HHH
- LCR
- LFR
- ```
-*/
-
-import {
- GetAddHeaderConfig,
- GetAddLeftSideConfig, GetAddContentConfig, GetAddRightSideConfig,
- GetAddFooterConfig,
- GetAddContainerConfig
-} from './GetAddChildConfig.js';
-import CreatExpandContainer from './CreatExpandContainer.js';
-
-var LayoutMode0 = function (config) {
- var scene = this.scene;
-
- // Add Header
- var header = config.header;
- if (header) {
- this.add(header, GetAddHeaderConfig(config));
- }
-
- /*
- L C R
- L F R
- */
- var bodySizer0 = CreatExpandContainer(scene, 0);
- this.add(bodySizer0, GetAddContainerConfig(config));
-
- // Add Left-side
- var leftSide = config.leftSide;
- if (leftSide) {
- bodySizer0.add(leftSide, GetAddLeftSideConfig(config));
- }
-
- /*
- C
-
- F
- */
- var bodySizer1 = CreatExpandContainer(scene, 1);
- bodySizer0.add(bodySizer1, GetAddContainerConfig(config));
-
- // Add content
- var content = config.content;
- if (content) {
- bodySizer1.add(content, GetAddContentConfig(config));
- }
-
- // Add Footer
- var footer = config.footer;
- if (footer) {
- bodySizer1.add(footer, GetAddFooterConfig(config));
- }
-
- // Add Right-side
- var rightSide = config.rightSide;
- if (rightSide) {
- bodySizer0.add(rightSide, GetAddRightSideConfig(config));
- }
-
-}
-
-export default LayoutMode0;
\ No newline at end of file
diff --git a/spaces/AiMimicry/sovits-models/hubert/hubert_model_onnx.py b/spaces/AiMimicry/sovits-models/hubert/hubert_model_onnx.py
deleted file mode 100644
index d18f3c2a0fc29592a573a9780308d38f059640b9..0000000000000000000000000000000000000000
--- a/spaces/AiMimicry/sovits-models/hubert/hubert_model_onnx.py
+++ /dev/null
@@ -1,217 +0,0 @@
-import copy
-import random
-from typing import Optional, Tuple
-
-import torch
-import torch.nn as nn
-import torch.nn.functional as t_func
-from torch.nn.modules.utils import consume_prefix_in_state_dict_if_present
-
-
-class Hubert(nn.Module):
- def __init__(self, num_label_embeddings: int = 100, mask: bool = True):
- super().__init__()
- self._mask = mask
- self.feature_extractor = FeatureExtractor()
- self.feature_projection = FeatureProjection()
- self.positional_embedding = PositionalConvEmbedding()
- self.norm = nn.LayerNorm(768)
- self.dropout = nn.Dropout(0.1)
- self.encoder = TransformerEncoder(
- nn.TransformerEncoderLayer(
- 768, 12, 3072, activation="gelu", batch_first=True
- ),
- 12,
- )
- self.proj = nn.Linear(768, 256)
-
- self.masked_spec_embed = nn.Parameter(torch.FloatTensor(768).uniform_())
- self.label_embedding = nn.Embedding(num_label_embeddings, 256)
-
- def mask(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
- mask = None
- if self.training and self._mask:
- mask = _compute_mask((x.size(0), x.size(1)), 0.8, 10, x.device, 2)
- x[mask] = self.masked_spec_embed.to(x.dtype)
- return x, mask
-
- def encode(
- self, x: torch.Tensor, layer: Optional[int] = None
- ) -> Tuple[torch.Tensor, torch.Tensor]:
- x = self.feature_extractor(x)
- x = self.feature_projection(x.transpose(1, 2))
- x, mask = self.mask(x)
- x = x + self.positional_embedding(x)
- x = self.dropout(self.norm(x))
- x = self.encoder(x, output_layer=layer)
- return x, mask
-
- def logits(self, x: torch.Tensor) -> torch.Tensor:
- logits = torch.cosine_similarity(
- x.unsqueeze(2),
- self.label_embedding.weight.unsqueeze(0).unsqueeze(0),
- dim=-1,
- )
- return logits / 0.1
-
-
-class HubertSoft(Hubert):
- def __init__(self):
- super().__init__()
-
- def units(self, wav: torch.Tensor) -> torch.Tensor:
- wav = t_func.pad(wav, ((400 - 320) // 2, (400 - 320) // 2))
- x, _ = self.encode(wav)
- return self.proj(x)
-
- def forward(self, x):
- return self.units(x)
-
-class FeatureExtractor(nn.Module):
- def __init__(self):
- super().__init__()
- self.conv0 = nn.Conv1d(1, 512, 10, 5, bias=False)
- self.norm0 = nn.GroupNorm(512, 512)
- self.conv1 = nn.Conv1d(512, 512, 3, 2, bias=False)
- self.conv2 = nn.Conv1d(512, 512, 3, 2, bias=False)
- self.conv3 = nn.Conv1d(512, 512, 3, 2, bias=False)
- self.conv4 = nn.Conv1d(512, 512, 3, 2, bias=False)
- self.conv5 = nn.Conv1d(512, 512, 2, 2, bias=False)
- self.conv6 = nn.Conv1d(512, 512, 2, 2, bias=False)
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- x = t_func.gelu(self.norm0(self.conv0(x)))
- x = t_func.gelu(self.conv1(x))
- x = t_func.gelu(self.conv2(x))
- x = t_func.gelu(self.conv3(x))
- x = t_func.gelu(self.conv4(x))
- x = t_func.gelu(self.conv5(x))
- x = t_func.gelu(self.conv6(x))
- return x
-
-
-class FeatureProjection(nn.Module):
- def __init__(self):
- super().__init__()
- self.norm = nn.LayerNorm(512)
- self.projection = nn.Linear(512, 768)
- self.dropout = nn.Dropout(0.1)
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- x = self.norm(x)
- x = self.projection(x)
- x = self.dropout(x)
- return x
-
-
-class PositionalConvEmbedding(nn.Module):
- def __init__(self):
- super().__init__()
- self.conv = nn.Conv1d(
- 768,
- 768,
- kernel_size=128,
- padding=128 // 2,
- groups=16,
- )
- self.conv = nn.utils.weight_norm(self.conv, name="weight", dim=2)
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- x = self.conv(x.transpose(1, 2))
- x = t_func.gelu(x[:, :, :-1])
- return x.transpose(1, 2)
-
-
-class TransformerEncoder(nn.Module):
- def __init__(
- self, encoder_layer: nn.TransformerEncoderLayer, num_layers: int
- ) -> None:
- super(TransformerEncoder, self).__init__()
- self.layers = nn.ModuleList(
- [copy.deepcopy(encoder_layer) for _ in range(num_layers)]
- )
- self.num_layers = num_layers
-
- def forward(
- self,
- src: torch.Tensor,
- mask: torch.Tensor = None,
- src_key_padding_mask: torch.Tensor = None,
- output_layer: Optional[int] = None,
- ) -> torch.Tensor:
- output = src
- for layer in self.layers[:output_layer]:
- output = layer(
- output, src_mask=mask, src_key_padding_mask=src_key_padding_mask
- )
- return output
-
-
-def _compute_mask(
- shape: Tuple[int, int],
- mask_prob: float,
- mask_length: int,
- device: torch.device,
- min_masks: int = 0,
-) -> torch.Tensor:
- batch_size, sequence_length = shape
-
- if mask_length < 1:
- raise ValueError("`mask_length` has to be bigger than 0.")
-
- if mask_length > sequence_length:
- raise ValueError(
- f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length} and `sequence_length`: {sequence_length}`"
- )
-
- # compute number of masked spans in batch
- num_masked_spans = int(mask_prob * sequence_length / mask_length + random.random())
- num_masked_spans = max(num_masked_spans, min_masks)
-
- # make sure num masked indices <= sequence_length
- if num_masked_spans * mask_length > sequence_length:
- num_masked_spans = sequence_length // mask_length
-
- # SpecAugment mask to fill
- mask = torch.zeros((batch_size, sequence_length), device=device, dtype=torch.bool)
-
- # uniform distribution to sample from, make sure that offset samples are < sequence_length
- uniform_dist = torch.ones(
- (batch_size, sequence_length - (mask_length - 1)), device=device
- )
-
- # get random indices to mask
- mask_indices = torch.multinomial(uniform_dist, num_masked_spans)
-
- # expand masked indices to masked spans
- mask_indices = (
- mask_indices.unsqueeze(dim=-1)
- .expand((batch_size, num_masked_spans, mask_length))
- .reshape(batch_size, num_masked_spans * mask_length)
- )
- offsets = (
- torch.arange(mask_length, device=device)[None, None, :]
- .expand((batch_size, num_masked_spans, mask_length))
- .reshape(batch_size, num_masked_spans * mask_length)
- )
- mask_idxs = mask_indices + offsets
-
- # scatter indices to mask
- mask = mask.scatter(1, mask_idxs, True)
-
- return mask
-
-
-def hubert_soft(
- path: str,
-) -> HubertSoft:
- r"""HuBERT-Soft from `"A Comparison of Discrete and Soft Speech Units for Improved Voice Conversion"`.
- Args:
- path (str): path of a pretrained model
- """
- hubert = HubertSoft()
- checkpoint = torch.load(path)
- consume_prefix_in_state_dict_if_present(checkpoint, "module.")
- hubert.load_state_dict(checkpoint)
- hubert.eval()
- return hubert
diff --git a/spaces/Akmyradov/TurkmenTTSweSTT/uroman/bin/uroman-quick.pl b/spaces/Akmyradov/TurkmenTTSweSTT/uroman/bin/uroman-quick.pl
deleted file mode 100644
index 3c2bb6a84e891d68e7ee996dd72d154e8820c05d..0000000000000000000000000000000000000000
--- a/spaces/Akmyradov/TurkmenTTSweSTT/uroman/bin/uroman-quick.pl
+++ /dev/null
@@ -1,58 +0,0 @@
-#!/usr/bin/perl -w
-
-# uroman Nov. 12, 2015 - July 25, 2016
-# version v0.7
-# Author: Ulf Hermjakob
-
-# Usage: uroman-quick.pl {-l [tur|uig|ukr|yid]} < STDIN
-# currently only for Arabic script languages, incl. Uyghur
-
-$|=1;
-
-use FindBin;
-use Cwd "abs_path";
-use File::Basename qw(dirname);
-use File::Spec;
-
-my $bin_dir = abs_path(dirname($0));
-my $root_dir = File::Spec->catfile($bin_dir, File::Spec->updir());
-my $data_dir = File::Spec->catfile($root_dir, "data");
-my $lib_dir = File::Spec->catfile($root_dir, "lib");
-
-use lib "$FindBin::Bin/../lib";
-use NLP::Romanizer;
-use NLP::UTF8;
-$romanizer = NLP::Romanizer;
-%ht = ();
-$lang_code = "";
-
-while (@ARGV) {
- $arg = shift @ARGV;
- if ($arg =~ /^-+(l|lc|lang-code)$/) {
- $lang_code = lc (shift @ARGV || "")
- } else {
- print STDERR "Ignoring unrecognized arg $arg\n";
- }
-}
-
-$romanization_table_arabic_block_filename = File::Spec->catfile($data_dir, "romanization-table-arabic-block.txt");
-$romanization_table_filename = File::Spec->catfile($data_dir, "romanization-table.txt");
-
-$romanizer->load_romanization_table(*ht, $romanization_table_arabic_block_filename);
-$romanizer->load_romanization_table(*ht, $romanization_table_filename);
-
-$line_number = 0;
-while (<>) {
- $line_number++;
- my $line = $_;
- print $romanizer->quick_romanize($line, $lang_code, *ht) . "\n";
- if ($line_number =~ /0000$/) {
- print STDERR $line_number;
- } elsif ($line_number =~ /000$/) {
- print STDERR ".";
- }
-}
-print STDERR "\n";
-
-exit 0;
-
diff --git a/spaces/Akshay-More-007/starcoder/apikey.py b/spaces/Akshay-More-007/starcoder/apikey.py
deleted file mode 100644
index 389f16d5ed962e469bfa13aaa862dbca7ef0e652..0000000000000000000000000000000000000000
--- a/spaces/Akshay-More-007/starcoder/apikey.py
+++ /dev/null
@@ -1 +0,0 @@
-apikey_hungingface = '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'
\ No newline at end of file
diff --git a/spaces/AlexWang/lama/models/ade20k/segm_lib/nn/parallel/__init__.py b/spaces/AlexWang/lama/models/ade20k/segm_lib/nn/parallel/__init__.py
deleted file mode 100644
index 9b52f49cc0755562218a460483cbf02514ddd773..0000000000000000000000000000000000000000
--- a/spaces/AlexWang/lama/models/ade20k/segm_lib/nn/parallel/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from .data_parallel import UserScatteredDataParallel, user_scattered_collate, async_copy_to
diff --git a/spaces/Alichuan/VITS-Umamusume-voice-synthesizer/ONNXVITS_models.py b/spaces/Alichuan/VITS-Umamusume-voice-synthesizer/ONNXVITS_models.py
deleted file mode 100644
index acd00238895d57ba878fd0211d5654250fb10061..0000000000000000000000000000000000000000
--- a/spaces/Alichuan/VITS-Umamusume-voice-synthesizer/ONNXVITS_models.py
+++ /dev/null
@@ -1,509 +0,0 @@
-import copy
-import math
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-import commons
-import ONNXVITS_modules as modules
-import attentions
-import monotonic_align
-
-from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
-from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
-from commons import init_weights, get_padding
-
-
-class StochasticDurationPredictor(nn.Module):
- def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0):
- super().__init__()
- filter_channels = in_channels  # this override should be removed in a future version
- self.in_channels = in_channels
- self.filter_channels = filter_channels
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.n_flows = n_flows
- self.gin_channels = gin_channels
-
- self.log_flow = modules.Log()
- self.flows = nn.ModuleList()
- self.flows.append(modules.ElementwiseAffine(2))
- for i in range(n_flows):
- self.flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3))
- self.flows.append(modules.Flip())
-
- self.post_pre = nn.Conv1d(1, filter_channels, 1)
- self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1)
- self.post_convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
- self.post_flows = nn.ModuleList()
- self.post_flows.append(modules.ElementwiseAffine(2))
- for i in range(4):
- self.post_flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3))
- self.post_flows.append(modules.Flip())
-
- self.pre = nn.Conv1d(in_channels, filter_channels, 1)
- self.proj = nn.Conv1d(filter_channels, filter_channels, 1)
- self.convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
- if gin_channels != 0:
- self.cond = nn.Conv1d(gin_channels, filter_channels, 1)
-
- self.w = None
- self.reverse = None
- self.noise_scale = None
- def forward(self, x, x_mask, g=None):
- w = self.w
- reverse = self.reverse
- noise_scale = self.noise_scale
-
- x = torch.detach(x)
- x = self.pre(x)
- if g is not None:
- g = torch.detach(g)
- x = x + self.cond(g)
- x = self.convs(x, x_mask)
- x = self.proj(x) * x_mask
-
- if not reverse:
- flows = self.flows
- assert w is not None
-
- logdet_tot_q = 0
- h_w = self.post_pre(w)
- h_w = self.post_convs(h_w, x_mask)
- h_w = self.post_proj(h_w) * x_mask
- e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask
- z_q = e_q
- for flow in self.post_flows:
- z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w))
- logdet_tot_q += logdet_q
- z_u, z1 = torch.split(z_q, [1, 1], 1)
- u = torch.sigmoid(z_u) * x_mask
- z0 = (w - u) * x_mask
- logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1,2])
- logq = torch.sum(-0.5 * (math.log(2*math.pi) + (e_q**2)) * x_mask, [1,2]) - logdet_tot_q
-
- logdet_tot = 0
- z0, logdet = self.log_flow(z0, x_mask)
- logdet_tot += logdet
- z = torch.cat([z0, z1], 1)
- for flow in flows:
- z, logdet = flow(z, x_mask, g=x, reverse=reverse)
- logdet_tot = logdet_tot + logdet
- nll = torch.sum(0.5 * (math.log(2*math.pi) + (z**2)) * x_mask, [1,2]) - logdet_tot
- return nll + logq # [b]
- else:
- flows = list(reversed(self.flows))
- flows = flows[:-2] + [flows[-1]] # remove a useless vflow
- z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale
- for flow in flows:
- z = flow(z, x_mask, g=x, reverse=reverse)
- z0, z1 = torch.split(z, [1, 1], 1)
- logw = z0
- return logw
-
-
-class TextEncoder(nn.Module):
- def __init__(self,
- n_vocab,
- out_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout):
- super().__init__()
- self.n_vocab = n_vocab
- self.out_channels = out_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
-
- self.emb = nn.Embedding(n_vocab, hidden_channels)
- nn.init.normal_(self.emb.weight, 0.0, hidden_channels**-0.5)
-
- self.encoder = attentions.Encoder(
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout)
- self.proj= nn.Conv1d(hidden_channels, out_channels * 2, 1)
-
- def forward(self, x, x_lengths):
- x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h]
- x = torch.transpose(x, 1, -1) # [b, h, t]
- x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
-
- x = self.encoder(x * x_mask, x_mask)
- stats = self.proj(x) * x_mask
-
- m, logs = torch.split(stats, self.out_channels, dim=1)
- return x, m, logs, x_mask
-
-
-class ResidualCouplingBlock(nn.Module):
- def __init__(self,
- channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- n_flows=4,
- gin_channels=0):
- super().__init__()
- self.channels = channels
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.n_flows = n_flows
- self.gin_channels = gin_channels
-
- self.flows = nn.ModuleList()
- for i in range(n_flows):
- self.flows.append(modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True))
- self.flows.append(modules.Flip())
-
- self.reverse = None
- def forward(self, x, x_mask, g=None):
- reverse = self.reverse
- if not reverse:
- for flow in self.flows:
- x, _ = flow(x, x_mask, g=g, reverse=reverse)
- else:
- for flow in reversed(self.flows):
- x = flow(x, x_mask, g=g, reverse=reverse)
- return x
-
-
-class PosteriorEncoder(nn.Module):
- def __init__(self,
- in_channels,
- out_channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- gin_channels=0):
- super().__init__()
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.gin_channels = gin_channels
-
- self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
- self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels)
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
-
- def forward(self, x, x_lengths, g=None):
- x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
- x = self.pre(x) * x_mask # x_in : [b, c, t] -> [b, h, t]
- x = self.enc(x, x_mask, g=g) # x_in : [b, h, t], g : [b, h, 1], x = x_in + g
- stats = self.proj(x) * x_mask
- m, logs = torch.split(stats, self.out_channels, dim=1)
- z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
- return z, m, logs, x_mask # z, m, logs : [b, h, t]
-
-
-class Generator(torch.nn.Module):
- def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=0):
- super(Generator, self).__init__()
- self.num_kernels = len(resblock_kernel_sizes)
- self.num_upsamples = len(upsample_rates)
- self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3)
- resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2
-
- self.ups = nn.ModuleList()
- for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
- self.ups.append(weight_norm(
- ConvTranspose1d(upsample_initial_channel//(2**i), upsample_initial_channel//(2**(i+1)),
- k, u, padding=(k-u)//2)))
-
- self.resblocks = nn.ModuleList()
- for i in range(len(self.ups)):
- ch = upsample_initial_channel//(2**(i+1))
- for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)):
- self.resblocks.append(resblock(ch, k, d))
-
- self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
- self.ups.apply(init_weights)
-
- if gin_channels != 0:
- self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
-
- def forward(self, x, g=None):
- x = self.conv_pre(x)
- if g is not None:
- x = x + self.cond(g)
-
- for i in range(self.num_upsamples):
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- x = self.ups[i](x)
- xs = None
- for j in range(self.num_kernels):
- if xs is None:
- xs = self.resblocks[i*self.num_kernels+j](x)
- else:
- xs += self.resblocks[i*self.num_kernels+j](x)
- x = xs / self.num_kernels
- x = F.leaky_relu(x)
- x = self.conv_post(x)
- x = torch.tanh(x)
-
- return x
-
- def remove_weight_norm(self):
- print('Removing weight norm...')
- for l in self.ups:
- remove_weight_norm(l)
- for l in self.resblocks:
- l.remove_weight_norm()
-
-
-class DiscriminatorP(torch.nn.Module):
- def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
- super(DiscriminatorP, self).__init__()
- self.period = period
- self.use_spectral_norm = use_spectral_norm
- norm_f = spectral_norm if use_spectral_norm else weight_norm
- self.convs = nn.ModuleList([
- norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
- norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
- norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
- norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
- norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))),
- ])
- self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
-
- def forward(self, x):
- fmap = []
-
- # 1d to 2d
- b, c, t = x.shape
- if t % self.period != 0: # pad first
- n_pad = self.period - (t % self.period)
- x = F.pad(x, (0, n_pad), "reflect")
- t = t + n_pad
- x = x.view(b, c, t // self.period, self.period)
-
- for l in self.convs:
- x = l(x)
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- fmap.append(x)
- x = self.conv_post(x)
- fmap.append(x)
- x = torch.flatten(x, 1, -1)
-
- return x, fmap
-
-
-class DiscriminatorS(torch.nn.Module):
- def __init__(self, use_spectral_norm=False):
- super(DiscriminatorS, self).__init__()
- norm_f = spectral_norm if use_spectral_norm else weight_norm
- self.convs = nn.ModuleList([
- norm_f(Conv1d(1, 16, 15, 1, padding=7)),
- norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
- norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
- norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
- norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
- norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
- ])
- self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
-
- def forward(self, x):
- fmap = []
-
- for l in self.convs:
- x = l(x)
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- fmap.append(x)
- x = self.conv_post(x)
- fmap.append(x)
- x = torch.flatten(x, 1, -1)
-
- return x, fmap
-
-
-class MultiPeriodDiscriminator(torch.nn.Module):
- def __init__(self, use_spectral_norm=False):
- super(MultiPeriodDiscriminator, self).__init__()
- periods = [2,3,5,7,11]
-
- discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
- discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods]
- self.discriminators = nn.ModuleList(discs)
-
- def forward(self, y, y_hat):
- y_d_rs = []
- y_d_gs = []
- fmap_rs = []
- fmap_gs = []
- for i, d in enumerate(self.discriminators):
- y_d_r, fmap_r = d(y)
- y_d_g, fmap_g = d(y_hat)
- y_d_rs.append(y_d_r)
- y_d_gs.append(y_d_g)
- fmap_rs.append(fmap_r)
- fmap_gs.append(fmap_g)
-
- return y_d_rs, y_d_gs, fmap_rs, fmap_gs
-
-
-
-class SynthesizerTrn(nn.Module):
- """
- Synthesizer for Training
- """
-
- def __init__(self,
- n_vocab,
- spec_channels,
- segment_size,
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- n_speakers=0,
- gin_channels=0,
- use_sdp=True,
- **kwargs):
-
- super().__init__()
- self.n_vocab = n_vocab
- self.spec_channels = spec_channels
- self.inter_channels = inter_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.resblock = resblock
- self.resblock_kernel_sizes = resblock_kernel_sizes
- self.resblock_dilation_sizes = resblock_dilation_sizes
- self.upsample_rates = upsample_rates
- self.upsample_initial_channel = upsample_initial_channel
- self.upsample_kernel_sizes = upsample_kernel_sizes
- self.segment_size = segment_size
- self.n_speakers = n_speakers
- self.gin_channels = gin_channels
-
- self.use_sdp = use_sdp
-
- self.enc_p = TextEncoder(n_vocab,
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout)
- self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels)
- self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels)
- self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels)
-
- self.dp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels)
-
- if n_speakers > 0:
- self.emb_g = nn.Embedding(n_speakers, gin_channels)
-
- def forward(self, x, x_lengths, sid=None, noise_scale=.667, length_scale=1, noise_scale_w=.8, max_len=None):
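-        # Note: this forward doubles as an export routine: each submodule
-        # (enc_p, dp, flow, dec) is serialized to ONNX as it runs.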
- torch.onnx.export(
- self.enc_p,
- (x, x_lengths),
- "ONNX_net/enc_p.onnx",
- input_names=["x", "x_lengths"],
- output_names=["xout", "m_p", "logs_p", "x_mask"],
- dynamic_axes={
- "x" : [1],
- "xout" : [2],
- "m_p" : [2],
- "logs_p" : [2],
- "x_mask" : [2]
- },
- verbose=True,
- )
- x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths)
-
- if self.n_speakers > 0:
- g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]
- else:
- g = None
-
- self.dp.reverse = True
- self.dp.noise_scale = noise_scale_w
- torch.onnx.export(
- self.dp,
- (x, x_mask, g),
- "ONNX_net/dp.onnx",
- input_names=["x", "x_mask", "g"],
- output_names=["logw"],
- dynamic_axes={
- "x" : [2],
- "x_mask" : [2],
- "logw" : [2]
- },
- verbose=True,
- )
- logw = self.dp(x, x_mask, g=g)
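-        # logw is the predicted log-duration per input token; exp() turns it into
-        # frame counts, and length_scale stretches or compresses the speech rate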
- w = torch.exp(logw) * x_mask * length_scale
- w_ceil = torch.ceil(w)
- y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()
- y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype)
- attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
- attn = commons.generate_path(w_ceil, attn_mask)
-
- m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t']
- logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t']
-
- z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale
-
- self.flow.reverse = True
- torch.onnx.export(
- self.flow,
- (z_p, y_mask, g),
- "ONNX_net/flow.onnx",
- input_names=["z_p", "y_mask", "g"],
- output_names=["z"],
- dynamic_axes={
- "z_p" : [2],
- "y_mask" : [2],
- "z" : [2]
- },
- verbose=True,
- )
- z = self.flow(z_p, y_mask, g=g)
-        z_in = (z * y_mask)[:, :, :max_len]
-
- torch.onnx.export(
- self.dec,
- (z_in, g),
- "ONNX_net/dec.onnx",
- input_names=["z_in", "g"],
- output_names=["o"],
- dynamic_axes={
- "z_in" : [2],
- "o" : [2]
- },
- verbose=True,
- )
- o = self.dec(z_in, g=g)
- return o
diff --git a/spaces/Alichuan/VITS-Umamusume-voice-synthesizer/attentions.py b/spaces/Alichuan/VITS-Umamusume-voice-synthesizer/attentions.py
deleted file mode 100644
index 86bc73b5fe98cc7b443e9078553920346c996707..0000000000000000000000000000000000000000
--- a/spaces/Alichuan/VITS-Umamusume-voice-synthesizer/attentions.py
+++ /dev/null
@@ -1,300 +0,0 @@
-import math
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-import commons
-from modules import LayerNorm
-
-
-class Encoder(nn.Module):
- def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, **kwargs):
- super().__init__()
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.window_size = window_size
-
- self.drop = nn.Dropout(p_dropout)
- self.attn_layers = nn.ModuleList()
- self.norm_layers_1 = nn.ModuleList()
- self.ffn_layers = nn.ModuleList()
- self.norm_layers_2 = nn.ModuleList()
- for i in range(self.n_layers):
- self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size))
- self.norm_layers_1.append(LayerNorm(hidden_channels))
- self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout))
- self.norm_layers_2.append(LayerNorm(hidden_channels))
-
- def forward(self, x, x_mask):
- attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
- x = x * x_mask
- for i in range(self.n_layers):
- y = self.attn_layers[i](x, x, attn_mask)
- y = self.drop(y)
- x = self.norm_layers_1[i](x + y)
-
- y = self.ffn_layers[i](x, x_mask)
- y = self.drop(y)
- x = self.norm_layers_2[i](x + y)
- x = x * x_mask
- return x
-
-
-class Decoder(nn.Module):
- def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs):
- super().__init__()
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.proximal_bias = proximal_bias
- self.proximal_init = proximal_init
-
- self.drop = nn.Dropout(p_dropout)
- self.self_attn_layers = nn.ModuleList()
- self.norm_layers_0 = nn.ModuleList()
- self.encdec_attn_layers = nn.ModuleList()
- self.norm_layers_1 = nn.ModuleList()
- self.ffn_layers = nn.ModuleList()
- self.norm_layers_2 = nn.ModuleList()
- for i in range(self.n_layers):
- self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init))
- self.norm_layers_0.append(LayerNorm(hidden_channels))
- self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout))
- self.norm_layers_1.append(LayerNorm(hidden_channels))
- self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True))
- self.norm_layers_2.append(LayerNorm(hidden_channels))
-
- def forward(self, x, x_mask, h, h_mask):
- """
- x: decoder input
- h: encoder output
- """
- self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype)
- encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
- x = x * x_mask
- for i in range(self.n_layers):
- y = self.self_attn_layers[i](x, x, self_attn_mask)
- y = self.drop(y)
- x = self.norm_layers_0[i](x + y)
-
- y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
- y = self.drop(y)
- x = self.norm_layers_1[i](x + y)
-
- y = self.ffn_layers[i](x, x_mask)
- y = self.drop(y)
- x = self.norm_layers_2[i](x + y)
- x = x * x_mask
- return x
-
-
-class MultiHeadAttention(nn.Module):
- def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False):
- super().__init__()
- assert channels % n_heads == 0
-
- self.channels = channels
- self.out_channels = out_channels
- self.n_heads = n_heads
- self.p_dropout = p_dropout
- self.window_size = window_size
- self.heads_share = heads_share
- self.block_length = block_length
- self.proximal_bias = proximal_bias
- self.proximal_init = proximal_init
- self.attn = None
-
- self.k_channels = channels // n_heads
- self.conv_q = nn.Conv1d(channels, channels, 1)
- self.conv_k = nn.Conv1d(channels, channels, 1)
- self.conv_v = nn.Conv1d(channels, channels, 1)
- self.conv_o = nn.Conv1d(channels, out_channels, 1)
- self.drop = nn.Dropout(p_dropout)
-
- if window_size is not None:
- n_heads_rel = 1 if heads_share else n_heads
- rel_stddev = self.k_channels**-0.5
- self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
- self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
-
- nn.init.xavier_uniform_(self.conv_q.weight)
- nn.init.xavier_uniform_(self.conv_k.weight)
- nn.init.xavier_uniform_(self.conv_v.weight)
- if proximal_init:
- with torch.no_grad():
- self.conv_k.weight.copy_(self.conv_q.weight)
- self.conv_k.bias.copy_(self.conv_q.bias)
-
- def forward(self, x, c, attn_mask=None):
- q = self.conv_q(x)
- k = self.conv_k(c)
- v = self.conv_v(c)
-
- x, self.attn = self.attention(q, k, v, mask=attn_mask)
-
- x = self.conv_o(x)
- return x
-
- def attention(self, query, key, value, mask=None):
- # reshape [b, d, t] -> [b, n_h, t, d_k]
- b, d, t_s, t_t = (*key.size(), query.size(2))
- query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
- key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
- value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
-
- scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
- if self.window_size is not None:
- assert t_s == t_t, "Relative attention is only available for self-attention."
- key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
-      rel_logits = self._matmul_with_relative_keys(query / math.sqrt(self.k_channels), key_relative_embeddings)
- scores_local = self._relative_position_to_absolute_position(rel_logits)
- scores = scores + scores_local
- if self.proximal_bias:
- assert t_s == t_t, "Proximal bias is only available for self-attention."
- scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype)
- if mask is not None:
- scores = scores.masked_fill(mask == 0, -1e4)
- if self.block_length is not None:
- assert t_s == t_t, "Local attention is only available for self-attention."
- block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length)
- scores = scores.masked_fill(block_mask == 0, -1e4)
- p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s]
- p_attn = self.drop(p_attn)
- output = torch.matmul(p_attn, value)
- if self.window_size is not None:
- relative_weights = self._absolute_position_to_relative_position(p_attn)
- value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s)
- output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings)
- output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t]
- return output, p_attn
-
- def _matmul_with_relative_values(self, x, y):
- """
- x: [b, h, l, m]
- y: [h or 1, m, d]
- ret: [b, h, l, d]
- """
- ret = torch.matmul(x, y.unsqueeze(0))
- return ret
-
- def _matmul_with_relative_keys(self, x, y):
- """
- x: [b, h, l, d]
- y: [h or 1, m, d]
- ret: [b, h, l, m]
- """
- ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
- return ret
-
- def _get_relative_embeddings(self, relative_embeddings, length):
- max_relative_position = 2 * self.window_size + 1
- # Pad first before slice to avoid using cond ops.
- pad_length = max(length - (self.window_size + 1), 0)
- slice_start_position = max((self.window_size + 1) - length, 0)
- slice_end_position = slice_start_position + 2 * length - 1
- if pad_length > 0:
- padded_relative_embeddings = F.pad(
- relative_embeddings,
- commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]))
- else:
- padded_relative_embeddings = relative_embeddings
- used_relative_embeddings = padded_relative_embeddings[:,slice_start_position:slice_end_position]
- return used_relative_embeddings
-
- def _relative_position_to_absolute_position(self, x):
- """
- x: [b, h, l, 2*l-1]
- ret: [b, h, l, l]
- """
- batch, heads, length, _ = x.size()
- # Concat columns of pad to shift from relative to absolute indexing.
- x = F.pad(x, commons.convert_pad_shape([[0,0],[0,0],[0,0],[0,1]]))
-
- # Concat extra elements so to add up to shape (len+1, 2*len-1).
- x_flat = x.view([batch, heads, length * 2 * length])
- x_flat = F.pad(x_flat, commons.convert_pad_shape([[0,0],[0,0],[0,length-1]]))
-
- # Reshape and slice out the padded elements.
- x_final = x_flat.view([batch, heads, length+1, 2*length-1])[:, :, :length, length-1:]
- return x_final
-
- def _absolute_position_to_relative_position(self, x):
- """
- x: [b, h, l, l]
- ret: [b, h, l, 2*l-1]
- """
- batch, heads, length, _ = x.size()
-    # pad along the column dimension
- x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length-1]]))
- x_flat = x.view([batch, heads, length**2 + length*(length -1)])
- # add 0's in the beginning that will skew the elements after reshape
- x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
- x_final = x_flat.view([batch, heads, length, 2*length])[:,:,:,1:]
- return x_final
-
- def _attention_bias_proximal(self, length):
- """Bias for self-attention to encourage attention to close positions.
- Args:
- length: an integer scalar.
- Returns:
- a Tensor with shape [1, 1, length, length]
- """
- r = torch.arange(length, dtype=torch.float32)
- diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
- return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
-
-
-class FFN(nn.Module):
- def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False):
- super().__init__()
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.filter_channels = filter_channels
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.activation = activation
- self.causal = causal
-
- if causal:
- self.padding = self._causal_padding
- else:
- self.padding = self._same_padding
-
- self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
- self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
- self.drop = nn.Dropout(p_dropout)
-
- def forward(self, x, x_mask):
- x = self.conv_1(self.padding(x * x_mask))
- if self.activation == "gelu":
- x = x * torch.sigmoid(1.702 * x)
- else:
- x = torch.relu(x)
- x = self.drop(x)
- x = self.conv_2(self.padding(x * x_mask))
- return x * x_mask
-
- def _causal_padding(self, x):
- if self.kernel_size == 1:
- return x
- pad_l = self.kernel_size - 1
- pad_r = 0
- padding = [[0, 0], [0, 0], [pad_l, pad_r]]
- x = F.pad(x, commons.convert_pad_shape(padding))
- return x
-
- def _same_padding(self, x):
- if self.kernel_size == 1:
- return x
- pad_l = (self.kernel_size - 1) // 2
- pad_r = self.kernel_size // 2
- padding = [[0, 0], [0, 0], [pad_l, pad_r]]
- x = F.pad(x, commons.convert_pad_shape(padding))
- return x
diff --git a/spaces/Alinadi98/movie_recommendation_system/README.md b/spaces/Alinadi98/movie_recommendation_system/README.md
deleted file mode 100644
index 2d03535a6c7989909804d8b4e7da92acb0efbd74..0000000000000000000000000000000000000000
--- a/spaces/Alinadi98/movie_recommendation_system/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Movie Recommendation System
-emoji: 🐢
-colorFrom: yellow
-colorTo: yellow
-sdk: streamlit
-sdk_version: 1.17.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Ameaou/academic-chatgpt3.1/docs/self_analysis.md b/spaces/Ameaou/academic-chatgpt3.1/docs/self_analysis.md
deleted file mode 100644
index 28f6682c3bc70c884b31322350099b156e770bf0..0000000000000000000000000000000000000000
--- a/spaces/Ameaou/academic-chatgpt3.1/docs/self_analysis.md
+++ /dev/null
@@ -1,256 +0,0 @@
-# chatgpt-academic Project Self-Analysis Report
-(Author's note: everything below was generated in one click by this project calling ChatGPT; blame GPT for any inaccuracies 😄)
-
-## Summarize the program's overall functionality and architecture, then organize each file's function into a markdown table.
-
-Overall summary:
-
-This program is a scientific-paper assistant built on natural language processing and machine learning. Its main features include a chatbot, batch PDF summarization, batch PDF translation, function-comment generation, and project source-code analysis. It serves a web UI built on Gradio and integrates proxy support and automatic updates, improving the overall user experience.
-
-File function table:
-
-| File | Function |
-| --- | --- |
-| check_proxy.py | Checks that the proxy is valid and available |
-| colorful.py | Constants for preset colors, used across many UI elements |
-| config.py | Global configuration |
-| config_private.py | A second config file used alongside config.py to hold private information |
-| core_functional.py | TextFunctional classes and basic feature functions |
-| crazy_functional.py | A large set of advanced and experimental feature functions |
-| main.py | Main entry point; the GUI main window and primary UI management |
-| theme.py | Colors for the preset themes |
-| toolbox.py | Assorted useful utility functions |
-| crazy_functions\crazy_utils.py | Helper functions backing the advanced features |
-| crazy_functions\Latex全文润色.py | Polishes and formats the full text of LaTeX files |
-| crazy_functions\Latex全文翻译.py | Translates the contents of LaTeX files |
-| crazy_functions\_\_init\_\_.py | Imports the feature functions from crazy_functional.py |
-| crazy_functions\下载arxiv论文翻译摘要.py | Downloads papers from arXiv and extracts the key information |
-| crazy_functions\代码重写为全英文_多线程.py | Translates Chinese Python files entirely into English |
-| crazy_functions\总结word文档.py | Extracts the key content of Word files into summaries |
-| crazy_functions\批量Markdown翻译.py | Batch-translates Markdown files |
-| crazy_functions\批量总结PDF文档.py | Batch-extracts summaries from PDF files |
-| crazy_functions\批量总结PDF文档pdfminer.py | Batch-extracts summaries from PDF files (pdfminer backend) |
-| crazy_functions\批量翻译PDF文档_多线程.py | Batch-translates PDF files |
-| crazy_functions\理解PDF文档内容.py | Batch-analyzes PDF files and extracts summaries |
-| crazy_functions\生成函数注释.py | Auto-generates comments for the functions in Python files |
-| crazy_functions\解析项目源代码.py | Parses and analyzes a given project's source code |
-| crazy_functions\询问多个大语言模型.py | Sends the input text to multiple LLMs and processes the replies |
-| crazy_functions\读文献写摘要.py | Reads literature per the user's input and generates a summary |
-| crazy_functions\谷歌检索小助手.py | Searches Google Scholar for user-provided papers and extracts the details |
-| crazy_functions\高级功能函数模板.py | Template functions for implementing advanced features |
-| request_llm\bridge_all.py | Handles interaction with the LLMs |
-| request_llm\bridge_chatglm.py | Chat via the ChatGLM model |
-| request_llm\bridge_chatgpt.py | Implements the dialogue-generation features |
-| request_llm\bridge_tgui.py | Interacts with users over WebSockets and generates text output |
-
-
-
-## [0/31] Please give an overview of the following program file: H:\chatgpt_academic_resolve\check_proxy.py
-
-This file contains four main functions: check_proxy, backup_and_download, patch_and_restart, and auto_update. check_proxy verifies that the proxy is usable; backup_and_download handles the backup and download steps of the one-click update; patch_and_restart is the key part of the one-click update protocol, overwriting files and restarting; auto_update checks the version, asks the user, and runs the one-click update automatically. The file mainly relies on the requests, json, shutil, zipfile, distutils, and subprocess standard-library modules, plus the project's own toolbox and colorful modules.
-
-## [1/31] Please give an overview of the following program file: H:\chatgpt_academic_resolve\colorful.py
-
-This file implements print functions with colored output. On Linux the color setup is skipped; otherwise the colorama library provides the colors. Both dark and bright variants are offered, along with aliases for the print functions. When output is not a terminal, all print functions are redefined so that redirected output does not produce broken logs.
-
-## [2/31] Please give an overview of the following program file: H:\chatgpt_academic_resolve\config.py
-
-This is a configuration file whose main purpose is to supply API keys and similar information and to tune the experience, e.g. dialog-box height and layout. It also holds other settings, such as the number of parallel threads and retry limits.
-
-## [3/31] Please give an overview of the following program file: H:\chatgpt_academic_resolve\config_private.py
-
-This is a Python file named config_private.py that configures the API_KEY and proxy information. API_KEY is a private key for accessing protected APIs. Setting USE_PROXY to True enables the proxy, and the proxies variable holds the proxy address and protocol. Fill in a valid API_KEY and proxy details before using this file.
-
-## [4/31] Please give an overview of the following program file: H:\chatgpt_academic_resolve\core_functional.py
-
-This file is a Python module named "core_functional.py". It defines a dictionary with the configuration of each core feature, such as English academic polishing, Chinese academic polishing, and grammar checking. Every feature carries a prefix and a suffix prompt: the prefix describes the task and its requirements, while the suffix supplies extra information. Some features also define dedicated handler functions and button colors.
-
-## [5/31] Please give an overview of the following program file: H:\chatgpt_academic_resolve\crazy_functional.py
-
-This Python file, crazy_functional.py, imports a helper named HotReload and defines get_crazy_functions(). The function assembles three groups of plugins: a first group that is finished, a second group that is tested but still shy of perfect, and a third group that has not been tested thoroughly. Each plugin has a name, a button color, a function, and a flag controlling whether it appears in the dropdown menu. Together the plugins offer function-comment generation, project source-code analysis, batch PDF translation, Google Scholar search, PDF comprehension, and full-text LaTeX polishing and translation. The third group may still contain bugs.
-
-## [6/31] Please give an overview of the following program file: H:\chatgpt_academic_resolve\main.py
-
-This Python script implements an interactive chatbot. It builds a web UI with the Gradio framework, embedding a text input box and the other controls for talking to the bot: submit, reset, stop, and clear buttons, checkboxes, and sliders. It also includes classes, functions, and tooling for program analysis. The file is cleanly structured and richly commented, with plenty of technical detail, so developers can easily build on it, modify it, extend it, or integrate it.
-
-## [7/31] Please give an overview of the following program file: H:\chatgpt_academic_resolve\theme.py
-
-This file, theme.py, adjusts Gradio's global styling: theme colors, fonts, shadows, borders, gradients, and so on. It also adds some advanced CSS, e.g. table-cell backgrounds and borders, plus the rounding, maximum width, and shadows of the chat bubbles. If CODE_HIGHLIGHT is True, code highlighting is applied as well.
-
-## [8/31] Please give an overview of the following program file: H:\chatgpt_academic_resolve\toolbox.py
-
-This is a source file named `toolbox.py`. It contains a set of utility functions and decorators for developing and debugging the chat bot, including repacking input arguments, catching exceptions inside functions and recording them in the history, and generating Markdown-formatted chat reports. It also contains several functions related to Markdown conversion.
-
-## [9/31] Please give an overview of the following program file: H:\chatgpt_academic_resolve\crazy_functions\crazy_utils.py
-
-This Python file, `crazy_utils.py`, contains two functions:
-
-- `input_clipping(inputs, history, max_token_limit)`: takes a string inputs, a list history, and an integer max_token_limit. Using the `tiktoken`, `numpy`, and `toolbox` modules, it clips the input text and the history down to the given token budget, avoiding the performance problems of over-long input. If inputs is no longer than half of max_token_limit, only the history is clipped; otherwise both the input and the history are clipped (see the sketch after this list).
-- `request_gpt_model_in_new_thread_with_ui_alive(inputs, inputs_show_user, llm_kwargs, chatbot, history, sys_prompt, refresh_interval=0.2, handle_token_exceed=True, retry_times_at_unknown_error=2)`: takes nine parameters (some lists, the rest scalars or handles). It keeps the chat window refreshed while executing `predict_no_ui_long_connection`, sending the input to the GPT model; if the subtask fails it returns the corresponding error message, otherwise it returns the result.
-
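-A minimal sketch of the clipping logic described above (a reconstruction for illustration, not the project's actual code; `count_tokens` is a hypothetical stand-in for the real tiktoken-based counter):
-
-```python
-def input_clipping_sketch(inputs: str, history: list, max_token_limit: int, count_tokens) -> tuple:
-    # If the input does not fit in half the budget, crudely halve it first.
-    if count_tokens(inputs) >= max_token_limit // 2:
-        inputs = inputs[: len(inputs) // 2]
-    # Whatever budget remains after the input goes to the history.
-    budget = max_token_limit - count_tokens(inputs)
-    # Drop the oldest history entries until the remainder fits the budget.
-    while history and sum(count_tokens(h) for h in history) > budget:
-        history.pop(0)
-    return inputs, history
-```
-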
-## [10/31] Please give an overview of the following program file: H:\chatgpt_academic_resolve\crazy_functions\Latex全文润色.py
-
-This file, "crazy_functions\Latex全文润色.py", contains two functions, "Latex英文润色" and "Latex中文润色", plus assorted helpers. They polish a LaTeX project: the main "多文件润色" routine calls the helpers to read and process the project's files. The functions use multithreading and the language model to streamline the text and tidy the typesetting to academic standards. Comments are stripped during processing and can be looked up inside the functions.
-
-## [11/31] Please give an overview of the following program file: H:\chatgpt_academic_resolve\crazy_functions\Latex全文翻译.py
-
-This file contains `Latex英译中`, which translates an entire LaTeX project into Chinese, and `Latex中译英`, which translates Chinese into English. Both try to import the tiktoken dependency and prompt the user to install it if the import fails. `Latex英译中` splits the project's files, strips the comments, and then runs a multithreaded translation; `Latex中译英` does the same in the opposite direction. The file also includes a few other helper functions.
-
-## [12/31] Please give an overview of the following program file: H:\chatgpt_academic_resolve\crazy_functions\__init__.py
-
-This is a Python package named `crazy_functions` whose `__init__.py` defines the following functions:
-
-- `crazy_addition(a, b)`: adds two numbers and returns the result.
-- `crazy_multiplication(a, b)`: multiplies two numbers and returns the result.
-- `crazy_subtraction(a, b)`: subtracts one number from another and returns the result.
-- `crazy_division(a, b)`: divides one number by another and returns the result.
-- `crazy_factorial(n)`: computes the factorial of `n` and returns the result.
-
-As the names suggest, these functions may have odd or unconventional implementations, which is presumably why the package is called `crazy_functions`: a hint that the implementations are a little "crazy".
-
-## [13/31] Please give an overview of the following program file: H:\chatgpt_academic_resolve\crazy_functions\下载arxiv论文翻译摘要.py
-
-This program implements a plugin named "下载arxiv论文并翻译摘要", authored by "binary-husky". Given the link to an arXiv paper, it extracts the abstract, downloads the PDF, translates the abstract into Chinese, and saves the translation to a file. It uses Python libraries such as requests, pdfminer, and beautifulsoup4. The entry point is the function of the same name, which uses the custom helpers download_arxiv_ and get_name, along with other helper functions and variables such as update_ui, CatchException, report_exception, and get_conf.
-
-## [14/31] Please give an overview of the following program file: H:\chatgpt_academic_resolve\crazy_functions\代码重写为全英文_多线程.py
-
-This file is a multithreaded Python script with several functions and third-party API calls. Its main job is to convert all the Chinese in the Python files of a given folder into English and output the converted code. The key steps are:
-
-1. Clear the history to avoid input overflow
-2. Try to import the dependencies and suggest installation commands if any are missing
-3. Collect the files
-4. Display placeholder content so the UI does not feel frozen
-5. Truncate and process the text under the token limit
-6. Issue the Chinese-to-English conversion requests across threads
-7. Start all threads on the task function at once
-8. Poll the threads until every one has finished
-9. Write the results to files
-10. Back up a file
-
-## [15/31] Please give an overview of the following program file: H:\chatgpt_academic_resolve\crazy_functions\总结word文档.py
-
-This file, "总结word文档.py", is written in Python and imports the "toolbox" and "crazy_utils" modules. It can parse files in both docx and doc formats. Its "解析docx" function applies natural-language processing to the file content to produce Chinese and English overviews of each passage, using the "docx" module and "win32com.client" to parse the two formats and "request_gpt_model_in_new_thread_with_ui_alive" to query the GPT model. Finally, a "总结word文档" function batch-summarizes Word documents.
-
-## [16/31] Please give an overview of the following program file: H:\chatgpt_academic_resolve\crazy_functions\批量Markdown翻译.py
-
-This file implements batch Markdown translation, converting the Markdown text of a source project into a target language (currently Chinese-from-English and English-from-Chinese). It has three main parts: the `PaperFileGroup` class splits long text; `多文件翻译` is the main routine, calling `request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency` for multithreaded translation and emitting the results; and `Markdown英译中` and `Markdown中译外` are the entry functions that resolve the project path and invoke the translator. The program depends on tiktoken, among other libraries.
-
-## [17/31] Please give an overview of the following program file: H:\chatgpt_academic_resolve\crazy_functions\批量总结PDF文档.py
-
-This is a Python script named "批量总结PDF文档" containing several functions. One, "clean_text", cleans and formats the raw text extracted from a PDF: it normalizes ligatures to their base forms and uses heuristic rules to decide whether each newline is a paragraph break, replacing it accordingly. Another, "解析PDF", accepts a list of PDFs, extracts the text of each, cleans it with "clean_text", then sends the user a question containing the article's synopsis and waits for an answer. Finally, the main "批量总结PDF文档" function calls "解析PDF" to process the whole batch.
-
-## [18/31] Please give an overview of the following program file: H:\chatgpt_academic_resolve\crazy_functions\批量总结PDF文档pdfminer.py
-
-This file is a Python module, pdfminer.py, that defines a batch PDF-summarization function. The function accepts some parameters and then tries to import the pdfminer and beautifulsoup4 libraries. It reads the content of pdf or tex files, analyzes it, and summarizes it with the GPT model. The file also contains a helper, readPdf, for reading the content of a PDF.
-
-## [19/31] Please give an overview of the following program file: H:\chatgpt_academic_resolve\crazy_functions\批量翻译PDF文档_多线程.py
-
-This Python script, crazy_functions\批量翻译PDF文档_多线程.py, provides a "批量翻译PDF文档" function that batch-translates PDF files and generates report files. It draws on several modules and functions (toolbox, crazy_utils, update_ui, and others), uses Python's exception handling and multithreading, and relies on text-processing helpers and third-party libraries (fitz and tiktoken). While running, it validates parameters, reads and cleans the PDF text, splits the PDF recursively, fetches the article's metadata, translates across threads, formats the report, updates the UI, and writes the report file.
-
-## [20/31] Please give an overview of the following program file: H:\chatgpt_academic_resolve\crazy_functions\理解PDF文档内容.py
-
-This is a Python program, "理解PDF文档内容.py", that parses the content of a PDF in five steps: step 0 splits the PDF; step 1 extracts the high-value information from the abstract into the history; step 2 iterates over the whole article, distilling the key information; step 3 tidies the history; step 4 sets a token ceiling to keep the answer from overflowing. It uses various Python modules and libraries such as toolbox, tiktoken, and pymupdf.
-
-## [21/31] Please give an overview of the following program file: H:\chatgpt_academic_resolve\crazy_functions\生成函数注释.py
-
-This file defines a "生成函数注释" function wrapped in the "@CatchException" decorator so that exceptions are trapped. It accepts file paths, parameters, the chatbot handle, and more, and annotates the functions in a set of Python or C++ files, using helpers from the "toolbox" and "crazy_utils" modules. It reads each specified file in turn, queries the model interactively for comment text, and renders the generated comments alongside the original content in a markdown table, finally returning a string that indicates whether the task finished. The file also contains a "批量生成函数注释" function that, together with "生成函数注释", batches the process across many files.
-
-## [22/31] Please give an overview of the following program file: H:\chatgpt_academic_resolve\crazy_functions\解析项目源代码.py
-
-This file analyzes a source-code project. The functions `解析项目本身`, `解析一个Python项目`, `解析一个C项目的头文件`, `解析一个C项目`, `解析一个Java项目`, and `解析一个Rect项目` each handle a different project type. `解析源代码新` analyzes every source file and aggregates the results, using grouping and iteration to improve efficiency. Finally, `write_results_to_file` writes all the analysis results out. Along the way, `request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency` and `request_gpt_model_in_new_thread_with_ui_alive` carry the requests and responses, and `update_ui` refreshes the interface in real time.
-
-## [23/31] Please give an overview of the following program file: H:\chatgpt_academic_resolve\crazy_functions\询问多个大语言模型.py
-
-This Python program, "crazy_functions\询问多个大语言模型.py", queries several large language models at once. It accepts the user's input text and model parameters, sends requests to the ChatGPT and ChatGLM models, shows the transcript in the chat box, and refreshes the interface.
-
-## [24/31] Please give an overview of the following program file: H:\chatgpt_academic_resolve\crazy_functions\读文章写摘要.py
-
-This file is a Python module, "读文章写摘要.py", with two main functions: "解析Paper" and "读文章写摘要". "解析Paper" takes file paths and parameters, prints each file's content, and asks the GPT model to summarize it; "读文章写摘要" takes a body of text and parameters, passes the text and each of its .tex files through "解析Paper", and uses the GPT model to produce Chinese and English abstracts of the article. The file also imports utilities for exception handling, reporting, and file writing.
-
-## [25/31] Please give an overview of the following program file: H:\chatgpt_academic_resolve\crazy_functions\谷歌检索小助手.py
-
-This file contains a function named `get_meta_information` and a decorated function named `谷歌检索小助手`, which scrape article metadata from Google Scholar and analyze every article on a search page supplied by the user. The file uses many third-party libraries, such as requests, arxiv, and BeautifulSoup. Inside `get_meta_information` there is also a helper, `string_similar`, for comparing string similarity.
-
-## [26/31] Please give an overview of the following program file: H:\chatgpt_academic_resolve\crazy_functions\高级功能函数模板.py
-
-This file is a Python module containing a function named "高阶功能模板函数". It accepts several arguments, including the input text, GPT model parameters, plugin parameters, the chat display, and the chat history. Its main job is to use the GPT model to generate questions from the input text, wait for the user to answer them (in Markdown), append the answers to the chat history, and update the chat display, with some exception handling and threading along the way. It references "CatchException" and "update_ui" from another module, plus the custom "request_gpt_model_in_new_thread_with_ui_alive" function.
-
-## [27/31] Please give an overview of the following program file: H:\chatgpt_academic_resolve\request_llm\bridge_all.py
-
-This file handles the interaction with the LLMs. It contains two functions: predict_no_ui_long_connection handles long-form output and can be called from multiple threads, while predict handles the basic chat flow. The file imports methods defined elsewhere and dispatches among them based on the arguments passed in, with some decorators and thread-management logic as well.
-
-## [28/31] Please give an overview of the following program file: H:\chatgpt_academic_resolve\request_llm\bridge_chatglm.py
-
-This file implements chat through the ChatGLM model. After initialization, the GetGLMHandle class loads and runs ChatGLM. predict_no_ui_long_connection serves multithreaded chat and predict serves single-threaded chat; the difference is that the former does not update the UI while the latter does. The file also imports modules such as transformers, time, and importlib, and uses a multiprocessing Pipe.
-
-## [29/31] Please give an overview of the following program file: H:\chatgpt_academic_resolve\request_llm\bridge_chatgpt.py
-
-This file handles dialogue generation through three main functions: predict serves ordinary chats with full interactivity but no multithreading; predict_no_ui is called by the advanced experimental modules, takes simple parameters, runs in parallel, and suits complex feature logic; predict_no_ui_long_connection fixes predict_no_ui's tendency to drop the connection on long documents and likewise supports threads. The file also contains constants and helpers that assemble the payload, select the LLM, build and send the HTTP requests, and receive the responses. It requires a config file holding the proxy URL, API key, and other sensitive details.
-
-## [30/31] Please give an overview of the following program file: H:\chatgpt_academic_resolve\request_llm\bridge_tgui.py
-
-This file implements a WebSockets-based text-generation service and chat feature through three functions: `run()` connects to the WebSocket service and produces text; `predict()` feeds the user's input to generation while showing the chat history in the UI and streaming the generated text into it; `predict_no_ui_long_connection()` is similar to `predict()` but has no UI and returns a single generated text after a period of time. The program pulls in several Python modules, such as `asyncio`, `websockets`, and `json`.
-
-## Based on the analysis above, summarize the program's overall functionality and architecture again, then organize each file's function into a markdown table (covering check_proxy.py, colorful.py, config.py, config_private.py, core_functional.py, crazy_functional.py, main.py, theme.py, toolbox.py, crazy_functions\crazy_utils.py, crazy_functions\Latex全文润色.py, crazy_functions\Latex全文翻译.py, crazy_functions\__init__.py, crazy_functions\下载arxiv论文翻译摘要.py, crazy_functions\代码重写为全英文_多线程.py, crazy_functions\总结word文档.py).
-
-Program overview: the program is a chatbot that users interact with through a web interface. It offers a rich set of features, such as text polishing, translation, code rewriting, and online lookup, and it supports multithreaded processing. Users interact through the web UI provided by the Gradio framework, and the program also ships debugging aids, such as the toolbox module, to ease development and debugging.
-
-The table below summarizes each file's function:
-
-| File | Function |
-| ----------------------------------------------------------- | ------------------------------------------------------------ |
-| check_proxy.py | Checks whether the proxy is available |
-| colorful.py | Colored console output for printed text |
-| config.py | Program-wide settings, such as parallel-thread counts and retry limits |
-| config_private.py | Configures the API_KEY and proxy information |
-| core_functional.py | The concrete text-processing features |
-| crazy_functional.py | Plugin functions providing assorted text-processing features |
-| main.py | The chatbot's main program |
-| theme.py | Adjusts the global styling |
-| toolbox.py | Utility functions and decorators for developing and debugging the chat bot |
-| crazy_functions\crazy_utils.py | Helpers such as text clipping and message capture |
-| crazy_functions\Latex全文润色.py | Polishes LaTeX projects |
-| crazy_functions\Latex全文翻译.py | Translates LaTeX projects |
-| crazy_functions\__init__.py | Defines some quirky math functions |
-| crazy_functions\下载arxiv论文翻译摘要.py | Downloads arXiv papers and translates their abstracts |
-| crazy_functions\代码重写为全英文_多线程.py | Converts all Chinese in Python programs to English |
-| crazy_functions\总结word文档.py | Parses docx and doc files and generates Chinese/English overviews of passages |
-
-## Based on the analysis above, summarize the program's overall functionality and architecture again, then organize each file's function into a markdown table (covering check_proxy.py, colorful.py, config.py, config_private.py, core_functional.py, crazy_functional.py, main.py, theme.py, toolbox.py, crazy_functions\crazy_utils.py, crazy_functions\Latex全文润色.py, crazy_functions\Latex全文翻译.py, crazy_functions\__init__.py, crazy_functions\下载arxiv论文翻译摘要.py, crazy_functions\代码重写为全英文_多线程.py, crazy_functions\总结word文档.py, crazy_functions\批量Markdown翻译.py, crazy_functions\批量总结PDF文档.py, crazy_functions\批量总结PDF文档pdfminer.py, crazy_functions\批量翻译PDF文档_多线程.py, crazy_functions\理解PDF文档内容.py, crazy_functions\生成函数注释.py, crazy_functions\解析项目源代码.py, crazy_functions\询问多个大语言模型.py, crazy_functions\读文章写摘要.py, crazy_functions\谷歌检索小助手.py, crazy_functions\高级功能函数模板.py, request_llm\bridge_all.py, request_llm\bridge_chatglm.py, request_llm\bridge_chatgpt.py, request_llm\bridge_tgui.py).
-
-Based on the analysis above, the program as a whole is a text-processing and generation toolkit that integrates many useful tools and features for different scenarios, including but not limited to dialogue generation, text summarization, batch PDF processing, code translation, and practical utilities. The main Python modules include "toolbox.py", "config.py", "core_functional.py", and "crazy_functional.py", with many third-party libraries and modules supporting the features. The function of each program file follows:
-
-| File | Function |
-| --- | --- |
-| check_proxy.py | Checks that the proxy is valid and available |
-| colorful.py | Constants for preset colors, used across many UI elements |
-| config.py | Global configuration |
-| config_private.py | A second config file used alongside config.py to hold private information |
-| core_functional.py | TextFunctional classes and basic feature functions |
-| crazy_functional.py | A large set of advanced and experimental feature functions |
-| main.py | Main entry point; the GUI main window and primary UI management |
-| theme.py | Colors for the preset themes |
-| toolbox.py | Assorted useful utility functions |
-| crazy_functions\crazy_utils.py | Helper functions backing the advanced features |
-| crazy_functions\Latex全文润色.py | Polishes and formats the full text of LaTeX files |
-| crazy_functions\Latex全文翻译.py | Translates the contents of LaTeX files |
-| crazy_functions\_\_init\_\_.py | Imports the feature functions from crazy_functional.py |
-| crazy_functions\下载arxiv论文翻译摘要.py | Downloads papers from arXiv and extracts the key information |
-| crazy_functions\代码重写为全英文_多线程.py | Translates Chinese Python files entirely into English |
-| crazy_functions\总结word文档.py | Extracts the key content of Word files into summaries |
-| crazy_functions\批量Markdown翻译.py | Batch-translates Markdown files |
-| crazy_functions\批量总结PDF文档.py | Batch-extracts summaries from PDF files |
-| crazy_functions\批量总结PDF文档pdfminer.py | Batch-extracts summaries from PDF files (pdfminer backend) |
-| crazy_functions\批量翻译PDF文档_多线程.py | Batch-translates PDF files |
-| crazy_functions\理解PDF文档内容.py | Batch-analyzes PDF files and extracts summaries |
-| crazy_functions\生成函数注释.py | Auto-generates comments for the functions in Python files |
-| crazy_functions\解析项目源代码.py | Parses and analyzes a given project's source code |
-| crazy_functions\询问多个大语言模型.py | Sends the input text to multiple LLMs and processes the replies |
-| crazy_functions\读文献写摘要.py | Reads literature per the user's input and generates a summary |
-| crazy_functions\谷歌检索小助手.py | Searches Google Scholar for user-provided papers and extracts the details |
-| crazy_functions\高级功能函数模板.py | Template functions for implementing advanced features |
-| request_llm\bridge_all.py | Handles interaction with the LLMs |
-| request_llm\bridge_chatglm.py | Chat via the ChatGLM model |
-| request_llm\bridge_chatgpt.py | Implements the dialogue-generation features |
-| request_llm\bridge_tgui.py | Interacts with users over WebSockets and generates text output |
-
diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet.py
deleted file mode 100644
index 22b3eaf0915e4dffcf128fe15abf0ffb30957fc9..0000000000000000000000000000000000000000
--- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet.py
+++ /dev/null
@@ -1,348 +0,0 @@
-# Copyright 2023 The HuggingFace Team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from typing import Callable, List, Optional, Union
-
-import torch
-
-from ...models import UNet2DConditionModel, VQModel
-from ...schedulers import DDPMScheduler
-from ...utils import (
- is_accelerate_available,
- is_accelerate_version,
- logging,
- randn_tensor,
- replace_example_docstring,
-)
-from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
-
-
-logger = logging.get_logger(__name__) # pylint: disable=invalid-name
-
-EXAMPLE_DOC_STRING = """
- Examples:
- ```py
- >>> import torch
- >>> import numpy as np
-
- >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
- >>> from transformers import pipeline
- >>> from diffusers.utils import load_image
-
-
- >>> def make_hint(image, depth_estimator):
- ... image = depth_estimator(image)["depth"]
- ... image = np.array(image)
- ... image = image[:, :, None]
- ... image = np.concatenate([image, image, image], axis=2)
- ... detected_map = torch.from_numpy(image).float() / 255.0
- ... hint = detected_map.permute(2, 0, 1)
- ... return hint
-
-
- >>> depth_estimator = pipeline("depth-estimation")
-
- >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
- ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
- ... )
- >>> pipe_prior = pipe_prior.to("cuda")
-
- >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
- ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
- ... )
- >>> pipe = pipe.to("cuda")
-
-
- >>> img = load_image(
- ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
- ... "/kandinsky/cat.png"
- ... ).resize((768, 768))
-
- >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")
-
- >>> prompt = "A robot, 4k photo"
- >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"
-
- >>> generator = torch.Generator(device="cuda").manual_seed(43)
-
- >>> image_emb, zero_image_emb = pipe_prior(
- ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
- ... ).to_tuple()
-
- >>> images = pipe(
- ... image_embeds=image_emb,
- ... negative_image_embeds=zero_image_emb,
- ... hint=hint,
- ... num_inference_steps=50,
- ... generator=generator,
- ... height=768,
- ... width=768,
- ... ).images
-
- >>> images[0].save("robot_cat.png")
- ```
-"""
-
-
-# Copied from diffusers.pipelines.kandinsky2_2.pipeline_kandinsky2_2.downscale_height_and_width
-def downscale_height_and_width(height, width, scale_factor=8):
- new_height = height // scale_factor**2
- if height % scale_factor**2 != 0:
- new_height += 1
- new_width = width // scale_factor**2
- if width % scale_factor**2 != 0:
- new_width += 1
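-    # e.g. height=width=512, scale_factor=8: 512 // 8**2 = 8, so the result is (8*8, 8*8) = (64, 64)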
- return new_height * scale_factor, new_width * scale_factor
-
-
-class KandinskyV22ControlnetPipeline(DiffusionPipeline):
- """
- Pipeline for text-to-image generation using Kandinsky
-
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
- library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
-
- Args:
-        scheduler ([`DDPMScheduler`]):
- A scheduler to be used in combination with `unet` to generate image latents.
- unet ([`UNet2DConditionModel`]):
- Conditional U-Net architecture to denoise the image embedding.
- movq ([`VQModel`]):
- MoVQ Decoder to generate the image from the latents.
- """
-
- def __init__(
- self,
- unet: UNet2DConditionModel,
- scheduler: DDPMScheduler,
- movq: VQModel,
- ):
- super().__init__()
-
- self.register_modules(
- unet=unet,
- scheduler=scheduler,
- movq=movq,
- )
- self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
-
- # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents
- def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
- if latents is None:
- latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
- else:
- if latents.shape != shape:
- raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
- latents = latents.to(device)
-
- latents = latents * scheduler.init_noise_sigma
- return latents
-
- # Copied from diffusers.pipelines.kandinsky2_2.pipeline_kandinsky2_2.KandinskyV22Pipeline.enable_model_cpu_offload
- def enable_model_cpu_offload(self, gpu_id=0):
- r"""
- Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
- to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
-        method is called, and the model remains on the GPU until the next model runs. Memory savings are lower than with
- `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.
- """
- if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
- from accelerate import cpu_offload_with_hook
- else:
- raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
-
- device = torch.device(f"cuda:{gpu_id}")
-
- if self.device.type != "cpu":
- self.to("cpu", silence_dtype_warnings=True)
- torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
-
- hook = None
- for cpu_offloaded_model in [self.unet, self.movq]:
- _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
-
- # We'll offload the last model manually.
- self.final_offload_hook = hook
-
- @torch.no_grad()
- @replace_example_docstring(EXAMPLE_DOC_STRING)
- def __call__(
- self,
- image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
- negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
- hint: torch.FloatTensor,
- height: int = 512,
- width: int = 512,
- num_inference_steps: int = 100,
- guidance_scale: float = 4.0,
- num_images_per_prompt: int = 1,
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
- latents: Optional[torch.FloatTensor] = None,
- output_type: Optional[str] = "pil",
- callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
- callback_steps: int = 1,
- return_dict: bool = True,
- ):
- """
- Function invoked when calling the pipeline for generation.
-
- Args:
-            image_embeds (`torch.FloatTensor` or `List[torch.FloatTensor]`):
-                The CLIP image embeddings for the text prompt, used to condition the image generation.
-            negative_image_embeds (`torch.FloatTensor` or `List[torch.FloatTensor]`):
-                The CLIP image embeddings for the negative text prompt, used to condition the image generation.
-            hint (`torch.FloatTensor`):
-                The controlnet condition.
- height (`int`, *optional*, defaults to 512):
- The height in pixels of the generated image.
- width (`int`, *optional*, defaults to 512):
- The width in pixels of the generated image.
- num_inference_steps (`int`, *optional*, defaults to 100):
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
- expense of slower inference.
- guidance_scale (`float`, *optional*, defaults to 4.0):
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
-                `guidance_scale` is defined as `w` of equation 2 of the [Imagen
-                paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance is enabled by setting `guidance_scale >
-                1`. A higher guidance scale encourages images that are closely linked to the text `prompt`,
-                usually at the expense of lower image quality.
- num_images_per_prompt (`int`, *optional*, defaults to 1):
- The number of images to generate per prompt.
- generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
- One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
- to make generation deterministic.
- latents (`torch.FloatTensor`, *optional*):
- Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will be generated by sampling using the supplied random `generator`.
- output_type (`str`, *optional*, defaults to `"pil"`):
-                The output format of the generated image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"`
- (`np.array`) or `"pt"` (`torch.Tensor`).
- callback (`Callable`, *optional*):
-                A function called every `callback_steps` steps during inference, with the
-                following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
- callback_steps (`int`, *optional*, defaults to 1):
- The frequency at which the `callback` function is called. If not specified, the callback is called at
- every step.
- return_dict (`bool`, *optional*, defaults to `True`):
- Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
-
- Examples:
-
- Returns:
- [`~pipelines.ImagePipelineOutput`] or `tuple`
- """
- device = self._execution_device
-
- do_classifier_free_guidance = guidance_scale > 1.0
-
- if isinstance(image_embeds, list):
- image_embeds = torch.cat(image_embeds, dim=0)
- if isinstance(negative_image_embeds, list):
- negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
- if isinstance(hint, list):
- hint = torch.cat(hint, dim=0)
-
- batch_size = image_embeds.shape[0] * num_images_per_prompt
-
- if do_classifier_free_guidance:
- image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
- negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
- hint = hint.repeat_interleave(num_images_per_prompt, dim=0)
-
- image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
- dtype=self.unet.dtype, device=device
- )
- hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)
-
- self.scheduler.set_timesteps(num_inference_steps, device=device)
- timesteps_tensor = self.scheduler.timesteps
-
- num_channels_latents = self.movq.config.latent_channels
-
- height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
-
- # create initial latent
- latents = self.prepare_latents(
- (batch_size, num_channels_latents, height, width),
- image_embeds.dtype,
- device,
- generator,
- latents,
- self.scheduler,
- )
-
- for i, t in enumerate(self.progress_bar(timesteps_tensor)):
- # expand the latents if we are doing classifier free guidance
- latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
-
- added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
- noise_pred = self.unet(
- sample=latent_model_input,
- timestep=t,
- encoder_hidden_states=None,
- added_cond_kwargs=added_cond_kwargs,
- return_dict=False,
- )[0]
-
- if do_classifier_free_guidance:
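-                # the UNet predicts noise and variance stacked along the channel
-                # axis; guidance is applied to the noise half only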
- noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
- _, variance_pred_text = variance_pred.chunk(2)
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
- noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
-
- if not (
- hasattr(self.scheduler.config, "variance_type")
- and self.scheduler.config.variance_type in ["learned", "learned_range"]
- ):
- noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
-
- # compute the previous noisy sample x_t -> x_t-1
- latents = self.scheduler.step(
- noise_pred,
- t,
- latents,
- generator=generator,
- )[0]
-
- if callback is not None and i % callback_steps == 0:
- callback(i, t, latents)
- # post-processing
- image = self.movq.decode(latents, force_not_quantize=True)["sample"]
-
- # Offload last model to CPU
- if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
- self.final_offload_hook.offload()
-
- if output_type not in ["pt", "np", "pil"]:
- raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
-
- if output_type in ["np", "pil"]:
- image = image * 0.5 + 0.5
- image = image.clamp(0, 1)
- image = image.cpu().permute(0, 2, 3, 1).float().numpy()
-
- if output_type == "pil":
- image = self.numpy_to_pil(image)
-
- if not return_dict:
- return (image,)
-
- return ImagePipelineOutput(images=image)
diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/schedulers/scheduling_utils.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/schedulers/scheduling_utils.py
deleted file mode 100644
index 0f95beb022ac042b6e1ef588a72365b2623338de..0000000000000000000000000000000000000000
--- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/schedulers/scheduling_utils.py
+++ /dev/null
@@ -1,177 +0,0 @@
-# Copyright 2023 The HuggingFace Team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-import importlib
-import os
-from dataclasses import dataclass
-from enum import Enum
-from typing import Any, Dict, Optional, Union
-
-import torch
-
-from ..utils import BaseOutput
-
-
-SCHEDULER_CONFIG_NAME = "scheduler_config.json"
-
-
-# NOTE: We make this type an enum because it simplifies usage in docs and prevents
-# circular imports when used for `_compatibles` within the schedulers module.
-# When it's used as a type in pipelines, it really is a Union because the actual
-# scheduler instance is passed in.
-class KarrasDiffusionSchedulers(Enum):
- DDIMScheduler = 1
- DDPMScheduler = 2
- PNDMScheduler = 3
- LMSDiscreteScheduler = 4
- EulerDiscreteScheduler = 5
- HeunDiscreteScheduler = 6
- EulerAncestralDiscreteScheduler = 7
- DPMSolverMultistepScheduler = 8
- DPMSolverSinglestepScheduler = 9
- KDPM2DiscreteScheduler = 10
- KDPM2AncestralDiscreteScheduler = 11
- DEISMultistepScheduler = 12
- UniPCMultistepScheduler = 13
- DPMSolverSDEScheduler = 14
-
-
-@dataclass
-class SchedulerOutput(BaseOutput):
- """
- Base class for the scheduler's step function output.
-
- Args:
- prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
- Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
- denoising loop.
- """
-
- prev_sample: torch.FloatTensor
-
-
-class SchedulerMixin:
- """
- Mixin containing common functions for the schedulers.
-
- Class attributes:
- - **_compatibles** (`List[str]`) -- A list of classes that are compatible with the parent class, so that
- `from_config` can be used from a class different than the one used to save the config (should be overridden
- by parent class).
- """
-
- config_name = SCHEDULER_CONFIG_NAME
- _compatibles = []
- has_compatibles = True
-
- @classmethod
- def from_pretrained(
- cls,
-        pretrained_model_name_or_path: Optional[Union[str, os.PathLike]] = None,
- subfolder: Optional[str] = None,
- return_unused_kwargs=False,
- **kwargs,
- ):
- r"""
- Instantiate a Scheduler class from a pre-defined JSON configuration file inside a directory or Hub repo.
-
- Parameters:
- pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*):
- Can be either:
-
- - A string, the *model id* of a model repo on huggingface.co. Valid model ids should have an
- organization name, like `google/ddpm-celebahq-256`.
-                      - A path to a *directory* containing the scheduler configurations saved using
- [`~SchedulerMixin.save_pretrained`], e.g., `./my_model_directory/`.
- subfolder (`str`, *optional*):
- In case the relevant files are located inside a subfolder of the model repo (either remote in
- huggingface.co or downloaded locally), you can specify the folder name here.
- return_unused_kwargs (`bool`, *optional*, defaults to `False`):
- Whether kwargs that are not consumed by the Python class should be returned or not.
- cache_dir (`Union[str, os.PathLike]`, *optional*):
- Path to a directory in which a downloaded pretrained model configuration should be cached if the
- standard cache should not be used.
- force_download (`bool`, *optional*, defaults to `False`):
- Whether or not to force the (re-)download of the model weights and configuration files, overriding the
- cached versions if they exist.
- resume_download (`bool`, *optional*, defaults to `False`):
- Whether or not to delete incompletely received files. Will attempt to resume the download if such a
- file exists.
- proxies (`Dict[str, str]`, *optional*):
- A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
- 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
- output_loading_info(`bool`, *optional*, defaults to `False`):
- Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
- local_files_only(`bool`, *optional*, defaults to `False`):
- Whether or not to only look at local files (i.e., do not try to download the model).
- use_auth_token (`str` or *bool*, *optional*):
- The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
- when running `transformers-cli login` (stored in `~/.huggingface`).
- revision (`str`, *optional*, defaults to `"main"`):
- The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
- git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
- identifier allowed by git.
-
-        It is required to be logged in (`huggingface-cli login`) when you want to use private or [gated
-        models](https://huggingface.co/docs/hub/models-gated#gated-models).
-
-        Activate the special ["offline-mode"](https://huggingface.co/transformers/installation.html#offline-mode) to
-        use this method in a firewalled environment.
-
-        """
- config, kwargs, commit_hash = cls.load_config(
- pretrained_model_name_or_path=pretrained_model_name_or_path,
- subfolder=subfolder,
- return_unused_kwargs=True,
- return_commit_hash=True,
- **kwargs,
- )
- return cls.from_config(config, return_unused_kwargs=return_unused_kwargs, **kwargs)
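-
-    # Usage sketch: any scheduler subclass can load a saved config this way, e.g.
-    #     DDPMScheduler.from_pretrained("google/ddpm-celebahq-256", subfolder="scheduler")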
-
- def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
- """
- Save a scheduler configuration object to the directory `save_directory`, so that it can be re-loaded using the
- [`~SchedulerMixin.from_pretrained`] class method.
-
- Args:
- save_directory (`str` or `os.PathLike`):
- Directory where the configuration JSON file will be saved (will be created if it does not exist).
- """
- self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)
-
- @property
- def compatibles(self):
- """
- Returns all schedulers that are compatible with this scheduler
-
- Returns:
- `List[SchedulerMixin]`: List of compatible schedulers
- """
- return self._get_compatibles()
-
- @classmethod
- def _get_compatibles(cls):
- compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
- diffusers_library = importlib.import_module(__name__.split(".")[0])
- compatible_classes = [
- getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
- ]
- return compatible_classes
diff --git a/spaces/Andy1621/uniformer_image_detection/configs/_base_/datasets/coco_instance.py b/spaces/Andy1621/uniformer_image_detection/configs/_base_/datasets/coco_instance.py
deleted file mode 100644
index f6ea4f4562a8118275a444879a884717b55caa15..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_detection/configs/_base_/datasets/coco_instance.py
+++ /dev/null
@@ -1,48 +0,0 @@
-dataset_type = 'CocoDataset'
-data_root = 'data/coco/'
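-# mean/std below are the standard ImageNet statistics (RGB order), matching the pretrained backbone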
-img_norm_cfg = dict(
- mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-train_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
- dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
- dict(type='RandomFlip', flip_ratio=0.5),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='Pad', size_divisor=32),
- dict(type='DefaultFormatBundle'),
- dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
-]
-test_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(
- type='MultiScaleFlipAug',
- img_scale=(1333, 800),
- flip=False,
- transforms=[
- dict(type='Resize', keep_ratio=True),
- dict(type='RandomFlip'),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='Pad', size_divisor=32),
- dict(type='ImageToTensor', keys=['img']),
- dict(type='Collect', keys=['img']),
- ])
-]
-data = dict(
- samples_per_gpu=2,
- workers_per_gpu=2,
- train=dict(
- type=dataset_type,
- ann_file=data_root + 'annotations/instances_train2017.json',
- img_prefix=data_root + 'train2017/',
- pipeline=train_pipeline),
- val=dict(
- type=dataset_type,
- ann_file=data_root + 'annotations/instances_val2017.json',
- img_prefix=data_root + 'val2017/',
- pipeline=test_pipeline),
- test=dict(
- type=dataset_type,
- ann_file=data_root + 'annotations/instances_val2017.json',
- img_prefix=data_root + 'val2017/',
- pipeline=test_pipeline))
-evaluation = dict(metric=['bbox', 'segm'])
diff --git a/spaces/Andy1621/uniformer_image_detection/configs/cityscapes/faster_rcnn_r50_fpn_1x_cityscapes.py b/spaces/Andy1621/uniformer_image_detection/configs/cityscapes/faster_rcnn_r50_fpn_1x_cityscapes.py
deleted file mode 100644
index 5b17451d78cfd5039f57af707acbcee50836e738..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_detection/configs/cityscapes/faster_rcnn_r50_fpn_1x_cityscapes.py
+++ /dev/null
@@ -1,39 +0,0 @@
-_base_ = [
- '../_base_/models/faster_rcnn_r50_fpn.py',
- '../_base_/datasets/cityscapes_detection.py',
- '../_base_/default_runtime.py'
-]
-model = dict(
- pretrained=None,
- roi_head=dict(
- bbox_head=dict(
- type='Shared2FCBBoxHead',
- in_channels=256,
- fc_out_channels=1024,
- roi_feat_size=7,
- num_classes=8,
- bbox_coder=dict(
- type='DeltaXYWHBBoxCoder',
- target_means=[0., 0., 0., 0.],
- target_stds=[0.1, 0.1, 0.2, 0.2]),
- reg_class_agnostic=False,
- loss_cls=dict(
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
- loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))))
-# optimizer
-# lr is set for a batch size of 8
-optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
-optimizer_config = dict(grad_clip=None)
-# learning policy
-lr_config = dict(
- policy='step',
- warmup='linear',
- warmup_iters=500,
- warmup_ratio=0.001,
- # [7] yields higher performance than [6]
- step=[7])
-runner = dict(
- type='EpochBasedRunner', max_epochs=8) # actual epoch = 8 * 8 = 64
-log_config = dict(interval=100)
-# For better, more stable performance initialize from COCO
-load_from = 'https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth' # noqa
diff --git a/spaces/Andy1621/uniformer_image_detection/configs/fsaf/fsaf_x101_64x4d_fpn_1x_coco.py b/spaces/Andy1621/uniformer_image_detection/configs/fsaf/fsaf_x101_64x4d_fpn_1x_coco.py
deleted file mode 100644
index b966f24969a60b95878b0b86bb8dae7b8cb3f1ae..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_detection/configs/fsaf/fsaf_x101_64x4d_fpn_1x_coco.py
+++ /dev/null
@@ -1,13 +0,0 @@
-_base_ = './fsaf_r50_fpn_1x_coco.py'
-model = dict(
- pretrained='open-mmlab://resnext101_64x4d',
- backbone=dict(
- type='ResNeXt',
- depth=101,
- groups=64,
- base_width=4,
- num_stages=4,
- out_indices=(0, 1, 2, 3),
- frozen_stages=1,
- norm_cfg=dict(type='BN', requires_grad=True),
- style='pytorch'))
diff --git a/spaces/Andy1621/uniformer_image_detection/mmcv_custom/__init__.py b/spaces/Andy1621/uniformer_image_detection/mmcv_custom/__init__.py
deleted file mode 100644
index 7e0e39b03e2a149c33c372472b2b814a872ec55c..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_detection/mmcv_custom/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# -*- coding: utf-8 -*-
-
-from .checkpoint import load_checkpoint
-
-__all__ = ['load_checkpoint']
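For completeness, the re-exported helper is typically used like this when loading UniFormer weights into a detector; the checkpoint path is a placeholder, and the keyword names follow mmcv's conventional `load_checkpoint` signature:

```python
from mmcv_custom import load_checkpoint

# 'model' is an already-constructed detector; strict=False tolerates
# head weights that are absent from a backbone-only checkpoint.
checkpoint = load_checkpoint(model, 'weights/uniformer_base.pth',
                             map_location='cpu', strict=False)
```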
diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_40k_cityscapes.py b/spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_40k_cityscapes.py
deleted file mode 100644
index 7243d0390f6394fdd528c881bb128b2c13d08037..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_40k_cityscapes.py
+++ /dev/null
@@ -1,5 +0,0 @@
-_base_ = [
- '../_base_/models/deeplabv3plus_r50-d8.py',
- '../_base_/datasets/cityscapes.py', '../_base_/default_runtime.py',
- '../_base_/schedules/schedule_40k.py'
-]
diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/fcn/fcn_d6_r50-d16_512x1024_40k_cityscapes.py b/spaces/Andy1621/uniformer_image_segmentation/configs/fcn/fcn_d6_r50-d16_512x1024_40k_cityscapes.py
deleted file mode 100644
index f30646ede7b036e6c82c335729b19f92293efb35..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_segmentation/configs/fcn/fcn_d6_r50-d16_512x1024_40k_cityscapes.py
+++ /dev/null
@@ -1,8 +0,0 @@
-_base_ = [
- '../_base_/models/fcn_r50-d8.py', '../_base_/datasets/cityscapes.py',
- '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py'
-]
-model = dict(
- backbone=dict(dilations=(1, 1, 1, 2), strides=(1, 2, 2, 1)),
- decode_head=dict(dilation=6),
- auxiliary_head=dict(dilation=6))
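The `d16` in this config's name denotes the backbone's output stride. A quick sketch of the arithmetic behind it; the stem stride of 4 is the standard ResNet value and is an assumption about the base model rather than something stated in this file:

```python
# ResNet's stem (7x7 conv + max-pool) downsamples by 4; each stage then
# multiplies in its own stride.
stem_stride = 4
stage_strides = (1, 2, 2, 1)  # the backbone override above

output_stride = stem_stride
for s in stage_strides:
    output_stride *= s
print(output_stride)  # 16 -> the "d16" variant; the fcn_r50-d8 base uses (1, 2, 1, 1) -> 8
```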
diff --git a/spaces/AnishKumbhar/ChatBot/text-generation-webui-main/modules/extensions.py b/spaces/AnishKumbhar/ChatBot/text-generation-webui-main/modules/extensions.py
deleted file mode 100644
index 6c0725043e83ac24220bc9bcec2255a1a9247795..0000000000000000000000000000000000000000
--- a/spaces/AnishKumbhar/ChatBot/text-generation-webui-main/modules/extensions.py
+++ /dev/null
@@ -1,224 +0,0 @@
-import traceback
-from functools import partial
-from inspect import signature
-
-import gradio as gr
-
-import extensions
-import modules.shared as shared
-from modules.logging_colors import logger
-
-state = {}
-available_extensions = []
-setup_called = set()
-
-
-def apply_settings(extension, name):
- if not hasattr(extension, 'params'):
- return
-
- for param in extension.params:
- _id = f"{name}-{param}"
- if _id not in shared.settings:
- continue
-
- extension.params[param] = shared.settings[_id]
-
-
-def load_extensions():
- global state, setup_called
- state = {}
- for i, name in enumerate(shared.args.extensions):
- if name in available_extensions:
- if name != 'api':
- logger.info(f'Loading the extension "{name}"...')
- try:
- exec(f"import extensions.{name}.script")
- extension = getattr(extensions, name).script
- apply_settings(extension, name)
- if extension not in setup_called and hasattr(extension, "setup"):
- setup_called.add(extension)
- extension.setup()
-
- state[name] = [True, i]
-        except Exception:
- logger.error(f'Failed to load the extension "{name}".')
- traceback.print_exc()
-
-
-# This iterator returns the extensions in the order specified on the command line
-def iterator():
- for name in sorted(state, key=lambda x: state[x][1]):
- if state[name][0]:
- yield getattr(extensions, name).script, name
-
-
-# Extension functions that map string -> string
-def _apply_string_extensions(function_name, text, state, is_chat=False):
- for extension, _ in iterator():
- if hasattr(extension, function_name):
- func = getattr(extension, function_name)
-
- # Handle old extensions without the 'state' arg or
- # the 'is_chat' kwarg
- count = 0
- has_chat = False
- for k in signature(func).parameters:
- if k == 'is_chat':
- has_chat = True
- else:
- count += 1
-
- if count == 2:
- args = [text, state]
- else:
- args = [text]
-
- if has_chat:
- kwargs = {'is_chat': is_chat}
- else:
- kwargs = {}
-
- text = func(*args, **kwargs)
-
- return text
-
-
-# Extension functions that modify both the internal text and the visible text in chat mode
-def _apply_chat_input_extensions(text, visible_text, state):
- for extension, _ in iterator():
- if hasattr(extension, 'chat_input_modifier'):
- text, visible_text = extension.chat_input_modifier(text, visible_text, state)
-
- return text, visible_text
-
-
-# custom_generate_chat_prompt handling - currently only the first one will work
-def _apply_custom_generate_chat_prompt(text, state, **kwargs):
- for extension, _ in iterator():
- if hasattr(extension, 'custom_generate_chat_prompt'):
- return extension.custom_generate_chat_prompt(text, state, **kwargs)
-
- return None
-
-
-# Extension that modifies the input parameters before they are used
-def _apply_state_modifier_extensions(state):
- for extension, _ in iterator():
- if hasattr(extension, "state_modifier"):
- state = getattr(extension, "state_modifier")(state)
-
- return state
-
-
-# Extension that modifies the chat history before it is used
-def _apply_history_modifier_extensions(history):
- for extension, _ in iterator():
- if hasattr(extension, "history_modifier"):
- history = getattr(extension, "history_modifier")(history)
-
- return history
-
-
-# Extension functions that override the default tokenizer output - The order of execution is not defined
-def _apply_tokenizer_extensions(function_name, state, prompt, input_ids, input_embeds):
- for extension, _ in iterator():
- if hasattr(extension, function_name):
- prompt, input_ids, input_embeds = getattr(extension, function_name)(state, prompt, input_ids, input_embeds)
-
- return prompt, input_ids, input_embeds
-
-
-# Allow extensions to add their own logits processors to the stack being run.
-# Each extension would call `processor_list.append({their LogitsProcessor}())`.
-def _apply_logits_processor_extensions(function_name, processor_list, input_ids):
- for extension, _ in iterator():
- if hasattr(extension, function_name):
- result = getattr(extension, function_name)(processor_list, input_ids)
- if type(result) is list:
- processor_list = result
-
- return processor_list
-
-
-# Get prompt length in tokens after applying extension functions which override the default tokenizer output
-# currently only the first one will work
-def _apply_custom_tokenized_length(prompt):
- for extension, _ in iterator():
- if hasattr(extension, 'custom_tokenized_length'):
- return getattr(extension, 'custom_tokenized_length')(prompt)
-
- return None
-
-
-# Custom generate reply handling - currently only the first one will work
-def _apply_custom_generate_reply():
- for extension, _ in iterator():
- if hasattr(extension, 'custom_generate_reply'):
- return getattr(extension, 'custom_generate_reply')
-
- return None
-
-
-def _apply_custom_css():
- all_css = ''
- for extension, _ in iterator():
- if hasattr(extension, 'custom_css'):
- all_css += getattr(extension, 'custom_css')()
-
- return all_css
-
-
-def _apply_custom_js():
- all_js = ''
- for extension, _ in iterator():
- if hasattr(extension, 'custom_js'):
- all_js += getattr(extension, 'custom_js')()
-
- return all_js
-
-
-def create_extensions_block():
- to_display = []
- for extension, name in iterator():
- if hasattr(extension, "ui") and not (hasattr(extension, 'params') and extension.params.get('is_tab', False)):
- to_display.append((extension, name))
-
- # Creating the extension ui elements
- if len(to_display) > 0:
- with gr.Column(elem_id="extensions"):
- for row in to_display:
- extension, _ = row
- extension.ui()
-
-
-def create_extensions_tabs():
- for extension, name in iterator():
- if hasattr(extension, "ui") and (hasattr(extension, 'params') and extension.params.get('is_tab', False)):
- display_name = getattr(extension, 'params', {}).get('display_name', name)
- with gr.Tab(display_name, elem_classes="extension-tab"):
- extension.ui()
-
-
-EXTENSION_MAP = {
- "input": partial(_apply_string_extensions, "input_modifier"),
- "output": partial(_apply_string_extensions, "output_modifier"),
- "chat_input": _apply_chat_input_extensions,
- "state": _apply_state_modifier_extensions,
- "history": _apply_history_modifier_extensions,
- "bot_prefix": partial(_apply_string_extensions, "bot_prefix_modifier"),
- "tokenizer": partial(_apply_tokenizer_extensions, "tokenizer_modifier"),
- 'logits_processor': partial(_apply_logits_processor_extensions, 'logits_processor_modifier'),
- "custom_generate_chat_prompt": _apply_custom_generate_chat_prompt,
- "custom_generate_reply": _apply_custom_generate_reply,
- "tokenized_length": _apply_custom_tokenized_length,
- "css": _apply_custom_css,
- "js": _apply_custom_js
-}
-
-
-def apply_extensions(typ, *args, **kwargs):
- if typ not in EXTENSION_MAP:
- raise ValueError(f"Invalid extension type {typ}")
-
- return EXTENSION_MAP[typ](*args, **kwargs)
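Since `extensions.py` above discovers hooks purely by attribute lookup on `extensions.<name>.script`, an extension is just a `script.py` module exposing some of the names listed in `EXTENSION_MAP`. A minimal sketch of such a script; the extension name and behavior are invented for illustration:

```python
# extensions/shout/script.py -- hypothetical extension for this webui.
import gradio as gr

# Read by apply_settings() and by the is_tab checks in the UI builders.
params = {
    "display_name": "Shout",
    "is_tab": False,
    "enabled": True,
}

def input_modifier(text, state, is_chat=False):
    """Dispatched via EXTENSION_MAP['input'] before the prompt is tokenized."""
    return text

def output_modifier(text, state, is_chat=False):
    """Dispatched via EXTENSION_MAP['output']; upper-cases the model reply."""
    return text.upper() if params["enabled"] else text

def ui():
    """Rendered by create_extensions_block() because is_tab is False."""
    def _toggle(value):
        params["enabled"] = value

    checkbox = gr.Checkbox(value=params["enabled"], label="Shout replies")
    checkbox.change(_toggle, checkbox, None)
```

Because `input_modifier` takes two positional parameters plus an `is_chat` keyword, `_apply_string_extensions` above calls it as `func(text, state, is_chat=is_chat)`.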
diff --git a/spaces/AriaMei/TTSdemo/monotonic_align/__init__.py b/spaces/AriaMei/TTSdemo/monotonic_align/__init__.py
deleted file mode 100644
index 3d7009c40fea3a98168e3e3bc9ae061e91327422..0000000000000000000000000000000000000000
--- a/spaces/AriaMei/TTSdemo/monotonic_align/__init__.py
+++ /dev/null
@@ -1,19 +0,0 @@
-import numpy as np
-import torch
-from .monotonic_align.core import maximum_path_c
-
-
-def maximum_path(neg_cent, mask):
- """ Cython optimized version.
- neg_cent: [b, t_t, t_s]
- mask: [b, t_t, t_s]
- """
- device = neg_cent.device
- dtype = neg_cent.dtype
- neg_cent = neg_cent.data.cpu().numpy().astype(np.float32)
- path = np.zeros(neg_cent.shape, dtype=np.int32)
-
- t_t_max = mask.sum(1)[:, 0].data.cpu().numpy().astype(np.int32)
- t_s_max = mask.sum(2)[:, 0].data.cpu().numpy().astype(np.int32)
- maximum_path_c(path, neg_cent, t_t_max, t_s_max)
- return torch.from_numpy(path).to(device=device, dtype=dtype)
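For readers without the compiled extension, the Cython kernel's logic can be mirrored in plain NumPy. The following is an unoptimized reference sketch of the same monotonic-alignment dynamic program for a single `[t_t, t_s]` score matrix; it reflects my reading of the usual `core.pyx`, not code shipped in this repo. `maximum_path` above would call it once per batch element:

```python
import numpy as np

def maximum_path_ref(neg_cent: np.ndarray, t_t: int, t_s: int,
                     max_neg_val: float = -1e9) -> np.ndarray:
    """Best monotonic path through a [t_t, t_s] score matrix."""
    value = neg_cent[:t_t, :t_s].astype(np.float32).copy()
    path = np.zeros((t_t, t_s), dtype=np.int32)

    # Forward pass: each cell accumulates the best score reachable by moving
    # down (same column) or down-right (next column).
    for y in range(t_t):
        for x in range(max(0, t_s + y - t_t), min(t_s, y + 1)):
            v_cur = value[y - 1, x] if x != y else max_neg_val   # from above
            if x == 0:
                v_prev = 0.0 if y == 0 else max_neg_val          # must start at (0, 0)
            else:
                v_prev = value[y - 1, x - 1]                     # from the diagonal
            value[y, x] += max(v_prev, v_cur)

    # Backward pass: trace the chosen path from the bottom-right corner.
    index = t_s - 1
    for y in range(t_t - 1, -1, -1):
        path[y, index] = 1
        if index != 0 and (index == y or value[y - 1, index] < value[y - 1, index - 1]):
            index -= 1
    return path
```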
diff --git a/spaces/Ariharasudhan/YoloV5/utils/downloads.py b/spaces/Ariharasudhan/YoloV5/utils/downloads.py
deleted file mode 100644
index 21bb6608d5bac031ece90054c85caba5886de5ed..0000000000000000000000000000000000000000
--- a/spaces/Ariharasudhan/YoloV5/utils/downloads.py
+++ /dev/null
@@ -1,108 +0,0 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
-"""
-Download utils
-"""
-
-import logging
-import os
-import subprocess
-import urllib
-from pathlib import Path
-
-import requests
-import torch
-
-
-def is_url(url, check=True):
- # Check if string is URL and check if URL exists
- try:
- url = str(url)
- result = urllib.parse.urlparse(url)
- assert all([result.scheme, result.netloc]) # check if is url
- return (urllib.request.urlopen(url).getcode() == 200) if check else True # check if exists online
- except (AssertionError, urllib.request.HTTPError):
- return False
-
-
-def gsutil_getsize(url=''):
- # gs://bucket/file size https://cloud.google.com/storage/docs/gsutil/commands/du
- s = subprocess.check_output(f'gsutil du {url}', shell=True).decode('utf-8')
-    return int(s.split(' ')[0]) if len(s) else 0  # bytes
-
-
-def url_getsize(url='https://ultralytics.com/images/bus.jpg'):
- # Return downloadable file size in bytes
- response = requests.head(url, allow_redirects=True)
- return int(response.headers.get('content-length', -1))
-
-
-def safe_download(file, url, url2=None, min_bytes=1E0, error_msg=''):
- # Attempts to download file from url or url2, checks and removes incomplete downloads < min_bytes
- from utils.general import LOGGER
-
- file = Path(file)
- assert_msg = f"Downloaded file '{file}' does not exist or size is < min_bytes={min_bytes}"
- try: # url1
- LOGGER.info(f'Downloading {url} to {file}...')
- torch.hub.download_url_to_file(url, str(file), progress=LOGGER.level <= logging.INFO)
- assert file.exists() and file.stat().st_size > min_bytes, assert_msg # check
- except Exception as e: # url2
- if file.exists():
- file.unlink() # remove partial downloads
- LOGGER.info(f'ERROR: {e}\nRe-attempting {url2 or url} to {file}...')
- os.system(f"curl -# -L '{url2 or url}' -o '{file}' --retry 3 -C -") # curl download, retry and resume on fail
- finally:
- if not file.exists() or file.stat().st_size < min_bytes: # check
- if file.exists():
- file.unlink() # remove partial downloads
- LOGGER.info(f"ERROR: {assert_msg}\n{error_msg}")
- LOGGER.info('')
-
-
-def attempt_download(file, repo='ultralytics/yolov5', release='v6.2'):
- # Attempt file download from GitHub release assets if not found locally. release = 'latest', 'v6.2', etc.
- from utils.general import LOGGER
-
- def github_assets(repository, version='latest'):
- # Return GitHub repo tag (i.e. 'v6.2') and assets (i.e. ['yolov5s.pt', 'yolov5m.pt', ...])
- if version != 'latest':
- version = f'tags/{version}' # i.e. tags/v6.2
- response = requests.get(f'https://api.github.com/repos/{repository}/releases/{version}').json() # github api
- return response['tag_name'], [x['name'] for x in response['assets']] # tag, assets
-
- file = Path(str(file).strip().replace("'", ''))
- if not file.exists():
- # URL specified
- name = Path(urllib.parse.unquote(str(file))).name # decode '%2F' to '/' etc.
- if str(file).startswith(('http:/', 'https:/')): # download
- url = str(file).replace(':/', '://') # Pathlib turns :// -> :/
- file = name.split('?')[0] # parse authentication https://url.com/file.txt?auth...
- if Path(file).is_file():
- LOGGER.info(f'Found {url} locally at {file}') # file already exists
- else:
- safe_download(file=file, url=url, min_bytes=1E5)
- return file
-
- # GitHub assets
- assets = [f'yolov5{size}{suffix}.pt' for size in 'nsmlx' for suffix in ('', '6', '-cls', '-seg')] # default
- try:
- tag, assets = github_assets(repo, release)
- except Exception:
- try:
- tag, assets = github_assets(repo) # latest release
- except Exception:
- try:
- tag = subprocess.check_output('git tag', shell=True, stderr=subprocess.STDOUT).decode().split()[-1]
- except Exception:
- tag = release
-
- file.parent.mkdir(parents=True, exist_ok=True) # make parent dir (if required)
- if name in assets:
- url3 = 'https://drive.google.com/drive/folders/1EFQTEUeXWSFww0luse2jB9M1QNZQGwNl' # backup gdrive mirror
- safe_download(
- file,
- url=f'https://github.com/{repo}/releases/download/{tag}/{name}',
- min_bytes=1E5,
- error_msg=f'{file} missing, try downloading from https://github.com/{repo}/releases/{tag} or {url3}')
-
- return str(file)
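As a usage note for the helper above; the weight names and URL are just examples of the two branches the function handles:

```python
from utils.downloads import attempt_download

# Plain filename: resolved locally if present, otherwise fetched from the
# ultralytics/yolov5 GitHub release assets for the pinned 'v6.2' tag.
weights = attempt_download('yolov5s.pt')

# Full URL: downloaded directly; the local filename is returned.
weights = attempt_download(
    'https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n.pt')
```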
diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pygments/lexers/python.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pygments/lexers/python.py
deleted file mode 100644
index 3341a3826858e8623fade6da45a83f031b735ab8..0000000000000000000000000000000000000000
--- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pygments/lexers/python.py
+++ /dev/null
@@ -1,1204 +0,0 @@
-"""
- pygments.lexers.python
- ~~~~~~~~~~~~~~~~~~~~~~
-
- Lexers for Python and related languages.
-
- :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
- :license: BSD, see LICENSE for details.
-"""
-
-import re
-import keyword
-
-from pip._vendor.pygments.lexer import Lexer, RegexLexer, include, bygroups, using, \
- default, words, combined, do_insertions, this, line_re
-from pip._vendor.pygments.util import get_bool_opt, shebang_matches
-from pip._vendor.pygments.token import Text, Comment, Operator, Keyword, Name, String, \
- Number, Punctuation, Generic, Other, Error, Whitespace
-from pip._vendor.pygments import unistring as uni
-
-__all__ = ['PythonLexer', 'PythonConsoleLexer', 'PythonTracebackLexer',
- 'Python2Lexer', 'Python2TracebackLexer',
- 'CythonLexer', 'DgLexer', 'NumPyLexer']
-
-
-class PythonLexer(RegexLexer):
- """
- For Python source code (version 3.x).
-
- .. versionadded:: 0.10
-
- .. versionchanged:: 2.5
- This is now the default ``PythonLexer``. It is still available as the
- alias ``Python3Lexer``.
- """
-
- name = 'Python'
- url = 'http://www.python.org'
- aliases = ['python', 'py', 'sage', 'python3', 'py3']
- filenames = [
- '*.py',
- '*.pyw',
- # Type stubs
- '*.pyi',
- # Jython
- '*.jy',
- # Sage
- '*.sage',
- # SCons
- '*.sc',
- 'SConstruct',
- 'SConscript',
- # Skylark/Starlark (used by Bazel, Buck, and Pants)
- '*.bzl',
- 'BUCK',
- 'BUILD',
- 'BUILD.bazel',
- 'WORKSPACE',
- # Twisted Application infrastructure
- '*.tac',
- ]
- mimetypes = ['text/x-python', 'application/x-python',
- 'text/x-python3', 'application/x-python3']
-
- uni_name = "[%s][%s]*" % (uni.xid_start, uni.xid_continue)
-
- def innerstring_rules(ttype):
- return [
- # the old style '%s' % (...) string formatting (still valid in Py3)
- (r'%(\(\w+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?'
- '[hlL]?[E-GXc-giorsaux%]', String.Interpol),
- # the new style '{}'.format(...) string formatting
- (r'\{'
- r'((\w+)((\.\w+)|(\[[^\]]+\]))*)?' # field name
- r'(\![sra])?' # conversion
- r'(\:(.?[<>=\^])?[-+ ]?#?0?(\d+)?,?(\.\d+)?[E-GXb-gnosx%]?)?'
- r'\}', String.Interpol),
-
- # backslashes, quotes and formatting signs must be parsed one at a time
- (r'[^\\\'"%{\n]+', ttype),
- (r'[\'"\\]', ttype),
- # unhandled string formatting sign
- (r'%|(\{{1,2})', ttype)
- # newlines are an error (use "nl" state)
- ]
-
- def fstring_rules(ttype):
- return [
- # Assuming that a '}' is the closing brace after format specifier.
- # Sadly, this means that we won't detect syntax error. But it's
- # more important to parse correct syntax correctly, than to
- # highlight invalid syntax.
- (r'\}', String.Interpol),
- (r'\{', String.Interpol, 'expr-inside-fstring'),
- # backslashes, quotes and formatting signs must be parsed one at a time
- (r'[^\\\'"{}\n]+', ttype),
- (r'[\'"\\]', ttype),
- # newlines are an error (use "nl" state)
- ]
-
- tokens = {
- 'root': [
- (r'\n', Whitespace),
- (r'^(\s*)([rRuUbB]{,2})("""(?:.|\n)*?""")',
- bygroups(Whitespace, String.Affix, String.Doc)),
- (r"^(\s*)([rRuUbB]{,2})('''(?:.|\n)*?''')",
- bygroups(Whitespace, String.Affix, String.Doc)),
- (r'\A#!.+$', Comment.Hashbang),
- (r'#.*$', Comment.Single),
- (r'\\\n', Text),
- (r'\\', Text),
- include('keywords'),
- include('soft-keywords'),
- (r'(def)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'funcname'),
- (r'(class)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'classname'),
- (r'(from)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Text),
- 'fromimport'),
- (r'(import)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Text),
- 'import'),
- include('expr'),
- ],
- 'expr': [
- # raw f-strings
- ('(?i)(rf|fr)(""")',
- bygroups(String.Affix, String.Double),
- combined('rfstringescape', 'tdqf')),
- ("(?i)(rf|fr)(''')",
- bygroups(String.Affix, String.Single),
- combined('rfstringescape', 'tsqf')),
- ('(?i)(rf|fr)(")',
- bygroups(String.Affix, String.Double),
- combined('rfstringescape', 'dqf')),
- ("(?i)(rf|fr)(')",
- bygroups(String.Affix, String.Single),
- combined('rfstringescape', 'sqf')),
- # non-raw f-strings
- ('([fF])(""")', bygroups(String.Affix, String.Double),
- combined('fstringescape', 'tdqf')),
- ("([fF])(''')", bygroups(String.Affix, String.Single),
- combined('fstringescape', 'tsqf')),
- ('([fF])(")', bygroups(String.Affix, String.Double),
- combined('fstringescape', 'dqf')),
- ("([fF])(')", bygroups(String.Affix, String.Single),
- combined('fstringescape', 'sqf')),
- # raw bytes and strings
- ('(?i)(rb|br|r)(""")',
- bygroups(String.Affix, String.Double), 'tdqs'),
- ("(?i)(rb|br|r)(''')",
- bygroups(String.Affix, String.Single), 'tsqs'),
- ('(?i)(rb|br|r)(")',
- bygroups(String.Affix, String.Double), 'dqs'),
- ("(?i)(rb|br|r)(')",
- bygroups(String.Affix, String.Single), 'sqs'),
- # non-raw strings
- ('([uU]?)(""")', bygroups(String.Affix, String.Double),
- combined('stringescape', 'tdqs')),
- ("([uU]?)(''')", bygroups(String.Affix, String.Single),
- combined('stringescape', 'tsqs')),
- ('([uU]?)(")', bygroups(String.Affix, String.Double),
- combined('stringescape', 'dqs')),
- ("([uU]?)(')", bygroups(String.Affix, String.Single),
- combined('stringescape', 'sqs')),
- # non-raw bytes
- ('([bB])(""")', bygroups(String.Affix, String.Double),
- combined('bytesescape', 'tdqs')),
- ("([bB])(''')", bygroups(String.Affix, String.Single),
- combined('bytesescape', 'tsqs')),
- ('([bB])(")', bygroups(String.Affix, String.Double),
- combined('bytesescape', 'dqs')),
- ("([bB])(')", bygroups(String.Affix, String.Single),
- combined('bytesescape', 'sqs')),
-
- (r'[^\S\n]+', Text),
- include('numbers'),
- (r'!=|==|<<|>>|:=|[-~+/*%=<>&^|.]', Operator),
- (r'[]{}:(),;[]', Punctuation),
- (r'(in|is|and|or|not)\b', Operator.Word),
- include('expr-keywords'),
- include('builtins'),
- include('magicfuncs'),
- include('magicvars'),
- include('name'),
- ],
- 'expr-inside-fstring': [
- (r'[{([]', Punctuation, 'expr-inside-fstring-inner'),
- # without format specifier
- (r'(=\s*)?' # debug (https://bugs.python.org/issue36817)
- r'(\![sraf])?' # conversion
- r'\}', String.Interpol, '#pop'),
- # with format specifier
- # we'll catch the remaining '}' in the outer scope
- (r'(=\s*)?' # debug (https://bugs.python.org/issue36817)
- r'(\![sraf])?' # conversion
- r':', String.Interpol, '#pop'),
- (r'\s+', Whitespace), # allow new lines
- include('expr'),
- ],
- 'expr-inside-fstring-inner': [
- (r'[{([]', Punctuation, 'expr-inside-fstring-inner'),
- (r'[])}]', Punctuation, '#pop'),
- (r'\s+', Whitespace), # allow new lines
- include('expr'),
- ],
- 'expr-keywords': [
- # Based on https://docs.python.org/3/reference/expressions.html
- (words((
- 'async for', 'await', 'else', 'for', 'if', 'lambda',
- 'yield', 'yield from'), suffix=r'\b'),
- Keyword),
- (words(('True', 'False', 'None'), suffix=r'\b'), Keyword.Constant),
- ],
- 'keywords': [
- (words((
- 'assert', 'async', 'await', 'break', 'continue', 'del', 'elif',
- 'else', 'except', 'finally', 'for', 'global', 'if', 'lambda',
- 'pass', 'raise', 'nonlocal', 'return', 'try', 'while', 'yield',
- 'yield from', 'as', 'with'), suffix=r'\b'),
- Keyword),
- (words(('True', 'False', 'None'), suffix=r'\b'), Keyword.Constant),
- ],
- 'soft-keywords': [
- # `match`, `case` and `_` soft keywords
- (r'(^[ \t]*)' # at beginning of line + possible indentation
- r'(match|case)\b' # a possible keyword
- r'(?![ \t]*(?:' # not followed by...
- r'[:,;=^&|@~)\]}]|(?:' + # characters and keywords that mean this isn't
- r'|'.join(keyword.kwlist) + r')\b))', # pattern matching
- bygroups(Text, Keyword), 'soft-keywords-inner'),
- ],
- 'soft-keywords-inner': [
- # optional `_` keyword
- (r'(\s+)([^\n_]*)(_\b)', bygroups(Whitespace, using(this), Keyword)),
- default('#pop')
- ],
- 'builtins': [
- (words((
- '__import__', 'abs', 'all', 'any', 'bin', 'bool', 'bytearray',
- 'breakpoint', 'bytes', 'chr', 'classmethod', 'compile', 'complex',
- 'delattr', 'dict', 'dir', 'divmod', 'enumerate', 'eval', 'filter',
- 'float', 'format', 'frozenset', 'getattr', 'globals', 'hasattr',
- 'hash', 'hex', 'id', 'input', 'int', 'isinstance', 'issubclass',
- 'iter', 'len', 'list', 'locals', 'map', 'max', 'memoryview',
- 'min', 'next', 'object', 'oct', 'open', 'ord', 'pow', 'print',
- 'property', 'range', 'repr', 'reversed', 'round', 'set', 'setattr',
- 'slice', 'sorted', 'staticmethod', 'str', 'sum', 'super', 'tuple',
-                'type', 'vars', 'zip'), prefix=r'(?<!\.)', suffix=r'\b'),
-             Name.Builtin),
-            (r'!=|==|<<|>>|[-~+/*%=<>&^|.]', Operator),
- include('keywords'),
- (r'(def)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'funcname'),
- (r'(class)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'classname'),
- (r'(from)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Text),
- 'fromimport'),
- (r'(import)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Text),
- 'import'),
- include('builtins'),
- include('magicfuncs'),
- include('magicvars'),
- include('backtick'),
- ('([rR]|[uUbB][rR]|[rR][uUbB])(""")',
- bygroups(String.Affix, String.Double), 'tdqs'),
- ("([rR]|[uUbB][rR]|[rR][uUbB])(''')",
- bygroups(String.Affix, String.Single), 'tsqs'),
- ('([rR]|[uUbB][rR]|[rR][uUbB])(")',
- bygroups(String.Affix, String.Double), 'dqs'),
- ("([rR]|[uUbB][rR]|[rR][uUbB])(')",
- bygroups(String.Affix, String.Single), 'sqs'),
- ('([uUbB]?)(""")', bygroups(String.Affix, String.Double),
- combined('stringescape', 'tdqs')),
- ("([uUbB]?)(''')", bygroups(String.Affix, String.Single),
- combined('stringescape', 'tsqs')),
- ('([uUbB]?)(")', bygroups(String.Affix, String.Double),
- combined('stringescape', 'dqs')),
- ("([uUbB]?)(')", bygroups(String.Affix, String.Single),
- combined('stringescape', 'sqs')),
- include('name'),
- include('numbers'),
- ],
- 'keywords': [
- (words((
- 'assert', 'break', 'continue', 'del', 'elif', 'else', 'except',
- 'exec', 'finally', 'for', 'global', 'if', 'lambda', 'pass',
- 'print', 'raise', 'return', 'try', 'while', 'yield',
- 'yield from', 'as', 'with'), suffix=r'\b'),
- Keyword),
- ],
- 'builtins': [
- (words((
- '__import__', 'abs', 'all', 'any', 'apply', 'basestring', 'bin',
- 'bool', 'buffer', 'bytearray', 'bytes', 'callable', 'chr', 'classmethod',
- 'cmp', 'coerce', 'compile', 'complex', 'delattr', 'dict', 'dir', 'divmod',
- 'enumerate', 'eval', 'execfile', 'exit', 'file', 'filter', 'float',
- 'frozenset', 'getattr', 'globals', 'hasattr', 'hash', 'hex', 'id',
- 'input', 'int', 'intern', 'isinstance', 'issubclass', 'iter', 'len',
- 'list', 'locals', 'long', 'map', 'max', 'min', 'next', 'object',
- 'oct', 'open', 'ord', 'pow', 'property', 'range', 'raw_input', 'reduce',
- 'reload', 'repr', 'reversed', 'round', 'set', 'setattr', 'slice',
- 'sorted', 'staticmethod', 'str', 'sum', 'super', 'tuple', 'type',
- 'unichr', 'unicode', 'vars', 'xrange', 'zip'),
-                prefix=r'(?<!\.)', suffix=r'\b'),
-             Name.Builtin),
-
-
-class PythonConsoleLexer(Lexer):
-    """
-    For Python console output or doctests, such as:
-
-    >>> a = 'foo'
-    >>> print a
-    foo
-    >>> 1 / 0
-    Traceback (most recent call last):
-      File "<stdin>", line 1, in <module>
-    ZeroDivisionError: integer division or modulo by zero
-
- Additional options:
-
- `python3`
- Use Python 3 lexer for code. Default is ``True``.
-
- .. versionadded:: 1.0
- .. versionchanged:: 2.5
- Now defaults to ``True``.
- """
- name = 'Python console session'
- aliases = ['pycon']
- mimetypes = ['text/x-python-doctest']
-
- def __init__(self, **options):
- self.python3 = get_bool_opt(options, 'python3', True)
- Lexer.__init__(self, **options)
-
- def get_tokens_unprocessed(self, text):
- if self.python3:
- pylexer = PythonLexer(**self.options)
- tblexer = PythonTracebackLexer(**self.options)
- else:
- pylexer = Python2Lexer(**self.options)
- tblexer = Python2TracebackLexer(**self.options)
-
- curcode = ''
- insertions = []
- curtb = ''
- tbindex = 0
- tb = 0
- for match in line_re.finditer(text):
- line = match.group()
- if line.startswith('>>> ') or line.startswith('... '):
- tb = 0
- insertions.append((len(curcode),
- [(0, Generic.Prompt, line[:4])]))
- curcode += line[4:]
- elif line.rstrip() == '...' and not tb:
- # only a new >>> prompt can end an exception block
- # otherwise an ellipsis in place of the traceback frames
- # will be mishandled
- insertions.append((len(curcode),
- [(0, Generic.Prompt, '...')]))
- curcode += line[3:]
- else:
- if curcode:
- yield from do_insertions(
- insertions, pylexer.get_tokens_unprocessed(curcode))
- curcode = ''
- insertions = []
- if (line.startswith('Traceback (most recent call last):') or
- re.match(' File "[^"]+", line \\d+\\n$', line)):
- tb = 1
- curtb = line
- tbindex = match.start()
- elif line == 'KeyboardInterrupt\n':
- yield match.start(), Name.Class, line
- elif tb:
- curtb += line
- if not (line.startswith(' ') or line.strip() == '...'):
- tb = 0
- for i, t, v in tblexer.get_tokens_unprocessed(curtb):
- yield tbindex+i, t, v
- curtb = ''
- else:
- yield match.start(), Generic.Output, line
- if curcode:
- yield from do_insertions(insertions,
- pylexer.get_tokens_unprocessed(curcode))
- if curtb:
- for i, t, v in tblexer.get_tokens_unprocessed(curtb):
- yield tbindex+i, t, v
-
-
-class PythonTracebackLexer(RegexLexer):
- """
- For Python 3.x tracebacks, with support for chained exceptions.
-
- .. versionadded:: 1.0
-
- .. versionchanged:: 2.5
- This is now the default ``PythonTracebackLexer``. It is still available
- as the alias ``Python3TracebackLexer``.
- """
-
- name = 'Python Traceback'
- aliases = ['pytb', 'py3tb']
- filenames = ['*.pytb', '*.py3tb']
- mimetypes = ['text/x-python-traceback', 'text/x-python3-traceback']
-
- tokens = {
- 'root': [
- (r'\n', Whitespace),
- (r'^Traceback \(most recent call last\):\n', Generic.Traceback, 'intb'),
- (r'^During handling of the above exception, another '
- r'exception occurred:\n\n', Generic.Traceback),
- (r'^The above exception was the direct cause of the '
- r'following exception:\n\n', Generic.Traceback),
- (r'^(?= File "[^"]+", line \d+)', Generic.Traceback, 'intb'),
- (r'^.*\n', Other),
- ],
- 'intb': [
- (r'^( File )("[^"]+")(, line )(\d+)(, in )(.+)(\n)',
- bygroups(Text, Name.Builtin, Text, Number, Text, Name, Whitespace)),
- (r'^( File )("[^"]+")(, line )(\d+)(\n)',
- bygroups(Text, Name.Builtin, Text, Number, Whitespace)),
- (r'^( )(.+)(\n)',
- bygroups(Whitespace, using(PythonLexer), Whitespace), 'markers'),
- (r'^([ \t]*)(\.\.\.)(\n)',
- bygroups(Whitespace, Comment, Whitespace)), # for doctests...
- (r'^([^:]+)(: )(.+)(\n)',
- bygroups(Generic.Error, Text, Name, Whitespace), '#pop'),
- (r'^([a-zA-Z_][\w.]*)(:?\n)',
- bygroups(Generic.Error, Whitespace), '#pop')
- ],
- 'markers': [
-          # Either `PEP 657 <https://peps.python.org/pep-0657/>`_
- # error locations in Python 3.11+, or single-caret markers
- # for syntax errors before that.
- (r'^( {4,})([~^]+)(\n)',
- bygroups(Whitespace, Punctuation.Marker, Whitespace),
- '#pop'),
- default('#pop'),
- ],
- }
-
-
-Python3TracebackLexer = PythonTracebackLexer
-
-
-class Python2TracebackLexer(RegexLexer):
- """
- For Python tracebacks.
-
- .. versionadded:: 0.7
-
- .. versionchanged:: 2.5
- This class has been renamed from ``PythonTracebackLexer``.
- ``PythonTracebackLexer`` now refers to the Python 3 variant.
- """
-
- name = 'Python 2.x Traceback'
- aliases = ['py2tb']
- filenames = ['*.py2tb']
- mimetypes = ['text/x-python2-traceback']
-
- tokens = {
- 'root': [
- # Cover both (most recent call last) and (innermost last)
- # The optional ^C allows us to catch keyboard interrupt signals.
- (r'^(\^C)?(Traceback.*\n)',
- bygroups(Text, Generic.Traceback), 'intb'),
- # SyntaxError starts with this.
- (r'^(?= File "[^"]+", line \d+)', Generic.Traceback, 'intb'),
- (r'^.*\n', Other),
- ],
- 'intb': [
- (r'^( File )("[^"]+")(, line )(\d+)(, in )(.+)(\n)',
- bygroups(Text, Name.Builtin, Text, Number, Text, Name, Whitespace)),
- (r'^( File )("[^"]+")(, line )(\d+)(\n)',
- bygroups(Text, Name.Builtin, Text, Number, Whitespace)),
- (r'^( )(.+)(\n)',
- bygroups(Text, using(Python2Lexer), Whitespace), 'marker'),
- (r'^([ \t]*)(\.\.\.)(\n)',
- bygroups(Text, Comment, Whitespace)), # for doctests...
- (r'^([^:]+)(: )(.+)(\n)',
- bygroups(Generic.Error, Text, Name, Whitespace), '#pop'),
- (r'^([a-zA-Z_]\w*)(:?\n)',
- bygroups(Generic.Error, Whitespace), '#pop')
- ],
- 'marker': [
- # For syntax errors.
- (r'( {4,})(\^)', bygroups(Text, Punctuation.Marker), '#pop'),
- default('#pop'),
- ],
- }
-
-
-class CythonLexer(RegexLexer):
- """
- For Pyrex and Cython source code.
-
- .. versionadded:: 1.1
- """
-
- name = 'Cython'
- url = 'http://cython.org'
- aliases = ['cython', 'pyx', 'pyrex']
- filenames = ['*.pyx', '*.pxd', '*.pxi']
- mimetypes = ['text/x-cython', 'application/x-cython']
-
- tokens = {
- 'root': [
- (r'\n', Whitespace),
- (r'^(\s*)("""(?:.|\n)*?""")', bygroups(Whitespace, String.Doc)),
- (r"^(\s*)('''(?:.|\n)*?''')", bygroups(Whitespace, String.Doc)),
- (r'[^\S\n]+', Text),
- (r'#.*$', Comment),
- (r'[]{}:(),;[]', Punctuation),
- (r'\\\n', Whitespace),
- (r'\\', Text),
- (r'(in|is|and|or|not)\b', Operator.Word),
- (r'(<)([a-zA-Z0-9.?]+)(>)',
- bygroups(Punctuation, Keyword.Type, Punctuation)),
- (r'!=|==|<<|>>|[-~+/*%=<>&^|.?]', Operator),
- (r'(from)(\d+)(<=)(\s+)(<)(\d+)(:)',
- bygroups(Keyword, Number.Integer, Operator, Name, Operator,
- Name, Punctuation)),
- include('keywords'),
- (r'(def|property)(\s+)', bygroups(Keyword, Text), 'funcname'),
- (r'(cp?def)(\s+)', bygroups(Keyword, Text), 'cdef'),
- # (should actually start a block with only cdefs)
- (r'(cdef)(:)', bygroups(Keyword, Punctuation)),
- (r'(class|struct)(\s+)', bygroups(Keyword, Text), 'classname'),
- (r'(from)(\s+)', bygroups(Keyword, Text), 'fromimport'),
- (r'(c?import)(\s+)', bygroups(Keyword, Text), 'import'),
- include('builtins'),
- include('backtick'),
- ('(?:[rR]|[uU][rR]|[rR][uU])"""', String, 'tdqs'),
- ("(?:[rR]|[uU][rR]|[rR][uU])'''", String, 'tsqs'),
- ('(?:[rR]|[uU][rR]|[rR][uU])"', String, 'dqs'),
- ("(?:[rR]|[uU][rR]|[rR][uU])'", String, 'sqs'),
- ('[uU]?"""', String, combined('stringescape', 'tdqs')),
- ("[uU]?'''", String, combined('stringescape', 'tsqs')),
- ('[uU]?"', String, combined('stringescape', 'dqs')),
- ("[uU]?'", String, combined('stringescape', 'sqs')),
- include('name'),
- include('numbers'),
- ],
- 'keywords': [
- (words((
- 'assert', 'async', 'await', 'break', 'by', 'continue', 'ctypedef', 'del', 'elif',
- 'else', 'except', 'except?', 'exec', 'finally', 'for', 'fused', 'gil',
- 'global', 'if', 'include', 'lambda', 'nogil', 'pass', 'print',
- 'raise', 'return', 'try', 'while', 'yield', 'as', 'with'), suffix=r'\b'),
- Keyword),
- (r'(DEF|IF|ELIF|ELSE)\b', Comment.Preproc),
- ],
- 'builtins': [
- (words((
- '__import__', 'abs', 'all', 'any', 'apply', 'basestring', 'bin', 'bint',
- 'bool', 'buffer', 'bytearray', 'bytes', 'callable', 'chr',
- 'classmethod', 'cmp', 'coerce', 'compile', 'complex', 'delattr',
- 'dict', 'dir', 'divmod', 'enumerate', 'eval', 'execfile', 'exit',
- 'file', 'filter', 'float', 'frozenset', 'getattr', 'globals',
- 'hasattr', 'hash', 'hex', 'id', 'input', 'int', 'intern', 'isinstance',
- 'issubclass', 'iter', 'len', 'list', 'locals', 'long', 'map', 'max',
- 'min', 'next', 'object', 'oct', 'open', 'ord', 'pow', 'property', 'Py_ssize_t',
- 'range', 'raw_input', 'reduce', 'reload', 'repr', 'reversed',
- 'round', 'set', 'setattr', 'slice', 'sorted', 'staticmethod',
- 'str', 'sum', 'super', 'tuple', 'type', 'unichr', 'unicode', 'unsigned',
-                'vars', 'xrange', 'zip'), prefix=r'(?<!\.)', suffix=r'\b'),
-             Name.Builtin),
-RUN echo '%sudo ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers
-USER appuser
-WORKDIR /home/appuser
-
-ENV PATH="/home/appuser/.local/bin:${PATH}"
-RUN wget https://bootstrap.pypa.io/get-pip.py && \
- python3 get-pip.py --user && \
- rm get-pip.py
-
-# install dependencies
-# See https://pytorch.org/ for other options if you use a different version of CUDA
-RUN pip install --user tensorboard cmake # cmake from apt-get is too old
-RUN pip install --user torch==1.10 torchvision==0.11.1 -f https://download.pytorch.org/whl/cu111/torch_stable.html
-
-RUN pip install --user 'git+https://github.com/facebookresearch/fvcore'
-# install detectron2
-RUN git clone https://github.com/facebookresearch/detectron2 detectron2_repo
-# set FORCE_CUDA because during `docker build` cuda is not accessible
-ENV FORCE_CUDA="1"
-# This will by default build detectron2 for all common cuda architectures and take a lot more time,
-# because inside `docker build`, there is no way to tell which architecture will be used.
-ARG TORCH_CUDA_ARCH_LIST="Kepler;Kepler+Tesla;Maxwell;Maxwell+Tegra;Pascal;Volta;Turing"
-ENV TORCH_CUDA_ARCH_LIST="${TORCH_CUDA_ARCH_LIST}"
-
-RUN pip install --user -e detectron2_repo
-
-# Set a fixed model cache directory.
-ENV FVCORE_CACHE="/tmp"
-WORKDIR /home/appuser/detectron2_repo
-
-# run detectron2 under user "appuser":
-# wget http://images.cocodataset.org/val2017/000000439715.jpg -O input.jpg
-# python3 demo/demo.py \
- #--config-file configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml \
- #--input input.jpg --output outputs/ \
- #--opts MODEL.WEIGHTS detectron2://COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x/137849600/model_final_f10217.pkl
diff --git a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/docs/tutorials/augmentation.md b/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/docs/tutorials/augmentation.md
deleted file mode 100644
index 7601a082ceadf645e32468c2045dfe50c1216efc..0000000000000000000000000000000000000000
--- a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/docs/tutorials/augmentation.md
+++ /dev/null
@@ -1,186 +0,0 @@
-
-# Data Augmentation
-
-Augmentation is an important part of training.
-Detectron2's data augmentation system aims at addressing the following goals:
-
-1. Allow augmenting multiple data types together
- (e.g., images together with their bounding boxes and masks)
-2. Allow applying a sequence of statically-declared augmentations
-3. Allow adding custom new data types to augment (rotated bounding boxes, video clips, etc.)
-4. Process and manipulate the __operations__ that are applied by augmentations
-
-The first two features cover most of the common use cases, and are also
-available in other libraries such as [albumentations](https://medium.com/pytorch/multi-target-in-albumentations-16a777e9006e).
-Supporting other features adds some overhead to detectron2's augmentation API,
-which we'll explain in this tutorial.
-
-This tutorial focuses on how to use augmentations when writing new data loaders,
-and how to write new augmentations.
-If you use the default data loader in detectron2, it already supports taking a user-provided list of custom augmentations,
-as explained in the [Dataloader tutorial](data_loading).
-
-## Basic Usage
-
-The basic usage of feature (1) and (2) is like the following:
-```python
-from detectron2.data import transforms as T
-# Define a sequence of augmentations:
-augs = T.AugmentationList([
- T.RandomBrightness(0.9, 1.1),
- T.RandomFlip(prob=0.5),
- T.RandomCrop("absolute", (640, 640))
-]) # type: T.Augmentation
-
-# Define the augmentation input ("image" required, others optional):
-input = T.AugInput(image, boxes=boxes, sem_seg=sem_seg)
-# Apply the augmentation:
-transform = augs(input) # type: T.Transform
-image_transformed = input.image # new image
-sem_seg_transformed = input.sem_seg # new semantic segmentation
-
-# For any extra data that needs to be augmented together, use transform, e.g.:
-image2_transformed = transform.apply_image(image2)
-polygons_transformed = transform.apply_polygons(polygons)
-```
-
-Three basic concepts are involved here. They are:
-* [T.Augmentation](../modules/data_transforms.html#detectron2.data.transforms.Augmentation) defines the __"policy"__ to modify inputs.
- * its `__call__(AugInput) -> Transform` method augments the inputs in-place, and returns the operation that is applied
-* [T.Transform](../modules/data_transforms.html#detectron2.data.transforms.Transform)
- implements the actual __operations__ to transform data
- * it has methods such as `apply_image`, `apply_coords` that define how to transform each data type
-* [T.AugInput](../modules/data_transforms.html#detectron2.data.transforms.AugInput)
- stores inputs needed by `T.Augmentation` and how they should be transformed.
- This concept is needed for some advanced usage.
- Using this class directly should be sufficient for all common use cases,
- since extra data not in `T.AugInput` can be augmented using the returned
- `transform`, as shown in the above example.
-
-## Write New Augmentations
-
-Most 2D augmentations only need to know about the input image. Such augmentation can be implemented easily like this:
-
-```python
-class MyColorAugmentation(T.Augmentation):
- def get_transform(self, image):
- r = np.random.rand(2)
- return T.ColorTransform(lambda x: x * r[0] + r[1] * 10)
-
-class MyCustomResize(T.Augmentation):
- def get_transform(self, image):
- old_h, old_w = image.shape[:2]
- new_h, new_w = int(old_h * np.random.rand()), int(old_w * 1.5)
- return T.ResizeTransform(old_h, old_w, new_h, new_w)
-
-augs = MyCustomResize()
-transform = augs(input)
-```
-
-In addition to image, any attributes of the given `AugInput` can be used as long
-as they are part of the function signature, e.g.:
-
-```python
-class MyCustomCrop(T.Augmentation):
- def get_transform(self, image, sem_seg):
- # decide where to crop using both image and sem_seg
- return T.CropTransform(...)
-
-augs = MyCustomCrop()
-assert hasattr(input, "image") and hasattr(input, "sem_seg")
-transform = augs(input)
-```
-
-New transform operations can also be added by subclassing
-[T.Transform](../modules/data_transforms.html#detectron2.data.transforms.Transform).
-
-## Advanced Usage
-
-We give a few examples of advanced usages that
-are enabled by our system.
-These options can be interesting for new research,
-although changing them is often not needed
-for standard use cases.
-
-### Custom transform strategy
-
-Instead of only returning the augmented data, detectron2's `Augmentation` returns the __operations__ as `T.Transform`.
-This allows users to apply custom transform strategy on their data.
-We use keypoints data as an example.
-
-Keypoints are (x, y) coordinates, but they are not so trivial to augment due to the semantic meaning they carry.
-Such meaning is only known to the users; therefore users may want to augment them manually
-by looking at the returned `transform`.
-For example, when an image is horizontally flipped, we'd like to swap the keypoint annotations for "left eye" and "right eye".
-This can be done like this (included by default in detectron2's default data loader):
-```python
-# augs, input are defined as in previous examples
-transform = augs(input) # type: T.Transform
-keypoints_xy = transform.apply_coords(keypoints_xy) # transform the coordinates
-
-# get a list of all transforms that were applied
-transforms = T.TransformList([transform]).transforms
-# check if it was flipped an odd number of times
-do_hflip = sum(isinstance(t, T.HFlipTransform) for t in transforms) % 2 == 1
-if do_hflip:
- keypoints_xy = keypoints_xy[flip_indices_mapping]
-```
-
-As another example, keypoints annotations often have a "visibility" field.
-A sequence of augmentations might augment a visible keypoint out of the image boundary (e.g. with cropping),
-but then bring it back within the boundary afterwards (e.g. with image padding).
-If users decide to label such keypoints "invisible",
-then the visibility check has to happen after every transform step.
-This can be achieved by:
-
-```python
-transform = augs(input) # type: T.TransformList
-assert isinstance(transform, T.TransformList)
-for t in transform.transforms:
- keypoints_xy = t.apply_coords(keypoints_xy)
-    visibility &= ((keypoints_xy >= [0, 0]) & (keypoints_xy <= [W, H])).all(axis=1)
-
-# btw, detectron2's `transform_keypoint_annotations` function chooses to label such keypoints "visible":
-# keypoints_xy = transform.apply_coords(keypoints_xy)
-# visibility &= ((keypoints_xy >= [0, 0]) & (keypoints_xy <= [W, H])).all(axis=1)
-```
-
-
-### Geometrically invert the transform
-If images are pre-processed by augmentations before inference, the predicted results
-such as segmentation masks are localized on the augmented image.
-We'd like to invert the applied augmentation with the [inverse()](../modules/data_transforms.html#detectron2.data.transforms.Transform.inverse)
-API, to obtain results on the original image:
-```python
-transform = augs(input)
-pred_mask = make_prediction(input.image)
-inv_transform = transform.inverse()
-pred_mask_orig = inv_transform.apply_segmentation(pred_mask)
-```
-
-### Add new data types
-
-[T.Transform](../modules/data_transforms.html#detectron2.data.transforms.Transform)
-supports a few common data types to transform, including images, coordinates, masks, boxes, polygons.
-It allows registering new data types, e.g.:
-```python
-@T.HFlipTransform.register_type("rotated_boxes")
-def func(flip_transform: T.HFlipTransform, rotated_boxes: Any):
- # do the work
- return flipped_rotated_boxes
-
-t = HFlipTransform(width=800)
-transformed_rotated_boxes = t.apply_rotated_boxes(rotated_boxes) # func will be called
-```
-
-### Extend T.AugInput
-
-An augmentation can only access attributes available in the given input.
-[T.AugInput](../modules/data_transforms.html#detectron2.data.transforms.StandardAugInput) defines "image", "boxes", "sem_seg",
-which are sufficient for common augmentation strategies to decide how to augment.
-If not, a custom implementation is needed.
-
-By re-implementing the "transform()" method in AugInput, it is also possible to
-augment different fields in ways that depend on each other.
-Such use cases are uncommon (e.g. post-processing bounding boxes based on augmented masks), but allowed by the system.
-
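The tutorial above mentions that new transform operations can be added by subclassing `T.Transform` but does not show one. The following is a minimal sketch; the gamma operation and its parameters are invented for illustration, and `inverse()` simply follows the API contract the tutorial describes:

```python
import numpy as np
from detectron2.data import transforms as T

class GammaTransform(T.Transform):
    """Gamma-corrects the image; geometry is left untouched."""

    def __init__(self, gamma: float):
        super().__init__()
        self.gamma = gamma

    def apply_image(self, img: np.ndarray) -> np.ndarray:
        # Assumes uint8 images in [0, 255].
        return (255.0 * (img / 255.0) ** self.gamma).astype(np.uint8)

    def apply_coords(self, coords: np.ndarray) -> np.ndarray:
        # A photometric transform does not move coordinates.
        return coords

    def inverse(self) -> T.Transform:
        # Gamma correction is invertible, so this transform supports the
        # inverse() workflow shown in the tutorial.
        return GammaTransform(1.0 / self.gamma)
```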
diff --git a/spaces/Benson/text-generation/Examples/Descargar 6 Minutos En Ingls.md b/spaces/Benson/text-generation/Examples/Descargar 6 Minutos En Ingls.md
deleted file mode 100644
index 1e569ec131e1ad414552a87b07ee9ffdf0104e20..0000000000000000000000000000000000000000
--- a/spaces/Benson/text-generation/Examples/Descargar 6 Minutos En Ingls.md
+++ /dev/null
@@ -1,67 +0,0 @@
-
-
-# How to download 6 Minute English podcasts
-
-Podcasts are audio programs you can listen to online or offline. They cover a wide range of topics, from news and entertainment to education and culture. One of the most popular podcast series for English learners is 6 Minute English from BBC Learning English. Each episode presents a topical discussion and introduces new vocabulary in a clear and engaging way. You can listen to 6 Minute English on the BBC website, but you can also download the episodes to your device and listen anytime, anywhere. This article shows how to download 6 Minute English podcasts on different devices and apps, and explains some of the benefits that listening to podcasts has for your brain and your personal development.
-
-## How to download podcasts on iOS
-
-If you have an iPhone or iPad, you can use the built-in Apple Podcasts app to download podcasts. Here are the steps:
-
-1. Open the Apple Podcasts app and tap the search icon in the bottom-right corner.
-2. Type "6 Minute English" into the search box and tap the podcast's name when it appears.
-3. Tap the Subscribe button in the top-right corner. This adds the podcast to your Library.
-4. Tap the Available Episodes tab to see all the episodes you can download.
-5. Tap the download icon next to each episode you want. It looks like a cloud with a downward arrow.
-6. Wait for the download to finish. You can check progress by tapping the Downloads icon in the bottom-right corner.
-7. To listen to your downloaded episodes, go to your Library and tap Downloaded Episodes.
-
-## How to download podcasts on Android
-
-If you have an Android phone or tablet, you can use the Google Podcasts app to download podcasts. Here are the steps:
-
-1. Open the Google Podcasts app and tap the search icon in the top-right corner.
-2. Type "6 Minute English" into the search box and tap the podcast's name when it appears.
-3. Tap the Subscribe button in the top-right corner. This adds the podcast to your Library.
-4. Tap the Available Episodes tab to see all the episodes you can download.
-5. Tap the download icon next to each episode you want. It looks like a circle with a downward arrow inside.
-6. Wait for the download to finish. You can check progress by tapping the Downloads icon in the bottom-right corner.
-7. To listen to your downloaded episodes, go to your Library and tap Downloads.
-
-You can also use other podcast apps on Android, such as Stitcher, DoggCatcher, or Castbox. They have features similar to Google Podcasts but may have different interfaces and layouts. You can download these apps from the Google Play Store and follow their instructions to search for, subscribe to, and download podcasts.
-
-## How to download podcasts on PC or Mac
-
-If you have a computer, you can use your web browser to download podcasts. Here are the steps:
-
-1. Open your web browser and go to the BBC Learning English website.
-2. Click the Podcasts tab at the top of the page.
-3. Scroll down and find the 6 Minute English section.
-4. Click the Download button next to each episode you want. It looks like a downward arrow with a line underneath.
-
-To listen to your downloaded episodes, open them with your preferred media player, such as VLC, Windows Media Player, or iTunes.
-
-You can also use dedicated podcast software on your computer, such as Audacity, GPodder, or iTunes. It has features similar to podcast apps but may offer more options and settings. You can download this software from its official website and follow its instructions to search for, subscribe to, and download podcasts.
-
-## Benefits of listening to podcasts
-
-Listening to podcasts is not only fun and convenient but also good for your brain and your personal development. Here are some of the benefits:
-
-- Stimulating different parts of the brain: podcasts are a form of auditory learning that activates different brain regions than visual learning does. According to a UC Berkeley study, listening to podcasts can improve your memory, attention, and comprehension skills. Podcasts can also stimulate your imagination and creativity by making you visualize what you hear.
-- Learning new things and broadening your horizons: podcasts are a great way to learn new things and discover new perspectives on a variety of topics. You can choose podcasts that match your interests and passions, or podcasts that challenge your views and opinions. Podcasts can also expose you to different cultures, languages, and accents that you might not encounter in daily life.
-
-## Conclusion
-
-In conclusion, downloading 6 Minute English podcasts is a simple and convenient way to enjoy this popular podcast series from BBC Learning English. You can download podcasts on different devices and apps, depending on your preferences and what is available to you. You can also benefit from listening to podcasts by stimulating your brain, learning new things, and improving yourself. I hope this article has helped you understand how to download 6 Minute English podcasts and why you should listen to them. If you want to learn more about podcasts and how they can help you improve your English skills, check out the resources below.
-
-## FAQs
-
-**What is a podcast?**
-
-A podcast is an audio program you can listen to online or offline. It usually consists of episodes released regularly by the same creator or host. Podcasts cover a wide range of topics, from news and entertainment to education and culture.
-
-**How do I find podcasts I like?**
-
-You can find podcasts you like by browsing different categories, genres, or topics in podcast apps or on websites. You can also search for keywords or subjects that interest you, or get recommendations from friends, family, or online communities.
-
-**How can I listen to podcasts offline?**
-
-You can listen to podcasts offline by downloading them to your device and playing them with a podcast app or media player, as explained in this article. You can also adjust your podcast app's settings to automatically download new episodes of the podcasts you subscribe to.
-
-**How do I delete podcasts I don't want to keep?**
-
-**How do I share podcasts I like with others?**
-
-You can share podcasts you like by tapping the Share icon next to each episode you want to share. It looks like a box with an upward-pointing arrow, or a three-dot menu. You can then choose how to share the podcast, such as by email, text, social media, or other apps. You can also copy the podcast's link and paste it wherever you want.
-
-
\ No newline at end of file
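The walkthroughs in the file above are all app- or browser-based. The same result can be scripted; the sketch below is illustrative only, assumes the show publishes a standard RSS feed (the feed URL shown is hypothetical), and uses the third-party `feedparser` and `requests` packages:

```python
import feedparser
import requests

# Hypothetical feed URL -- substitute the real RSS address from the BBC site.
FEED_URL = "https://example.com/6-minute-english/rss.xml"

feed = feedparser.parse(FEED_URL)
latest = feed.entries[0]  # most podcast feeds list entries newest-first

# Podcast audio is attached to each entry as an RSS "enclosure".
audio_url = latest.enclosures[0].href

response = requests.get(audio_url, timeout=60)
response.raise_for_status()

filename = latest.title.replace(" ", "_") + ".mp3"
with open(filename, "wb") as f:
    f.write(response.content)
print(f"Saved {filename}")
```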
diff --git a/spaces/Benson/text-generation/Examples/Descargar Doctrina.ai Apk.md b/spaces/Benson/text-generation/Examples/Descargar Doctrina.ai Apk.md
deleted file mode 100644
index 46f0aadecf74a56e72e123cec28cad8fa2fcc6fa..0000000000000000000000000000000000000000
--- a/spaces/Benson/text-generation/Examples/Descargar Doctrina.ai Apk.md
+++ /dev/null
@@ -1,72 +0,0 @@
-
-
Descargar Doctrina AI APK: Una manera inteligente de aprender en línea
-
¿Estás buscando una manera de mejorar tu rendimiento académico y hacer que el aprendizaje sea más divertido y fácil? Si es así, es posible que desee consultar Doctrina AI, una plataforma de aprendizaje en línea que utiliza la inteligencia artificial para ayudarle con sus estudios. Ya sea que necesite ayuda para escribir ensayos, tomar exámenes, tomar notas o discutir libros, Doctrina AI lo tiene cubierto. En este artículo, te diremos qué es Doctrina AI, qué características ofrece, cómo descargar e instalar la aplicación, qué dicen los usuarios al respecto y cómo se compara con otras aplicaciones similares. Al final de este artículo, tendrás una idea clara de si Doctrina AI es la aplicación adecuada para ti.
Doctrina AI es una plataforma de aprendizaje en línea que utiliza el modelo de lenguaje GPT-3 de OpenAI para ayudar a los estudiantes y estudiantes a estudiar mejor en línea. Actualmente ofrece tres características principales: un generador de ensayos, un generador de exámenes y una herramienta de notas de clase. Además, también tiene un generador de discusión de libros y una herramienta de corrección de gramática. Todas estas características están impulsadas por la tecnología avanzada de IA que se adapta a su estilo de aprendizaje y nivel de conocimiento. Doctrina AI es útil para estudiantes y estudiantes que quieren ahorrar tiempo y esfuerzo en sus tareas de escritura, prepararse para exámenes y exámenes, mejorar sus habilidades de toma de notas y participar en discusiones estimulantes de libros. También es útil para los profesores que quieren crear exámenes y ensayos para sus estudiantes.
-
Para utilizar Doctrina AI, es necesario descargar e instalar la aplicación en su dispositivo Android. La aplicación es de uso gratuito y no requiere registro ni inicio de sesión. Puede acceder a todas sus funciones desde su interfaz fácil de usar. Para descargar la aplicación, puede visitar su sitio web oficial o buscar "Doctrina AI" en Google Play Store. Para instalar la aplicación, debe permitir fuentes desconocidas en la configuración del dispositivo y seguir las instrucciones en la pantalla.
-
-
Doctrina AI tiene varias características que pueden ayudarle con sus tareas de aprendizaje. Aquí están algunas de ellas:
-
Generador de ensayos
-
El generador de ensayos es una herramienta que puede crear un ensayo de estudiante sobre cualquier tema en minutos. Solo necesitas introducir el tema o la pregunta sobre la que quieres escribir, elegir el tipo de ensayo (argumentativo, persuasivo, descriptivo, etc.), seleccionar el número de párrafos y hacer clic en "Generar". La herramienta luego producirá un ensayo con una introducción clara, cuerpo y conclusión. El ensayo será bien estructurado, coherente y adaptado a sus necesidades. También puede editar el ensayo como desee o generar otro si no está satisfecho. El generador de ensayos puede ayudarle a ahorrar tiempo y esfuerzo en sus tareas de escritura, así como mejorar sus habilidades de escritura.
-
-
Generador de examen
-
El Generador de Exámenes es una herramienta que puede crear exámenes y cuestionarios personalizados para usted en función de sus preferencias. Puede elegir el tema, tema, nivel de dificultad, número de preguntas, tipo de preguntas (opción múltiple, verdadero/falso, respuesta corta, etc.) y límite de tiempo. La herramienta generará un examen o prueba con preguntas y respuestas que coincidan con sus criterios. Puede tomar el examen o el examen en línea o descargarlo como un archivo PDF. La herramienta también calificará sus respuestas y proporcionará comentarios sobre su desempeño. El Generador de Exámenes puede ayudarle a prepararse para las pruebas y exámenes, así como evaluar su conocimiento y comprensión de varios temas.
-
-Class Notes
-
-
-Book Discussion Generator
-
-The Book Discussion Generator is a tool that can help you with book analysis and discussion. You enter the title and author of the book you want to discuss, and the tool generates a list of questions and topics you can use to start a conversation. The questions and topics cover various aspects of the book, such as plot, characters, themes, symbols, and style. You can also ask the tool to generate a summary or a review of the book, or to compare it with other books in the same genre or by the same author. The Book Discussion Generator can help you deepen your understanding and appreciation of a book, as well as spark interesting discussions with other readers.
-
-Grammar Fixer
-
-The Grammar Fixer is a tool that can detect and correct language errors in your writing. You paste your text into the tool, and it scans it for mistakes in spelling, grammar, punctuation, word choice, and more. It then suggests corrections with an explanation for each error, and you can choose to accept or reject them. The tool also gives you a score and a report on your writing quality and readability. The Grammar Fixer can help you polish your writing and avoid common language mistakes.
-
-Doctrina AI Reviews
-
-Doctrina AI has received positive feedback from its users, who have praised its features and functionality. Here are some of the reviews users have left on the Google Play Store:
-
-
-
-| User | Rating | Review |
-| --- | --- | --- |
-| Alexandra Smith | 5 stars | This app is amazing! It helped me write an essay on a topic I had no clue about. It was very easy to use, and the essay was well written and original. I got an A+ on my assignment thanks to this app! |
-| Kevin Jones | 4 stars | |
-| Lisa Brown | 5 stars | This app is a lifesaver for me. I have trouble taking notes in class because I get distracted easily. This app helps me improve my notes by summarizing them and highlighting the important points. It also explains any concept I don't understand. It makes studying much easier. |
-| David Lee | 4 stars | I like using this app to discuss books with my friends. It generates interesting questions and topics for us to talk about. It also gives us a summary and a review of the book, which is helpful if we haven't read it yet or need a refresher. The only thing I don't like is that the questions are sometimes too vague or too specific. |
-| María García | 5 stars | This app is great for improving my writing skills. It corrects all my grammar and spelling mistakes and gives me suggestions on how to improve my sentences. It also tells me how good my writing is and what I need to work on. I feel more confident about my writing now. |
-
-
-
-As you can see, most users are satisfied with Doctrina AI and its features. However, some users also point out drawbacks of the app, such as:
-
-- Limited number of subjects and topics available
-- Generated questions that are too vague or too specific
-- Occasional errors or inaccuracies in the generated content
-- Slow loading or app crashes
-
-These are some of the areas Doctrina AI could improve in the future.
-
-Conclusion
-
-
-Frequently Asked Questions
-
-Here are some of the most frequently asked questions and answers about Doctrina AI:
-
-Q: Is Doctrina AI safe to use?
-
-A: Yes, Doctrina AI is safe to use. It does not collect or store any personal information from its users, and it does not share or sell any of the content it generates to third parties. You can use the app with confidence and privacy.
-
-Q: Is Doctrina AI plagiarism-free?
-
-A: Yes, Doctrina AI is plagiarism-free. It generates original content that is not copied from any other source. However, you should always check the accuracy and quality of the content before submitting it to your teachers, and you should cite any sources you use in your research.
-
-Q: Is Doctrina AI available for iOS devices?
-
-A: No, Doctrina AI is not available for iOS devices at this time; it only supports Android devices. However, the developers are working on an iOS version of the app.
-
-Q: How can I contact Doctrina AI for support or feedback?
-
-A: You can contact Doctrina AI for support or feedback by emailing doctrinaai@gmail.com. You can also visit their website or follow them on Twitter or Facebook for updates and news.
-
-Q: How can I support Doctrina AI?
-
-A: You can support Doctrina AI by rating and reviewing the app on the Google Play Store, sharing it with friends and family, and sending feedback and suggestions to the developers. You can also donate via PayPal or Patreon if you want to help them improve the app and add more features.
-This tool lets you enter a term or a Twitter user to analyze, along with the number of tweets to analyze (50 at most). Select the "Término" filter to analyze what is posted about your Twitter handle or your first and last name; select the "Usuario" filter to analyze the tweets posted by a specific Twitter account. Clicking "Analizar" displays the results for the entered data in a table with the corresponding classification.
-',unsafe_allow_html=True)
-
-with open("style.css") as f:
- st.markdown(f"",unsafe_allow_html=True)
-
-def run():
- df = pd.DataFrame()
- showTable = False
- col,col1,col2 = st.columns([2,3,2])
-
- with col1:
- myform = st.form(key='Introduzca Texto')
- search_words = myform.text_input("Introduzca el término o usuario para analizar.")
- number_of_tweets = myform.number_input('Introduzca número de tweets a analizar, máximo 50.', 0,50)
- filtro=myform.radio("Seleccione la opción para filtrar por término o usuario.",('Término', 'Usuario'))
-
-
- submit_button = myform.form_submit_button(label='Analizar')
-
- if submit_button:
-
- with st.spinner('Analizando tweets...'):
-
- if not search_words:
- st.error("Campo vacío. Por favor, ingrese un término o usuario.")
- return
- if number_of_tweets <= 0:
- st.error("Por favor, ingrese un número de tweets mayor a 0.")
- return
-
-
- if (filtro=='Término'):
-
- new_search = search_words + " -filter:retweets"
- tweets =tw.Cursor(api.search_tweets,q=new_search,lang="es",tweet_mode="extended").items(number_of_tweets)
-
- elif (filtro=='Usuario'):
-
- try:
-
- if not search_words.startswith('@'):
- st.error("Por favor, ingrese un usuario válido, iniciando con @")
- return
-
- tweets = api.user_timeline(screen_name = search_words,tweet_mode="extended",count=number_of_tweets)
-
- except tw.errors.NotFound:
- st.error('"El usuario ingresado no existe. Por favor, ingrese un usuario existente." ⚠️', icon="⚠️")
- return
-
- except tw.errors.Unauthorized:
- st.error('El usuario ingresado es privado. Por favor, ingrese un usuario público ⚠️', icon="⚠️")
- return
-
- tweet_list = [i.full_text for i in tweets]
-
- if not tweet_list:
- msg = "No se encontraron tweets publicados por el "+filtro+" ingresado."
- msg2= "No se encontraron tweets publicados para el "+filtro+" ingresado."
- st.error(msg if filtro=='Usuario' else msg2)
- return
-
-
- text= pd.DataFrame(tweet_list)
- text[0] = text[0].apply(preprocess_tweet)
- text1=text[0].values
- indices1=tokenizer.batch_encode_plus(text1.tolist(),
- max_length=128,
- add_special_tokens=True,
- return_attention_mask=True,
- padding='max_length',
- truncation=True)
- input_ids1=indices1["input_ids"]
- attention_masks1=indices1["attention_mask"]
- prediction_inputs1= torch.tensor(input_ids1)
- prediction_masks1 = torch.tensor(attention_masks1)
- # Set the batch size.
- batch_size = 25
- # Create the DataLoader.
- prediction_data1 = TensorDataset(prediction_inputs1, prediction_masks1)
- prediction_sampler1 = SequentialSampler(prediction_data1)
- prediction_dataloader1 = DataLoader(prediction_data1, sampler=prediction_sampler1, batch_size=batch_size)
- print('Predicting labels for {:,} test sentences...'.format(len(prediction_inputs1)))
- # Put model in evaluation mode
- model.eval()
- # Tracking variables
- predictions = []
- # Predict
- for batch in prediction_dataloader1:
- batch = tuple(t.to(device) for t in batch)
- # Unpack the inputs from our dataloader
- b_input_ids1, b_input_mask1 = batch
- # Telling the model not to compute or store gradients, saving memory and speeding up prediction
- with torch.no_grad():
- # Forward pass, calculate logit predictions
- outputs1 = model(b_input_ids1, token_type_ids=None,attention_mask=b_input_mask1)
- logits1 = outputs1[0]
- # Move logits and labels to CPU
- logits1 = logits1.detach().cpu().numpy()
- # Store predictions and true labels
- predictions.append(logits1)
- flat_predictions = [item for sublist in predictions for item in sublist]
- flat_predictions = np.argmax(flat_predictions, axis=1).flatten()
- df = pd.DataFrame(list(zip(tweet_list, flat_predictions)),columns =['Últimos '+ str(number_of_tweets)+' Tweets'+' de '+search_words, 'violencia política de género'])
- df['violencia política de género']= np.where(df['violencia política de género']== 0, 'no violencia política de género', 'violencia política de género')
- showTable = True
- st.success('Análisis completado!', icon="✅")
-
- if (showTable):
- df.index+=1
- print(df.index)
- st.table(df.head(50).style.set_properties(subset=['violencia política de género'], **{'width': '250px'}).applymap(color_survived, subset=['violencia política de género']))
-
-run()
\ No newline at end of file
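The prediction loop above follows the standard batched-inference pattern for a fine-tuned BERT-style classifier: tokenize with fixed-length padding, wrap the tensors in a TensorDataset and DataLoader, run the model under torch.no_grad(), and take the argmax of the logits. Below is a minimal self-contained sketch of that same pattern; the checkpoint name and the binary label set are illustrative placeholders, not the fine-tuned weights this space actually loads.

import numpy as np
import torch
from torch.utils.data import DataLoader, SequentialSampler, TensorDataset
from transformers import AutoModelForSequenceClassification, AutoTokenizer

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Placeholder checkpoint; the space loads its own fine-tuned model instead.
name = "bert-base-multilingual-cased"
tokenizer = AutoTokenizer.from_pretrained(name)
model = AutoModelForSequenceClassification.from_pretrained(name, num_labels=2).to(device)

def classify(texts, batch_size=25, max_length=128):
    # Tokenize to fixed-length, padded, truncated tensors
    enc = tokenizer(texts, max_length=max_length, padding="max_length",
                    truncation=True, return_tensors="pt")
    data = TensorDataset(enc["input_ids"], enc["attention_mask"])
    loader = DataLoader(data, sampler=SequentialSampler(data), batch_size=batch_size)
    model.eval()
    logits = []
    with torch.no_grad():  # inference only: no gradients, less memory
        for input_ids, attention_mask in loader:
            out = model(input_ids.to(device), attention_mask=attention_mask.to(device))
            logits.append(out.logits.detach().cpu().numpy())
    # One predicted class index per input text
    return np.argmax(np.concatenate(logits, axis=0), axis=1)

print(classify(["primer tweet de prueba", "segundo tweet de prueba"]))

Sequential (rather than shuffled) sampling keeps the predictions aligned with the input order, which is what lets the app zip the labels back onto the original tweet list.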
diff --git a/spaces/JosephusCheung/LL7M-JS-Tokenizer/style.css b/spaces/JosephusCheung/LL7M-JS-Tokenizer/style.css
deleted file mode 100644
index 114adf441e9032febb46bc056b2a8bb651075f0d..0000000000000000000000000000000000000000
--- a/spaces/JosephusCheung/LL7M-JS-Tokenizer/style.css
+++ /dev/null
@@ -1,28 +0,0 @@
-body {
- padding: 2rem;
- font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif;
-}
-
-h1 {
- font-size: 16px;
- margin-top: 0;
-}
-
-p {
- color: rgb(107, 114, 128);
- font-size: 15px;
- margin-bottom: 10px;
- margin-top: 5px;
-}
-
-.card {
- max-width: 620px;
- margin: 0 auto;
- padding: 16px;
- border: 1px solid lightgray;
- border-radius: 16px;
-}
-
-.card p:last-child {
- margin-bottom: 0;
-}
diff --git a/spaces/KyanChen/FunSR/utils.py b/spaces/KyanChen/FunSR/utils.py
deleted file mode 100644
index 6846f4f255d79a6bc414f9a660373398370a7bc6..0000000000000000000000000000000000000000
--- a/spaces/KyanChen/FunSR/utils.py
+++ /dev/null
@@ -1,329 +0,0 @@
-import os
-import time
-import shutil
-import math
-
-import cv2
-import torch
-import numpy as np
-from einops import rearrange
-from torch.optim import SGD, Adam, AdamW
-from tensorboardX import SummaryWriter
-import torch.nn.functional as F
-
-
-def warm_up_cosine_lr_scheduler(optimizer, epochs=100, warm_up_epochs=5, eta_min=1e-9):
- """
- Description:
- - Warm up cosin learning rate scheduler, first epoch lr is too small
-
- Arguments:
- - optimizer: input optimizer for the training
- - epochs: int, total epochs for your training, default is 100. NOTE: you should pass correct epochs for your training
- - warm_up_epochs: int, default is 5, which mean the lr will be warm up for 5 epochs. if warm_up_epochs=0, means no need
- to warn up, will be as cosine lr scheduler
- - eta_min: float, setup ConsinAnnealingLR eta_min while warm_up_epochs = 0
-
- Returns:
- - scheduler
- """
-
- if warm_up_epochs <= 0:
- scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=epochs, eta_min=eta_min)
-
- else:
- warm_up_with_cosine_lr = lambda epoch: eta_min + (epoch / warm_up_epochs) \
- if epoch <= warm_up_epochs else \
- 0.5 * (np.cos((epoch - warm_up_epochs) / (epochs - warm_up_epochs) * np.pi) + 1)
- scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=warm_up_with_cosine_lr)
-
- return scheduler
-
-
-class Averager():
-
- def __init__(self, class_names=['all']):
- if 'all' not in class_names:
- class_names.append('all')
- self.values = {k: [] for k in class_names}
-
- def add(self, ks, vs):
- if torch.is_tensor(vs):
- vs = vs.cpu().tolist()
- for k, v in zip(ks, vs):
- self.values[k].append(v)
- self.values['all'].append(v)
-
- def item(self):
- return_dict = {}
- for k, v in self.values.items():
- if len(v):
- return_dict[k] = sum(v) / len(v)
- else:
- return_dict[k] = 0
- return return_dict
-
-class AveragerList():
-
- def __init__(self):
- self.values = []
-
- def add(self, vs):
- if torch.is_tensor(vs):
- vs = vs.cpu().tolist()
- if isinstance(vs, list):
- self.values += vs
- else:
- self.values += [vs]
-
- def item(self):
- return sum(self.values) / len(self.values)
-
-
-class Timer():
-
- def __init__(self):
- self.v = time.time()
-
- def s(self):
- self.v = time.time()
-
- def t(self):
- return time.time() - self.v
-
-
-def time_text(t):
- if t >= 3600:
- return '{:.1f}h'.format(t / 3600)
- elif t >= 60:
- return '{:.1f}m'.format(t / 60)
- else:
- return '{:.1f}s'.format(t)
-
-
-_log_path = None
-
-
-def set_log_path(path):
- global _log_path
- _log_path = path
-
-
-def log(obj, filename='log.txt'):
- print(obj)
- if _log_path is not None:
- with open(os.path.join(_log_path, filename), 'a') as f:
- print(obj, file=f)
-
-
-def ensure_path(path, remove=True):
- basename = os.path.basename(path.rstrip('/'))
- if os.path.exists(path):
- print('{} exists!'.format(path))
- # if remove and (basename.startswith('_')
- # or input('{} exists, remove? (y/[n]): '.format(path)) == 'y'):
- # shutil.rmtree(path)
- # os.makedirs(path)
- else:
- os.makedirs(path)
-
-
-def set_save_path(save_path, remove=True):
- ensure_path(save_path, remove=remove)
- set_log_path(save_path)
- writer = SummaryWriter(os.path.join(save_path, 'tensorboard'))
- return log, writer
-
-
-def compute_num_params(model, text=False):
- tot = int(sum([np.prod(p.shape) for p in model.parameters()]))
- if text:
- if tot >= 1e6:
- return '{:.1f}M'.format(tot / 1e6)
- else:
- return '{:.1f}K'.format(tot / 1e3)
- else:
- return tot
-
-
-def make_optimizer(param_list, optimizer_spec, load_sd=False):
- Optimizer = {
- 'sgd': SGD,
- 'adam': Adam,
- 'adamw': AdamW,
- }[optimizer_spec['name']]
- default_args = {
- 'sgd': {},
- 'adam':
- {
- 'betas': (0.9, 0.999),
- 'eps': 1e-08,
- 'weight_decay': 0,
- 'amsgrad': False
- },
- 'adamw': {},
- }[optimizer_spec['name']]
- default_args.update(optimizer_spec['args'])
- optimizer = Optimizer(param_list, **default_args)
- if load_sd:
- optimizer.load_state_dict(optimizer_spec['sd'])
- return optimizer
-
-
-def make_coord(shape, ranges=None, flatten=True):
- """ Make coordinates at grid centers.
- """
- coord_seqs = []
- for i, n in enumerate(shape):
- if ranges is None:
- v0, v1 = -1, 1
- else:
- v0, v1 = ranges[i]
- r = (v1 - v0) / (2 * n)
- seq = v0 + r + (2 * r) * torch.arange(n).float()
- coord_seqs.append(seq)
- ret = torch.stack(torch.meshgrid(*coord_seqs), dim=-1)
- if flatten:
- ret = ret.view(-1, ret.shape[-1])
- return ret
-
-
-def to_coordinates(size=(56, 56), return_map=True):
- """Converts an image to a set of coordinates and features.
-
- Args:
- img (torch.Tensor): Shape (channels, height, width).
- """
- # H, W
- # Coordinates are indices of all non zero locations of a tensor of ones of
- # same shape as spatial dimensions of image
- coordinates = torch.ones(size).nonzero(as_tuple=False).float()
- # Normalize coordinates to lie in [-.5, .5]
- coordinates[..., 0] = coordinates[..., 0] / (size[0] - 1) - 0.5
- coordinates[..., 1] = coordinates[..., 1] / (size[1] - 1) - 0.5
- # Convert to range [-1, 1]
- coordinates *= 2
- if return_map:
- coordinates = rearrange(coordinates, '(H W) C -> H W C', H=size[0])
- # [y, x]
- return coordinates
-
-
-def to_pixel_samples(img):
- """ Convert the image to coord-RGB pairs.
- img: Tensor, (3, H, W)
- """
- coord = make_coord(img.shape[-2:])
- rgb = img.view(3, -1).permute(1, 0)
- return coord, rgb
-
-
-def get_clamped_psnr(img, img_recon, rgb_range=1, crop_border=None):
- # Values may lie outside [0, 1], so clamp input
- img_recon = torch.clamp(img_recon, 0., 1.)
- # Pixel values lie in {0, ..., 255}, so round float tensor
- img_recon = torch.round(img_recon * 255) / 255.
- diff = img - img_recon
- if crop_border is not None:
- assert len(diff.size()) == 4
- valid = diff[..., crop_border:-crop_border, crop_border:-crop_border]
- else:
- valid = diff
-
- psnr_list = []
- for i in range(len(img)):
- psnr = 20. * np.log10(1.) - 10. * valid[i].detach().pow(2).mean().log10().to('cpu').item()
- psnr_list.append(psnr)
- return psnr_list
-
-
-def _ssim_pth(img, img2):
- """Calculate SSIM (structural similarity) (PyTorch version).
- It is called by func:`calculate_ssim_pt`.
- Args:
- img (Tensor): Images with range [0, 1], shape (n, 3/1, h, w).
- img2 (Tensor): Images with range [0, 1], shape (n, 3/1, h, w).
- Returns:
- Tensor: per-image SSIM results, shape (n,).
- """
- c1 = (0.01 * 255)**2
- c2 = (0.03 * 255)**2
-
- kernel = cv2.getGaussianKernel(11, 1.5)
- window = np.outer(kernel, kernel.transpose())
- window = torch.from_numpy(window).view(1, 1, 11, 11).expand(img.size(1), 1, 11, 11).to(img.dtype).to(img.device)
-
- mu1 = F.conv2d(img, window, stride=1, padding=0, groups=img.shape[1]) # valid mode
- mu2 = F.conv2d(img2, window, stride=1, padding=0, groups=img2.shape[1]) # valid mode
- mu1_sq = mu1.pow(2)
- mu2_sq = mu2.pow(2)
- mu1_mu2 = mu1 * mu2
- sigma1_sq = F.conv2d(img * img, window, stride=1, padding=0, groups=img.shape[1]) - mu1_sq
- sigma2_sq = F.conv2d(img2 * img2, window, stride=1, padding=0, groups=img.shape[1]) - mu2_sq
- sigma12 = F.conv2d(img * img2, window, stride=1, padding=0, groups=img.shape[1]) - mu1_mu2
-
- cs_map = (2 * sigma12 + c2) / (sigma1_sq + sigma2_sq + c2)
- ssim_map = ((2 * mu1_mu2 + c1) / (mu1_sq + mu2_sq + c1)) * cs_map
- return ssim_map.mean([1, 2, 3])
-
-
-def calculate_ssim_pt(img, img2, crop_border, test_y_channel=False, **kwargs):
- """Calculate SSIM (structural similarity) (PyTorch version).
- ``Paper: Image quality assessment: From error visibility to structural similarity``
- The results are the same as that of the official released MATLAB code in
- https://ece.uwaterloo.ca/~z70wang/research/ssim/.
- For three-channel images, SSIM is calculated for each channel and then
- averaged.
- Args:
- img (Tensor): Images with range [0, 1], shape (n, 3/1, h, w).
- img2 (Tensor): Images with range [0, 1], shape (n, 3/1, h, w).
- crop_border (int): Cropped pixels in each edge of an image. These pixels are not involved in the calculation.
- test_y_channel (bool): Test on Y channel of YCbCr. Default: False.
- Returns:
- Tensor: per-image SSIM results, shape (n,).
- """
-
- assert img.shape == img2.shape, f'Image shapes are different: {img.shape}, {img2.shape}.'
-
- if crop_border != 0:
- img = img[:, :, crop_border:-crop_border, crop_border:-crop_border]
- img2 = img2[:, :, crop_border:-crop_border, crop_border:-crop_border]
-
- if test_y_channel:
- img = rgb2ycbcr_pt(img, y_only=True)
- img2 = rgb2ycbcr_pt(img2, y_only=True)
-
- img = img.to(torch.float64)
- img2 = img2.to(torch.float64)
-
- ssim = _ssim_pth(img * 255., img2 * 255.)
- return ssim
-
-
-def calculate_psnr_pt(img, img2, crop_border, test_y_channel=False, **kwargs):
- """Calculate PSNR (Peak Signal-to-Noise Ratio) (PyTorch version).
- Reference: https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio
- Args:
- img (Tensor): Images with range [0, 1], shape (n, 3/1, h, w).
- img2 (Tensor): Images with range [0, 1], shape (n, 3/1, h, w).
- crop_border (int): Cropped pixels in each edge of an image. These pixels are not involved in the calculation.
- test_y_channel (bool): Test on Y channel of YCbCr. Default: False.
- Returns:
- Tensor: per-image PSNR results, shape (n,).
- """
-
- assert img.shape == img2.shape, (f'Image shapes are different: {img.shape}, {img2.shape}.')
-
- if crop_border != 0:
- img = img[:, :, crop_border:-crop_border, crop_border:-crop_border]
- img2 = img2[:, :, crop_border:-crop_border, crop_border:-crop_border]
-
- if test_y_channel:
- img = rgb2ycbcr_pt(img, y_only=True)
- img2 = rgb2ycbcr_pt(img2, y_only=True)
-
- img = img.to(torch.float64)
- img2 = img2.to(torch.float64)
-
- mse = torch.mean((img - img2)**2, dim=[1, 2, 3])
- return 10. * torch.log10(1. / (mse + 1e-8))
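To make the intended use of these helpers concrete, here is a short, hypothetical driver sketch. It assumes the file above is importable as `utils`, and it uses a toy model and random tensors as stand-ins for a real training loop and real images.

import torch
import utils  # assumption: the file above is saved on the path as utils.py

model = torch.nn.Linear(8, 8)
opt = torch.optim.Adam(model.parameters(), lr=1e-3)
# 5 linear warm-up epochs, then cosine decay over the remaining 95
sched = utils.warm_up_cosine_lr_scheduler(opt, epochs=100, warm_up_epochs=5)
for epoch in range(100):
    # ... run one real training epoch here ...
    opt.step()    # placeholder for the actual optimization step
    sched.step()  # advance the warm-up/cosine schedule once per epoch

coord = utils.make_coord((32, 32))  # (32*32, 2) grid-center coords in [-1, 1]

# Per-image PSNR between two random batches in [0, 1], shape (n, 3, h, w)
img, img2 = torch.rand(2, 3, 32, 32), torch.rand(2, 3, 32, 32)
print(utils.calculate_psnr_pt(img, img2, crop_border=4))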
diff --git a/spaces/KyanChen/RSPrompter/mmpl/models/necks/sirens.py b/spaces/KyanChen/RSPrompter/mmpl/models/necks/sirens.py
deleted file mode 100644
index 766433faa6a2a07cbd93b506163b80da20d54008..0000000000000000000000000000000000000000
--- a/spaces/KyanChen/RSPrompter/mmpl/models/necks/sirens.py
+++ /dev/null
@@ -1,98 +0,0 @@
-import torch
-import torch.nn as nn
-
-from mmpl.registry import MODELS
-from mmengine.model import BaseModule
-
-
-@MODELS.register_module()
-class Sirens(BaseModule):
- def __init__(self,
- in_channels,
- out_channels=3,
- base_channels=256,
- num_inner_layers=2,
- is_residual=True
- ):
- super(Sirens, self).__init__()
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.base_channels = base_channels
- self.num_inner_layers = num_inner_layers
- self.is_residual = is_residual
-
- self.first_coord = nn.Linear(in_channels, base_channels)
- self.inner_coords = nn.ModuleList(nn.Linear(base_channels, base_channels) for _ in range(self.num_inner_layers))
- self.last_coord = nn.Linear(base_channels, out_channels)
-
- def forward(self, x):
- x = self.first_coord(x)
- x = torch.sin(x)
- for idx in range(self.num_inner_layers):
- residual = x
- x = self.inner_coords[idx](x)
- if self.is_residual:
- x = x + residual
- x = torch.sin(x)
- x = self.last_coord(x)
- return x
-
-
-@MODELS.register_module()
-class ModulatedSirens(BaseModule):
- def __init__(self,
- num_inner_layers,
- in_dim,
- modulation_dim,
- out_dim=3,
- base_channels=256,
- is_residual=True
- ):
- super(ModulatedSirens, self).__init__()
- self.in_dim = in_dim
- self.num_inner_layers = num_inner_layers
- self.is_residual = is_residual
-
- self.first_mod = nn.Sequential(
- nn.Conv2d(modulation_dim, base_channels, 1),
- nn.ReLU()
- )
- self.first_coord = nn.Conv2d(in_dim, base_channels, 1)
-
- self.inner_mods = nn.ModuleList()
- self.inner_coords = nn.ModuleList()
- for _ in range(self.num_inner_layers):
- self.inner_mods.append(
- nn.Sequential(
- nn.Conv2d(modulation_dim+base_channels+base_channels, base_channels, 1),
- nn.ReLU()
- )
- )
- self.inner_coords.append(
- nn.Conv2d(base_channels, base_channels, 1)
- )
- self.last_coord = nn.Sequential(
- # nn.Conv2d(base_channels, base_channels//2, 1),
- # nn.ReLU(),
- nn.Conv2d(base_channels, out_dim, 1)
- )
-
- def forward(self, x, ori_modulations=None):
- modulations = self.first_mod(ori_modulations)
- x = self.first_coord(x) # B 2 H W -> B C H W
- x = x + modulations
- x = torch.sin(x)
- for i_layer in range(self.num_inner_layers):
- modulations = self.inner_mods[i_layer](
- torch.cat((ori_modulations, modulations, x), dim=1))
- # modulations = self.inner_mods[i_layer](
- # torch.cat((ori_modulations, x), dim=1))
- residual = self.inner_coords[i_layer](x)
- residual = residual + modulations
- residual = torch.sin(residual)
- if self.is_residual:
- x = x + residual
- else:
- x = residual
- x = self.last_coord(x)
- return x
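As a quick illustration of the plain `Sirens` head above, the sketch below maps 2-D coordinates to RGB values, one linear layer plus sine activation at a time. It assumes mmengine and the mmpl registry are installed (they are imported at the top of the file), and the shapes are purely illustrative.

import torch
from sirens import Sirens  # assumption: the file above is importable as sirens.py

net = Sirens(in_channels=2, out_channels=3, base_channels=256, num_inner_layers=2)
coords = torch.rand(4096, 2) * 2 - 1  # (x, y) coordinates in [-1, 1]
rgb = net(coords)                     # (4096, 3) predicted pixel values
print(rgb.shape)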
diff --git a/spaces/LinkSoul/LLaSM/static/js/bulma-carousel.js b/spaces/LinkSoul/LLaSM/static/js/bulma-carousel.js
deleted file mode 100644
index 229edba242bb190698662cdce6bdacde9f0769fe..0000000000000000000000000000000000000000
--- a/spaces/LinkSoul/LLaSM/static/js/bulma-carousel.js
+++ /dev/null
@@ -1,2371 +0,0 @@
-(function webpackUniversalModuleDefinition(root, factory) {
- if(typeof exports === 'object' && typeof module === 'object')
- module.exports = factory();
- else if(typeof define === 'function' && define.amd)
- define([], factory);
- else if(typeof exports === 'object')
- exports["bulmaCarousel"] = factory();
- else
- root["bulmaCarousel"] = factory();
-})(typeof self !== 'undefined' ? self : this, function() {
-return /******/ (function(modules) { // webpackBootstrap
-/******/ // The module cache
-/******/ var installedModules = {};
-/******/
-/******/ // The require function
-/******/ function __webpack_require__(moduleId) {
-/******/
-/******/ // Check if module is in cache
-/******/ if(installedModules[moduleId]) {
-/******/ return installedModules[moduleId].exports;
-/******/ }
-/******/ // Create a new module (and put it into the cache)
-/******/ var module = installedModules[moduleId] = {
-/******/ i: moduleId,
-/******/ l: false,
-/******/ exports: {}
-/******/ };
-/******/
-/******/ // Execute the module function
-/******/ modules[moduleId].call(module.exports, module, module.exports, __webpack_require__);
-/******/
-/******/ // Flag the module as loaded
-/******/ module.l = true;
-/******/
-/******/ // Return the exports of the module
-/******/ return module.exports;
-/******/ }
-/******/
-/******/
-/******/ // expose the modules object (__webpack_modules__)
-/******/ __webpack_require__.m = modules;
-/******/
-/******/ // expose the module cache
-/******/ __webpack_require__.c = installedModules;
-/******/
-/******/ // define getter function for harmony exports
-/******/ __webpack_require__.d = function(exports, name, getter) {
-/******/ if(!__webpack_require__.o(exports, name)) {
-/******/ Object.defineProperty(exports, name, {
-/******/ configurable: false,
-/******/ enumerable: true,
-/******/ get: getter
-/******/ });
-/******/ }
-/******/ };
-/******/
-/******/ // getDefaultExport function for compatibility with non-harmony modules
-/******/ __webpack_require__.n = function(module) {
-/******/ var getter = module && module.__esModule ?
-/******/ function getDefault() { return module['default']; } :
-/******/ function getModuleExports() { return module; };
-/******/ __webpack_require__.d(getter, 'a', getter);
-/******/ return getter;
-/******/ };
-/******/
-/******/ // Object.prototype.hasOwnProperty.call
-/******/ __webpack_require__.o = function(object, property) { return Object.prototype.hasOwnProperty.call(object, property); };
-/******/
-/******/ // __webpack_public_path__
-/******/ __webpack_require__.p = "";
-/******/
-/******/ // Load entry module and return exports
-/******/ return __webpack_require__(__webpack_require__.s = 5);
-/******/ })
-/************************************************************************/
-/******/ ([
-/* 0 */
-/***/ (function(module, __webpack_exports__, __webpack_require__) {
-
-"use strict";
-/* unused harmony export addClasses */
-/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "d", function() { return removeClasses; });
-/* unused harmony export show */
-/* unused harmony export hide */
-/* unused harmony export offset */
-/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "e", function() { return width; });
-/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "b", function() { return height; });
-/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "c", function() { return outerHeight; });
-/* unused harmony export outerWidth */
-/* unused harmony export position */
-/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return css; });
-/* harmony import */ var __WEBPACK_IMPORTED_MODULE_0__type__ = __webpack_require__(2);
-
-
-var addClasses = function addClasses(element, classes) {
- classes = Array.isArray(classes) ? classes : classes.split(' ');
- classes.forEach(function (cls) {
- element.classList.add(cls);
- });
-};
-
-var removeClasses = function removeClasses(element, classes) {
- classes = Array.isArray(classes) ? classes : classes.split(' ');
- classes.forEach(function (cls) {
- element.classList.remove(cls);
- });
-};
-
-var show = function show(elements) {
- elements = Array.isArray(elements) ? elements : [elements];
- elements.forEach(function (element) {
- element.style.display = '';
- });
-};
-
-var hide = function hide(elements) {
- elements = Array.isArray(elements) ? elements : [elements];
- elements.forEach(function (element) {
- element.style.display = 'none';
- });
-};
-
-var offset = function offset(element) {
- var rect = element.getBoundingClientRect();
- return {
- top: rect.top + document.body.scrollTop,
- left: rect.left + document.body.scrollLeft
- };
-};
-
-// returns an element's width
-var width = function width(element) {
- return element.getBoundingClientRect().width || element.offsetWidth;
-};
-// returns an element's height
-var height = function height(element) {
- return element.getBoundingClientRect().height || element.offsetHeight;
-};
-
-var outerHeight = function outerHeight(element) {
- var withMargin = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : false;
-
- var height = element.offsetHeight;
- if (withMargin) {
- var style = window.getComputedStyle(element);
- height += parseInt(style.marginTop) + parseInt(style.marginBottom);
- }
- return height;
-};
-
-var outerWidth = function outerWidth(element) {
- var withMargin = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : false;
-
- var width = element.offsetWidth;
- if (withMargin) {
- var style = window.getComputedStyle(element);
- width += parseInt(style.marginLeft) + parseInt(style.marginRight);
- }
- return width;
-};
-
-var position = function position(element) {
- return {
- left: element.offsetLeft,
- top: element.offsetTop
- };
-};
-
-var css = function css(element, obj) {
- if (!obj) {
- return window.getComputedStyle(element);
- }
- if (Object(__WEBPACK_IMPORTED_MODULE_0__type__["b" /* isObject */])(obj)) {
- var style = '';
- Object.keys(obj).forEach(function (key) {
- style += key + ': ' + obj[key] + ';';
- });
-
- element.style.cssText += style;
- }
-};
-
-/***/ }),
-/* 1 */
-/***/ (function(module, __webpack_exports__, __webpack_require__) {
-
-"use strict";
-/* harmony export (immutable) */ __webpack_exports__["a"] = detectSupportsPassive;
-function detectSupportsPassive() {
- var supportsPassive = false;
-
- try {
- var opts = Object.defineProperty({}, 'passive', {
- get: function get() {
- supportsPassive = true;
- }
- });
-
- window.addEventListener('testPassive', null, opts);
- window.removeEventListener('testPassive', null, opts);
- } catch (e) {}
-
- return supportsPassive;
-}
-
-/***/ }),
-/* 2 */
-/***/ (function(module, __webpack_exports__, __webpack_require__) {
-
-"use strict";
-/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return isFunction; });
-/* unused harmony export isNumber */
-/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "c", function() { return isString; });
-/* unused harmony export isDate */
-/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "b", function() { return isObject; });
-/* unused harmony export isEmptyObject */
-/* unused harmony export isNode */
-/* unused harmony export isVideo */
-/* unused harmony export isHTML5 */
-/* unused harmony export isIFrame */
-/* unused harmony export isYoutube */
-/* unused harmony export isVimeo */
-var _typeof = typeof Symbol === "function" && typeof Symbol.iterator === "symbol" ? function (obj) { return typeof obj; } : function (obj) { return obj && typeof Symbol === "function" && obj.constructor === Symbol && obj !== Symbol.prototype ? "symbol" : typeof obj; };
-
-var isFunction = function isFunction(unknown) {
- return typeof unknown === 'function';
-};
-var isNumber = function isNumber(unknown) {
- return typeof unknown === "number";
-};
-var isString = function isString(unknown) {
- return typeof unknown === 'string' || !!unknown && (typeof unknown === 'undefined' ? 'undefined' : _typeof(unknown)) === 'object' && Object.prototype.toString.call(unknown) === '[object String]';
-};
-var isDate = function isDate(unknown) {
- return (Object.prototype.toString.call(unknown) === '[object Date]' || unknown instanceof Date) && !isNaN(unknown.valueOf());
-};
-var isObject = function isObject(unknown) {
- return (typeof unknown === 'function' || (typeof unknown === 'undefined' ? 'undefined' : _typeof(unknown)) === 'object' && !!unknown) && !Array.isArray(unknown);
-};
-var isEmptyObject = function isEmptyObject(unknown) {
- for (var name in unknown) {
- if (unknown.hasOwnProperty(name)) {
- return false;
- }
- }
- return true;
-};
-
-var isNode = function isNode(unknown) {
- return !!(unknown && (unknown instanceof HTMLElement || unknown instanceof SVGElement));
-};
-var isVideo = function isVideo(unknown) {
- return isYoutube(unknown) || isVimeo(unknown) || isHTML5(unknown);
-};
-var isHTML5 = function isHTML5(unknown) {
- return isNode(unknown) && unknown.tagName === 'VIDEO';
-};
-var isIFrame = function isIFrame(unknown) {
- return isNode(unknown) && unknown.tagName === 'IFRAME';
-};
-var isYoutube = function isYoutube(unknown) {
- return isIFrame(unknown) && !!unknown.src.match(/\/\/.*?youtube(-nocookie)?\.[a-z]+\/(watch\?v=[^&\s]+|embed)|youtu\.be\/.*/);
-};
-var isVimeo = function isVimeo(unknown) {
- return isIFrame(unknown) && !!unknown.src.match(/vimeo\.com\/video\/.*/);
-};
-
-/***/ }),
-/* 3 */
-/***/ (function(module, __webpack_exports__, __webpack_require__) {
-
-"use strict";
-var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();
-
-function _toConsumableArray(arr) { if (Array.isArray(arr)) { for (var i = 0, arr2 = Array(arr.length); i < arr.length; i++) { arr2[i] = arr[i]; } return arr2; } else { return Array.from(arr); } }
-
-function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } }
-
-var EventEmitter = function () {
- function EventEmitter() {
- var events = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : [];
-
- _classCallCheck(this, EventEmitter);
-
- this.events = new Map(events);
- }
-
- _createClass(EventEmitter, [{
- key: "on",
- value: function on(name, cb) {
- var _this = this;
-
- this.events.set(name, [].concat(_toConsumableArray(this.events.has(name) ? this.events.get(name) : []), [cb]));
-
- return function () {
- return _this.events.set(name, _this.events.get(name).filter(function (fn) {
- return fn !== cb;
- }));
- };
- }
- }, {
- key: "emit",
- value: function emit(name) {
- for (var _len = arguments.length, args = Array(_len > 1 ? _len - 1 : 0), _key = 1; _key < _len; _key++) {
- args[_key - 1] = arguments[_key];
- }
-
- return this.events.has(name) && this.events.get(name).map(function (fn) {
- return fn.apply(undefined, args);
- });
- }
- }]);
-
- return EventEmitter;
-}();
-
-/* harmony default export */ __webpack_exports__["a"] = (EventEmitter);
-
-/***/ }),
-/* 4 */
-/***/ (function(module, __webpack_exports__, __webpack_require__) {
-
-"use strict";
-var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();
-
-function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } }
-
-var Coordinate = function () {
- function Coordinate() {
- var x = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : 0;
- var y = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : 0;
-
- _classCallCheck(this, Coordinate);
-
- this._x = x;
- this._y = y;
- }
-
- _createClass(Coordinate, [{
- key: 'add',
- value: function add(coord) {
- return new Coordinate(this._x + coord._x, this._y + coord._y);
- }
- }, {
- key: 'sub',
- value: function sub(coord) {
- return new Coordinate(this._x - coord._x, this._y - coord._y);
- }
- }, {
- key: 'distance',
- value: function distance(coord) {
- var deltaX = this._x - coord._x;
- var deltaY = this._y - coord._y;
-
- return Math.sqrt(Math.pow(deltaX, 2) + Math.pow(deltaY, 2));
- }
- }, {
- key: 'max',
- value: function max(coord) {
- var x = Math.max(this._x, coord._x);
- var y = Math.max(this._y, coord._y);
-
- return new Coordinate(x, y);
- }
- }, {
- key: 'equals',
- value: function equals(coord) {
- if (this == coord) {
- return true;
- }
- if (!coord || coord == null) {
- return false;
- }
- return this._x == coord._x && this._y == coord._y;
- }
- }, {
- key: 'inside',
- value: function inside(northwest, southeast) {
- if (this._x >= northwest._x && this._x <= southeast._x && this._y >= northwest._y && this._y <= southeast._y) {
-
- return true;
- }
- return false;
- }
- }, {
- key: 'constrain',
- value: function constrain(min, max) {
- if (min._x > max._x || min._y > max._y) {
- return this;
- }
-
- var x = this._x,
- y = this._y;
-
- if (min._x !== null) {
- x = Math.max(x, min._x);
- }
- if (max._x !== null) {
- x = Math.min(x, max._x);
- }
- if (min._y !== null) {
- y = Math.max(y, min._y);
- }
- if (max._y !== null) {
- y = Math.min(y, max._y);
- }
-
- return new Coordinate(x, y);
- }
- }, {
- key: 'reposition',
- value: function reposition(element) {
- element.style['top'] = this._y + 'px';
- element.style['left'] = this._x + 'px';
- }
- }, {
- key: 'toString',
- value: function toString() {
- return '(' + this._x + ',' + this._y + ')';
- }
- }, {
- key: 'x',
- get: function get() {
- return this._x;
- },
- set: function set() {
- var value = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : 0;
-
- this._x = value;
- return this;
- }
- }, {
- key: 'y',
- get: function get() {
- return this._y;
- },
- set: function set() {
- var value = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : 0;
-
- this._y = value;
- return this;
- }
- }]);
-
- return Coordinate;
-}();
-
-/* harmony default export */ __webpack_exports__["a"] = (Coordinate);
-
-/***/ }),
-/* 5 */
-/***/ (function(module, __webpack_exports__, __webpack_require__) {
-
-"use strict";
-Object.defineProperty(__webpack_exports__, "__esModule", { value: true });
-/* harmony import */ var __WEBPACK_IMPORTED_MODULE_0__utils_index__ = __webpack_require__(6);
-/* harmony import */ var __WEBPACK_IMPORTED_MODULE_1__utils_css__ = __webpack_require__(0);
-/* harmony import */ var __WEBPACK_IMPORTED_MODULE_2__utils_type__ = __webpack_require__(2);
-/* harmony import */ var __WEBPACK_IMPORTED_MODULE_3__utils_eventEmitter__ = __webpack_require__(3);
-/* harmony import */ var __WEBPACK_IMPORTED_MODULE_4__components_autoplay__ = __webpack_require__(7);
-/* harmony import */ var __WEBPACK_IMPORTED_MODULE_5__components_breakpoint__ = __webpack_require__(9);
-/* harmony import */ var __WEBPACK_IMPORTED_MODULE_6__components_infinite__ = __webpack_require__(10);
-/* harmony import */ var __WEBPACK_IMPORTED_MODULE_7__components_loop__ = __webpack_require__(11);
-/* harmony import */ var __WEBPACK_IMPORTED_MODULE_8__components_navigation__ = __webpack_require__(13);
-/* harmony import */ var __WEBPACK_IMPORTED_MODULE_9__components_pagination__ = __webpack_require__(15);
-/* harmony import */ var __WEBPACK_IMPORTED_MODULE_10__components_swipe__ = __webpack_require__(18);
-/* harmony import */ var __WEBPACK_IMPORTED_MODULE_11__components_transitioner__ = __webpack_require__(19);
-/* harmony import */ var __WEBPACK_IMPORTED_MODULE_12__defaultOptions__ = __webpack_require__(22);
-/* harmony import */ var __WEBPACK_IMPORTED_MODULE_13__templates__ = __webpack_require__(23);
-/* harmony import */ var __WEBPACK_IMPORTED_MODULE_14__templates_item__ = __webpack_require__(24);
-var _extends = Object.assign || function (target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i]; for (var key in source) { if (Object.prototype.hasOwnProperty.call(source, key)) { target[key] = source[key]; } } } return target; };
-
-var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();
-
-function _defineProperty(obj, key, value) { if (key in obj) { Object.defineProperty(obj, key, { value: value, enumerable: true, configurable: true, writable: true }); } else { obj[key] = value; } return obj; }
-
-function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } }
-
-function _possibleConstructorReturn(self, call) { if (!self) { throw new ReferenceError("this hasn't been initialised - super() hasn't been called"); } return call && (typeof call === "object" || typeof call === "function") ? call : self; }
-
-function _inherits(subClass, superClass) { if (typeof superClass !== "function" && superClass !== null) { throw new TypeError("Super expression must either be null or a function, not " + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; }
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-var bulmaCarousel = function (_EventEmitter) {
- _inherits(bulmaCarousel, _EventEmitter);
-
- function bulmaCarousel(selector) {
- var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {};
-
- _classCallCheck(this, bulmaCarousel);
-
- var _this = _possibleConstructorReturn(this, (bulmaCarousel.__proto__ || Object.getPrototypeOf(bulmaCarousel)).call(this));
-
- _this.element = Object(__WEBPACK_IMPORTED_MODULE_2__utils_type__["c" /* isString */])(selector) ? document.querySelector(selector) : selector;
- // An invalid selector or non-DOM node has been provided.
- if (!_this.element) {
- throw new Error('An invalid selector or non-DOM node has been provided.');
- }
- _this._clickEvents = ['click', 'touch'];
-
- // Use Element dataset values to override options
- var elementConfig = _this.element.dataset ? Object.keys(_this.element.dataset).filter(function (key) {
- return Object.keys(__WEBPACK_IMPORTED_MODULE_12__defaultOptions__["a" /* default */]).includes(key);
- }).reduce(function (obj, key) {
- return _extends({}, obj, _defineProperty({}, key, _this.element.dataset[key]));
- }, {}) : {};
- // Set default options - dataset attributes are master
- _this.options = _extends({}, __WEBPACK_IMPORTED_MODULE_12__defaultOptions__["a" /* default */], options, elementConfig);
-
- _this._id = Object(__WEBPACK_IMPORTED_MODULE_0__utils_index__["a" /* uuid */])('slider');
-
- _this.onShow = _this.onShow.bind(_this);
-
- // Initiate plugin
- _this._init();
- return _this;
- }
-
- /**
- * Initiate all DOM element containing datePicker class
- * @method
- * @return {Array} Array of all datePicker instances
- */
-
-
- _createClass(bulmaCarousel, [{
- key: '_init',
-
-
- /****************************************************
- * *
- * PRIVATE FUNCTIONS *
- * *
- ****************************************************/
- /**
- * Initiate plugin instance
- * @method _init
- * @return {Slider} Current plugin instance
- */
- value: function _init() {
- this._items = Array.from(this.element.children);
-
- // Load plugins
- this._breakpoint = new __WEBPACK_IMPORTED_MODULE_5__components_breakpoint__["a" /* default */](this);
- this._autoplay = new __WEBPACK_IMPORTED_MODULE_4__components_autoplay__["a" /* default */](this);
- this._navigation = new __WEBPACK_IMPORTED_MODULE_8__components_navigation__["a" /* default */](this);
- this._pagination = new __WEBPACK_IMPORTED_MODULE_9__components_pagination__["a" /* default */](this);
- this._infinite = new __WEBPACK_IMPORTED_MODULE_6__components_infinite__["a" /* default */](this);
- this._loop = new __WEBPACK_IMPORTED_MODULE_7__components_loop__["a" /* default */](this);
- this._swipe = new __WEBPACK_IMPORTED_MODULE_10__components_swipe__["a" /* default */](this);
-
- this._build();
-
- if (Object(__WEBPACK_IMPORTED_MODULE_2__utils_type__["a" /* isFunction */])(this.options.onReady)) {
- this.options.onReady(this);
- }
-
- return this;
- }
-
- /**
- * Build Slider HTML component and append it to the DOM
- * @method _build
- */
-
- }, {
- key: '_build',
- value: function _build() {
- var _this2 = this;
-
- // Generate HTML Fragment of template
- this.node = document.createRange().createContextualFragment(Object(__WEBPACK_IMPORTED_MODULE_13__templates__["a" /* default */])(this.id));
- // Save pointers to template parts
- this._ui = {
- wrapper: this.node.firstChild,
- container: this.node.querySelector('.slider-container')
-
- // Add slider to DOM
- };this.element.appendChild(this.node);
- this._ui.wrapper.classList.add('is-loading');
- this._ui.container.style.opacity = 0;
-
- this._transitioner = new __WEBPACK_IMPORTED_MODULE_11__components_transitioner__["a" /* default */](this);
-
- // Wrap all items by slide element
- this._slides = this._items.map(function (item, index) {
- return _this2._createSlide(item, index);
- });
-
- this.reset();
-
- this._bindEvents();
-
- this._ui.container.style.opacity = 1;
- this._ui.wrapper.classList.remove('is-loading');
- }
-
- /**
- * Bind all events
- * @method _bindEvents
- * @return {void}
- */
-
- }, {
- key: '_bindEvents',
- value: function _bindEvents() {
- this.on('show', this.onShow);
- }
- }, {
- key: '_unbindEvents',
- value: function _unbindEvents() {
- this.off('show', this.onShow);
- }
- }, {
- key: '_createSlide',
- value: function _createSlide(item, index) {
- var slide = document.createRange().createContextualFragment(Object(__WEBPACK_IMPORTED_MODULE_14__templates_item__["a" /* default */])()).firstChild;
- slide.dataset.sliderIndex = index;
- slide.appendChild(item);
- return slide;
- }
-
- /**
- * Calculate slider dimensions
- */
-
- }, {
- key: '_setDimensions',
- value: function _setDimensions() {
- var _this3 = this;
-
- if (!this.options.vertical) {
- if (this.options.centerMode) {
- this._ui.wrapper.style.padding = '0px ' + this.options.centerPadding;
- }
- } else {
- this._ui.wrapper.style.height = Object(__WEBPACK_IMPORTED_MODULE_1__utils_css__["c" /* outerHeight */])(this._slides[0]) * this.slidesToShow;
- if (this.options.centerMode) {
- this._ui.wrapper.style.padding = this.options.centerPadding + ' 0px';
- }
- }
-
- this._wrapperWidth = Object(__WEBPACK_IMPORTED_MODULE_1__utils_css__["e" /* width */])(this._ui.wrapper);
- this._wrapperHeight = Object(__WEBPACK_IMPORTED_MODULE_1__utils_css__["c" /* outerHeight */])(this._ui.wrapper);
-
- if (!this.options.vertical) {
- this._slideWidth = Math.ceil(this._wrapperWidth / this.slidesToShow);
- this._containerWidth = Math.ceil(this._slideWidth * this._slides.length);
- this._ui.container.style.width = this._containerWidth + 'px';
- } else {
- this._slideWidth = Math.ceil(this._wrapperWidth);
- this._containerHeight = Math.ceil(Object(__WEBPACK_IMPORTED_MODULE_1__utils_css__["c" /* outerHeight */])(this._slides[0]) * this._slides.length);
- this._ui.container.style.height = this._containerHeight + 'px';
- }
-
- this._slides.forEach(function (slide) {
- slide.style.width = _this3._slideWidth + 'px';
- });
- }
- }, {
- key: '_setHeight',
- value: function _setHeight() {
- if (this.options.effect !== 'translate') {
- this._ui.container.style.height = Object(__WEBPACK_IMPORTED_MODULE_1__utils_css__["c" /* outerHeight */])(this._slides[this.state.index]) + 'px';
- }
- }
-
- // Update slides classes
-
- }, {
- key: '_setClasses',
- value: function _setClasses() {
- var _this4 = this;
-
- this._slides.forEach(function (slide) {
- Object(__WEBPACK_IMPORTED_MODULE_1__utils_css__["d" /* removeClasses */])(slide, 'is-active is-current is-slide-previous is-slide-next');
- if (Math.abs((_this4.state.index - 1) % _this4.state.length) === parseInt(slide.dataset.sliderIndex, 10)) {
- slide.classList.add('is-slide-previous');
- }
- if (Math.abs(_this4.state.index % _this4.state.length) === parseInt(slide.dataset.sliderIndex, 10)) {
- slide.classList.add('is-current');
- }
- if (Math.abs((_this4.state.index + 1) % _this4.state.length) === parseInt(slide.dataset.sliderIndex, 10)) {
- slide.classList.add('is-slide-next');
- }
- });
- }
-
- /****************************************************
- * *
- * GETTERS and SETTERS *
- * *
- ****************************************************/
-
- /**
- * Get id of current datePicker
- */
-
- }, {
- key: 'onShow',
-
-
- /****************************************************
- * *
- * EVENTS FUNCTIONS *
- * *
- ****************************************************/
- value: function onShow(e) {
- this._navigation.refresh();
- this._pagination.refresh();
- this._setClasses();
- }
-
- /****************************************************
- * *
- * PUBLIC FUNCTIONS *
- * *
- ****************************************************/
-
- }, {
- key: 'next',
- value: function next() {
- if (!this.options.loop && !this.options.infinite && this.state.index + this.slidesToScroll > this.state.length - this.slidesToShow && !this.options.centerMode) {
- this.state.next = this.state.index;
- } else {
- this.state.next = this.state.index + this.slidesToScroll;
- }
- this.show();
- }
- }, {
- key: 'previous',
- value: function previous() {
- if (!this.options.loop && !this.options.infinite && this.state.index === 0) {
- this.state.next = this.state.index;
- } else {
- this.state.next = this.state.index - this.slidesToScroll;
- }
- this.show();
- }
- }, {
- key: 'start',
- value: function start() {
- this._autoplay.start();
- }
- }, {
- key: 'pause',
- value: function pause() {
- this._autoplay.pause();
- }
- }, {
- key: 'stop',
- value: function stop() {
- this._autoplay.stop();
- }
- }, {
- key: 'show',
- value: function show(index) {
- var force = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : false;
-
- // If all slides are already visible then return
- if (!this.state.length || this.state.length <= this.slidesToShow) {
- return;
- }
-
- if (typeof index === 'number') {
- this.state.next = index;
- }
-
- if (this.options.loop) {
- this._loop.apply();
- }
- if (this.options.infinite) {
- this._infinite.apply();
- }
-
- // If new slide is already the current one then return
- if (this.state.index === this.state.next) {
- return;
- }
-
- this.emit('before:show', this.state);
- this._transitioner.apply(force, this._setHeight.bind(this));
- this.emit('after:show', this.state);
-
- this.emit('show', this);
- }
- }, {
- key: 'reset',
- value: function reset() {
- var _this5 = this;
-
- this.state = {
- length: this._items.length,
- index: Math.abs(this.options.initialSlide),
- next: Math.abs(this.options.initialSlide),
- prev: undefined
- };
-
- // Fix options
- if (this.options.loop && this.options.infinite) {
- this.options.loop = false;
- }
- if (this.options.slidesToScroll > this.options.slidesToShow) {
- this.options.slidesToScroll = this.slidesToShow;
- }
- this._breakpoint.init();
-
- if (this.state.index >= this.state.length && this.state.index !== 0) {
- this.state.index = this.state.index - this.slidesToScroll;
- }
- if (this.state.length <= this.slidesToShow) {
- this.state.index = 0;
- }
-
- this._ui.wrapper.appendChild(this._navigation.init().render());
- this._ui.wrapper.appendChild(this._pagination.init().render());
-
- if (this.options.navigationSwipe) {
- this._swipe.bindEvents();
- } else {
- this._swipe._unbindEvents();
- }
-
- this._breakpoint.apply();
- // Move all created slides into slider
- this._slides.forEach(function (slide) {
- return _this5._ui.container.appendChild(slide);
- });
- this._transitioner.init().apply(true, this._setHeight.bind(this));
-
- if (this.options.autoplay) {
- this._autoplay.init().start();
- }
- }
-
- /**
- * Destroy Slider
- * @method destroy
- */
-
- }, {
- key: 'destroy',
- value: function destroy() {
- var _this6 = this;
-
- this._unbindEvents();
- this._items.forEach(function (item) {
- _this6.element.appendChild(item);
- });
- this.node.remove();
- }
- }, {
- key: 'id',
- get: function get() {
- return this._id;
- }
- }, {
- key: 'index',
- set: function set(index) {
- this._index = index;
- },
- get: function get() {
- return this._index;
- }
- }, {
- key: 'length',
- set: function set(length) {
- this._length = length;
- },
- get: function get() {
- return this._length;
- }
- }, {
- key: 'slides',
- get: function get() {
- return this._slides;
- },
- set: function set(slides) {
- this._slides = slides;
- }
- }, {
- key: 'slidesToScroll',
- get: function get() {
- return this.options.effect === 'translate' ? this._breakpoint.getSlidesToScroll() : 1;
- }
- }, {
- key: 'slidesToShow',
- get: function get() {
- return this.options.effect === 'translate' ? this._breakpoint.getSlidesToShow() : 1;
- }
- }, {
- key: 'direction',
- get: function get() {
- return this.element.dir.toLowerCase() === 'rtl' || this.element.style.direction === 'rtl' ? 'rtl' : 'ltr';
- }
- }, {
- key: 'wrapper',
- get: function get() {
- return this._ui.wrapper;
- }
- }, {
- key: 'wrapperWidth',
- get: function get() {
- return this._wrapperWidth || 0;
- }
- }, {
- key: 'container',
- get: function get() {
- return this._ui.container;
- }
- }, {
- key: 'containerWidth',
- get: function get() {
- return this._containerWidth || 0;
- }
- }, {
- key: 'slideWidth',
- get: function get() {
- return this._slideWidth || 0;
- }
- }, {
- key: 'transitioner',
- get: function get() {
- return this._transitioner;
- }
- }], [{
- key: 'attach',
- value: function attach() {
- var _this7 = this;
-
- var selector = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : '.slider';
- var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {};
-
- var instances = new Array();
-
- var elements = Object(__WEBPACK_IMPORTED_MODULE_2__utils_type__["c" /* isString */])(selector) ? document.querySelectorAll(selector) : Array.isArray(selector) ? selector : [selector];
- [].forEach.call(elements, function (element) {
- if (typeof element[_this7.constructor.name] === 'undefined') {
- var instance = new bulmaCarousel(element, options);
- element[_this7.constructor.name] = instance;
- instances.push(instance);
- } else {
- instances.push(element[_this7.constructor.name]);
- }
- });
-
- return instances;
- }
- }]);
-
- return bulmaCarousel;
-}(__WEBPACK_IMPORTED_MODULE_3__utils_eventEmitter__["a" /* default */]);
-
-/* harmony default export */ __webpack_exports__["default"] = (bulmaCarousel);
-
-/***/ }),
-/* 6 */
-/***/ (function(module, __webpack_exports__, __webpack_require__) {
-
-"use strict";
-/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return uuid; });
-/* unused harmony export isRtl */
-/* unused harmony export defer */
-/* unused harmony export getNodeIndex */
-/* unused harmony export camelize */
-function _toConsumableArray(arr) { if (Array.isArray(arr)) { for (var i = 0, arr2 = Array(arr.length); i < arr.length; i++) { arr2[i] = arr[i]; } return arr2; } else { return Array.from(arr); } }
-
-var uuid = function uuid() {
- var prefix = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : '';
- return prefix + ([1e7] + -1e3 + -4e3 + -8e3 + -1e11).replace(/[018]/g, function (c) {
- return (c ^ crypto.getRandomValues(new Uint8Array(1))[0] & 15 >> c / 4).toString(16);
- });
-};
-var isRtl = function isRtl() {
- return document.documentElement.getAttribute('dir') === 'rtl';
-};
-
-var defer = function defer() {
- this.promise = new Promise(function (resolve, reject) {
- this.resolve = resolve;
- this.reject = reject;
- }.bind(this));
-
- this.then = this.promise.then.bind(this.promise);
- this.catch = this.promise.catch.bind(this.promise);
-};
-
-var getNodeIndex = function getNodeIndex(node) {
- return [].concat(_toConsumableArray(node.parentNode.children)).indexOf(node);
-};
-var camelize = function camelize(str) {
- // inline the upper-casing helper; `toUpper` was referenced but never defined in this module
- return str.replace(/-(\w)/g, function (_, c) { return c ? c.toUpperCase() : ''; });
-};
-
-/***/ }),
-/* 7 */
-/***/ (function(module, __webpack_exports__, __webpack_require__) {
-
-"use strict";
-/* harmony import */ var __WEBPACK_IMPORTED_MODULE_0__utils_eventEmitter__ = __webpack_require__(3);
-/* harmony import */ var __WEBPACK_IMPORTED_MODULE_1__utils_device__ = __webpack_require__(8);
-var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();
-
-function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } }
-
-function _possibleConstructorReturn(self, call) { if (!self) { throw new ReferenceError("this hasn't been initialised - super() hasn't been called"); } return call && (typeof call === "object" || typeof call === "function") ? call : self; }
-
-function _inherits(subClass, superClass) { if (typeof superClass !== "function" && superClass !== null) { throw new TypeError("Super expression must either be null or a function, not " + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; }
-
-
-
-
-var onVisibilityChange = Symbol('onVisibilityChange');
-var onMouseEnter = Symbol('onMouseEnter');
-var onMouseLeave = Symbol('onMouseLeave');
-
-var defaultOptions = {
- autoplay: false,
- autoplaySpeed: 3000
-};
-
-var Autoplay = function (_EventEmitter) {
- _inherits(Autoplay, _EventEmitter);
-
- function Autoplay(slider) {
- _classCallCheck(this, Autoplay);
-
- var _this = _possibleConstructorReturn(this, (Autoplay.__proto__ || Object.getPrototypeOf(Autoplay)).call(this));
-
- _this.slider = slider;
-
- _this.onVisibilityChange = _this.onVisibilityChange.bind(_this);
- _this.onMouseEnter = _this.onMouseEnter.bind(_this);
- _this.onMouseLeave = _this.onMouseLeave.bind(_this);
- return _this;
- }
-
- _createClass(Autoplay, [{
- key: 'init',
- value: function init() {
- this._bindEvents();
- return this;
- }
- }, {
- key: '_bindEvents',
- value: function _bindEvents() {
- document.addEventListener('visibilitychange', this.onVisibilityChange);
- if (this.slider.options.pauseOnHover) {
- this.slider.container.addEventListener(__WEBPACK_IMPORTED_MODULE_1__utils_device__["a" /* pointerEnter */], this.onMouseEnter);
- this.slider.container.addEventListener(__WEBPACK_IMPORTED_MODULE_1__utils_device__["b" /* pointerLeave */], this.onMouseLeave);
- }
- }
- }, {
- key: '_unbindEvents',
- value: function _unbindEvents() {
- document.removeEventListener('visibilitychange', this.onVisibilityChange);
- this.slider.container.removeEventListener(__WEBPACK_IMPORTED_MODULE_1__utils_device__["a" /* pointerEnter */], this.onMouseEnter);
- this.slider.container.removeEventListener(__WEBPACK_IMPORTED_MODULE_1__utils_device__["b" /* pointerLeave */], this.onMouseLeave);
- }
- }, {
- key: 'start',
- value: function start() {
- var _this2 = this;
-
- this.stop();
- if (this.slider.options.autoplay) {
- this.emit('start', this);
- this._interval = setInterval(function () {
- if (!(_this2._hovering && _this2.slider.options.pauseOnHover)) {
- if (!_this2.slider.options.centerMode && _this2.slider.state.next >= _this2.slider.state.length - _this2.slider.slidesToShow && !_this2.slider.options.loop && !_this2.slider.options.infinite) {
- _this2.stop();
- } else {
- _this2.slider.next();
- }
- }
- }, this.slider.options.autoplaySpeed);
- }
- }
- }, {
- key: 'stop',
- value: function stop() {
- this._interval = clearInterval(this._interval);
- this.emit('stop', this);
- }
- }, {
- key: 'pause',
- value: function pause() {
- var _this3 = this;
-
- var speed = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : 0;
-
- if (this.paused) {
- return;
- }
- if (this.timer) {
- this.stop();
- }
- this.paused = true;
- if (speed === 0) {
- this.paused = false;
- this.start();
- } else {
- this.slider.on('transition:end', function () {
- if (!_this3) {
- return;
- }
- _this3.paused = false;
- if (!_this3.run) {
- _this3.stop();
- } else {
- _this3.start();
- }
- });
- }
- }
- }, {
- key: 'onVisibilityChange',
- value: function onVisibilityChange(e) {
- if (document.hidden) {
- this.stop();
- } else {
- this.start();
- }
- }
- }, {
- key: 'onMouseEnter',
- value: function onMouseEnter(e) {
- this._hovering = true;
- if (this.slider.options.pauseOnHover) {
- this.pause();
- }
- }
- }, {
- key: 'onMouseLeave',
- value: function onMouseLeave(e) {
- this._hovering = false;
- if (this.slider.options.pauseOnHover) {
- this.pause();
- }
- }
- }]);
-
- return Autoplay;
-}(__WEBPACK_IMPORTED_MODULE_0__utils_eventEmitter__["a" /* default */]);
-
-/* harmony default export */ __webpack_exports__["a"] = (Autoplay);
-
-/***/ }),
-/* 8 */
-/***/ (function(module, __webpack_exports__, __webpack_require__) {
-
-"use strict";
-/* unused harmony export isIE */
-/* unused harmony export isIETouch */
-/* unused harmony export isAndroid */
-/* unused harmony export isiPad */
-/* unused harmony export isiPod */
-/* unused harmony export isiPhone */
-/* unused harmony export isSafari */
-/* unused harmony export isUiWebView */
-/* unused harmony export supportsTouchEvents */
-/* unused harmony export supportsPointerEvents */
-/* unused harmony export supportsTouch */
-/* unused harmony export pointerDown */
-/* unused harmony export pointerMove */
-/* unused harmony export pointerUp */
-/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return pointerEnter; });
-/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "b", function() { return pointerLeave; });
-var isIE = window.navigator.pointerEnabled || window.navigator.msPointerEnabled;
-var isIETouch = window.navigator.msPointerEnabled && window.navigator.msMaxTouchPoints > 1 || window.navigator.pointerEnabled && window.navigator.maxTouchPoints > 1;
-var isAndroid = navigator.userAgent.match(/(Android);?[\s\/]+([\d.]+)?/);
-var isiPad = navigator.userAgent.match(/(iPad).*OS\s([\d_]+)/);
-var isiPod = navigator.userAgent.match(/(iPod)(.*OS\s([\d_]+))?/);
-var isiPhone = !navigator.userAgent.match(/(iPad).*OS\s([\d_]+)/) && navigator.userAgent.match(/(iPhone\sOS)\s([\d_]+)/);
-var isSafari = navigator.userAgent.toLowerCase().indexOf('safari') >= 0 && navigator.userAgent.toLowerCase().indexOf('chrome') < 0 && navigator.userAgent.toLowerCase().indexOf('android') < 0;
-var isUiWebView = /(iPhone|iPod|iPad).*AppleWebKit(?!.*Safari)/i.test(navigator.userAgent);
-
-var supportsTouchEvents = !!('ontouchstart' in window);
-var supportsPointerEvents = !!('PointerEvent' in window);
-var supportsTouch = supportsTouchEvents || window.DocumentTouch && document instanceof DocumentTouch || navigator.maxTouchPoints; // IE >=11
-var pointerDown = !supportsTouch ? 'mousedown' : 'mousedown ' + (supportsTouchEvents ? 'touchstart' : 'pointerdown');
-var pointerMove = !supportsTouch ? 'mousemove' : 'mousemove ' + (supportsTouchEvents ? 'touchmove' : 'pointermove');
-var pointerUp = !supportsTouch ? 'mouseup' : 'mouseup ' + (supportsTouchEvents ? 'touchend' : 'pointerup');
-var pointerEnter = supportsTouch && supportsPointerEvents ? 'pointerenter' : 'mouseenter';
-var pointerLeave = supportsTouch && supportsPointerEvents ? 'pointerleave' : 'mouseleave';
-
-/***/ }),
-/* 9 */
-/***/ (function(module, __webpack_exports__, __webpack_require__) {
-
-"use strict";
-var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();
-
-function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } }
-
-var onResize = Symbol('onResize');
-
-var Breakpoints = function () {
- function Breakpoints(slider) {
- _classCallCheck(this, Breakpoints);
-
- this.slider = slider;
- this.options = slider.options;
-
- this[onResize] = this[onResize].bind(this);
-
- this._bindEvents();
- }
-
- _createClass(Breakpoints, [{
- key: 'init',
- value: function init() {
- this._defaultBreakpoint = {
- slidesToShow: this.options.slidesToShow,
- slidesToScroll: this.options.slidesToScroll
- };
- this.options.breakpoints.sort(function (a, b) {
- return parseInt(a.changePoint, 10) - parseInt(b.changePoint, 10); // a sort comparator must return a number, not a boolean
- });
- this._currentBreakpoint = this._getActiveBreakpoint();
-
- return this;
- }
- }, {
- key: 'destroy',
- value: function destroy() {
- this._unbindEvents();
- }
- }, {
- key: '_bindEvents',
- value: function _bindEvents() {
- window.addEventListener('resize', this[onResize]);
- window.addEventListener('orientationchange', this[onResize]);
- }
- }, {
- key: '_unbindEvents',
- value: function _unbindEvents() {
- window.removeEventListener('resize', this[onResize]);
- window.removeEventListener('orientationchange', this[onResize]);
- }
- }, {
- key: '_getActiveBreakpoint',
- value: function _getActiveBreakpoint() {
- //Get breakpoint for window width
- var _iteratorNormalCompletion = true;
- var _didIteratorError = false;
- var _iteratorError = undefined;
-
- try {
- for (var _iterator = this.options.breakpoints[Symbol.iterator](), _step; !(_iteratorNormalCompletion = (_step = _iterator.next()).done); _iteratorNormalCompletion = true) {
- var point = _step.value;
-
- if (point.changePoint >= window.innerWidth) {
- return point;
- }
- }
- } catch (err) {
- _didIteratorError = true;
- _iteratorError = err;
- } finally {
- try {
- if (!_iteratorNormalCompletion && _iterator.return) {
- _iterator.return();
- }
- } finally {
- if (_didIteratorError) {
- throw _iteratorError;
- }
- }
- }
-
- return this._defaultBreakpoint;
- }
- }, {
- key: 'getSlidesToShow',
- value: function getSlidesToShow() {
- return this._currentBreakpoint ? this._currentBreakpoint.slidesToShow : this._defaultBreakpoint.slidesToShow;
- }
- }, {
- key: 'getSlidesToScroll',
- value: function getSlidesToScroll() {
- return this._currentBreakpoint ? this._currentBreakpoint.slidesToScroll : this._defaultBreakpoint.slidesToScroll;
- }
- }, {
- key: 'apply',
- value: function apply() {
- if (this.slider.state.index >= this.slider.state.length && this.slider.state.index !== 0) {
- this.slider.state.index = this.slider.state.index - this._currentBreakpoint.slidesToScroll;
- }
- if (this.slider.state.length <= this._currentBreakpoint.slidesToShow) {
- this.slider.state.index = 0;
- }
-
- if (this.options.loop) {
- this.slider._loop.init().apply();
- }
-
- if (this.options.infinite) {
- this.slider._infinite.init().apply();
- }
-
- this.slider._setDimensions();
- this.slider._transitioner.init().apply(true, this.slider._setHeight.bind(this.slider));
- this.slider._setClasses();
-
- this.slider._navigation.refresh();
- this.slider._pagination.refresh();
- }
- }, {
- key: onResize,
- value: function value(e) {
- var newBreakPoint = this._getActiveBreakpoint();
- if (newBreakPoint.slidesToShow !== this._currentBreakpoint.slidesToShow) {
- this._currentBreakpoint = newBreakPoint;
- this.apply();
- }
- }
- }]);
-
- return Breakpoints;
-}();
-
-/* harmony default export */ __webpack_exports__["a"] = (Breakpoints);
-
-/***/ }),
-/* 10 */
-/***/ (function(module, __webpack_exports__, __webpack_require__) {
-
-"use strict";
-var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();
-
-function _toConsumableArray(arr) { if (Array.isArray(arr)) { for (var i = 0, arr2 = Array(arr.length); i < arr.length; i++) { arr2[i] = arr[i]; } return arr2; } else { return Array.from(arr); } }
-
-function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } }
-
-var Infinite = function () {
- function Infinite(slider) {
- _classCallCheck(this, Infinite);
-
- this.slider = slider;
- }
-
- _createClass(Infinite, [{
- key: 'init',
- value: function init() {
- if (this.slider.options.infinite && this.slider.options.effect === 'translate') {
- if (this.slider.options.centerMode) {
- this._infiniteCount = Math.ceil(this.slider.slidesToShow + this.slider.slidesToShow / 2);
- } else {
- this._infiniteCount = this.slider.slidesToShow;
- }
-
- var frontClones = [];
- var slideIndex = 0;
- for (var i = this.slider.state.length; i > this.slider.state.length - 1 - this._infiniteCount; i -= 1) {
- slideIndex = i - 1;
- frontClones.unshift(this._cloneSlide(this.slider.slides[slideIndex], slideIndex - this.slider.state.length));
- }
-
- var backClones = [];
- for (var _i = 0; _i < this._infiniteCount + this.slider.state.length; _i += 1) {
- backClones.push(this._cloneSlide(this.slider.slides[_i % this.slider.state.length], _i + this.slider.state.length));
- }
-
- this.slider.slides = [].concat(frontClones, _toConsumableArray(this.slider.slides), backClones);
- }
- return this;
- }
- }, {
- key: 'apply',
- value: function apply() {}
- }, {
- key: 'onTransitionEnd',
- value: function onTransitionEnd(e) {
- if (this.slider.options.infinite) {
- if (this.slider.state.next >= this.slider.state.length) {
- this.slider.state.index = this.slider.state.next = this.slider.state.next - this.slider.state.length;
- this.slider.transitioner.apply(true);
- } else if (this.slider.state.next < 0) {
- this.slider.state.index = this.slider.state.next = this.slider.state.length + this.slider.state.next;
- this.slider.transitioner.apply(true);
- }
- }
- }
- }, {
- key: '_cloneSlide',
- value: function _cloneSlide(slide, index) {
- var newSlide = slide.cloneNode(true);
- newSlide.dataset.sliderIndex = index;
- newSlide.dataset.cloned = true;
- var ids = newSlide.querySelectorAll('[id]'); // querySelectorAll never returns null
- [].forEach.call(ids, function (id) {
- id.setAttribute('id', '');
- });
- return newSlide;
- }
- }]);
-
- return Infinite;
-}();
-
-/* harmony default export */ __webpack_exports__["a"] = (Infinite);
-
-/***/ }),
-/* 11 */
-/***/ (function(module, __webpack_exports__, __webpack_require__) {
-
-"use strict";
-/* harmony import */ var __WEBPACK_IMPORTED_MODULE_0__utils_dom__ = __webpack_require__(12);
-var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();
-
-function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } }
-
-
-
-var Loop = function () {
- function Loop(slider) {
- _classCallCheck(this, Loop);
-
- this.slider = slider;
- }
-
- _createClass(Loop, [{
- key: "init",
- value: function init() {
- return this;
- }
- }, {
- key: "apply",
- value: function apply() {
- if (this.slider.options.loop) {
- if (this.slider.state.next > 0) {
- if (this.slider.state.next < this.slider.state.length) {
- if (this.slider.state.next > this.slider.state.length - this.slider.slidesToShow && Object(__WEBPACK_IMPORTED_MODULE_0__utils_dom__["a" /* isInViewport */])(this.slider._slides[this.slider.state.length - 1], this.slider.wrapper)) {
- this.slider.state.next = 0;
- } else {
- this.slider.state.next = Math.min(Math.max(this.slider.state.next, 0), this.slider.state.length - this.slider.slidesToShow);
- }
- } else {
- this.slider.state.next = 0;
- }
- } else {
- if (this.slider.state.next <= 0 - this.slider.slidesToScroll) {
- this.slider.state.next = this.slider.state.length - this.slider.slidesToShow;
- } else {
- this.slider.state.next = 0;
- }
- }
- }
- }
- }]);
-
- return Loop;
-}();
-
-/* harmony default export */ __webpack_exports__["a"] = (Loop);
-
-/***/ }),
-/* 12 */
-/***/ (function(module, __webpack_exports__, __webpack_require__) {
-
-"use strict";
-/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return isInViewport; });
-var isInViewport = function isInViewport(element, html) {
- var rect = element.getBoundingClientRect();
- html = html || document.documentElement;
- return rect.top >= 0 && rect.left >= 0 && rect.bottom <= (window.innerHeight || html.clientHeight) && rect.right <= (window.innerWidth || html.clientWidth);
-};
-
-/***/ }),
-/* 13 */
-/***/ (function(module, __webpack_exports__, __webpack_require__) {
-
-"use strict";
-/* harmony import */ var __WEBPACK_IMPORTED_MODULE_0__templates_navigation__ = __webpack_require__(14);
-/* harmony import */ var __WEBPACK_IMPORTED_MODULE_1__utils_detect_supportsPassive__ = __webpack_require__(1);
-var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();
-
-function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } }
-
-
-
-
-var Navigation = function () {
- function Navigation(slider) {
- _classCallCheck(this, Navigation);
-
- this.slider = slider;
-
- this._clickEvents = ['click', 'touch'];
- this._supportsPassive = Object(__WEBPACK_IMPORTED_MODULE_1__utils_detect_supportsPassive__["a" /* default */])();
-
- this.onPreviousClick = this.onPreviousClick.bind(this);
- this.onNextClick = this.onNextClick.bind(this);
- this.onKeyUp = this.onKeyUp.bind(this);
- }
-
- _createClass(Navigation, [{
- key: 'init',
- value: function init() {
- this.node = document.createRange().createContextualFragment(Object(__WEBPACK_IMPORTED_MODULE_0__templates_navigation__["a" /* default */])(this.slider.options.icons));
- this._ui = {
- previous: this.node.querySelector('.slider-navigation-previous'),
- next: this.node.querySelector('.slider-navigation-next')
- };
-
- this._unbindEvents();
- this._bindEvents();
-
- this.refresh();
-
- return this;
- }
- }, {
- key: 'destroy',
- value: function destroy() {
- this._unbindEvents();
- }
- }, {
- key: '_bindEvents',
- value: function _bindEvents() {
- var _this = this;
-
- this.slider.wrapper.addEventListener('keyup', this.onKeyUp);
- this._clickEvents.forEach(function (clickEvent) {
- _this._ui.previous.addEventListener(clickEvent, _this.onPreviousClick);
- _this._ui.next.addEventListener(clickEvent, _this.onNextClick);
- });
- }
- }, {
- key: '_unbindEvents',
- value: function _unbindEvents() {
- var _this2 = this;
-
- this.slider.wrapper.removeEventListener('keyup', this.onKeyUp);
- this._clickEvents.forEach(function (clickEvent) {
- _this2._ui.previous.removeEventListener(clickEvent, _this2.onPreviousClick);
- _this2._ui.next.removeEventListener(clickEvent, _this2.onNextClick);
- });
- }
- }, {
- key: 'onNextClick',
- value: function onNextClick(e) {
- if (!this._supportsPassive) {
- e.preventDefault();
- }
-
- if (this.slider.options.navigation) {
- this.slider.next();
- }
- }
- }, {
- key: 'onPreviousClick',
- value: function onPreviousClick(e) {
- if (!this._supportsPassive) {
- e.preventDefault();
- }
-
- if (this.slider.options.navigation) {
- this.slider.previous();
- }
- }
- }, {
- key: 'onKeyUp',
- value: function onKeyUp(e) {
- if (this.slider.options.keyNavigation) {
- if (e.key === 'ArrowRight' || e.key === 'Right') {
- this.slider.next();
- } else if (e.key === 'ArrowLeft' || e.key === 'Left') {
- this.slider.previous();
- }
- }
- }
- }, {
- key: 'refresh',
- value: function refresh() {
- // let centerOffset = Math.floor(this.options.slidesToShow / 2);
- if (!this.slider.options.loop && !this.slider.options.infinite) {
- if (this.slider.options.navigation && this.slider.state.length > this.slider.slidesToShow) {
- this._ui.previous.classList.remove('is-hidden');
- this._ui.next.classList.remove('is-hidden');
- if (this.slider.state.next === 0) {
- this._ui.previous.classList.add('is-hidden');
- this._ui.next.classList.remove('is-hidden');
- } else if (this.slider.state.next >= this.slider.state.length - this.slider.slidesToShow && !this.slider.options.centerMode) {
- this._ui.previous.classList.remove('is-hidden');
- this._ui.next.classList.add('is-hidden');
- } else if (this.slider.state.next >= this.slider.state.length - 1 && this.slider.options.centerMode) {
- this._ui.previous.classList.remove('is-hidden');
- this._ui.next.classList.add('is-hidden');
- }
- } else {
- this._ui.previous.classList.add('is-hidden');
- this._ui.next.classList.add('is-hidden');
- }
- }
- }
- }, {
- key: 'render',
- value: function render() {
- return this.node;
- }
- }]);
-
- return Navigation;
-}();
-
-/* harmony default export */ __webpack_exports__["a"] = (Navigation);
-
-/***/ }),
-/* 14 */
-/***/ (function(module, __webpack_exports__, __webpack_require__) {
-
-"use strict";
-/* harmony default export */ __webpack_exports__["a"] = (function (icons) {
- return "
";
-});
-
-/***/ }),
-/* 24 */
-/***/ (function(module, __webpack_exports__, __webpack_require__) {
-
-"use strict";
-/* harmony default export */ __webpack_exports__["a"] = (function () {
- return "";
-});
-
-/***/ })
-/******/ ])["default"];
-});
\ No newline at end of file
diff --git a/spaces/Mahiruoshi/Lovelive_Nijigasaki_VITS/ONNXVITS_inference.py b/spaces/Mahiruoshi/Lovelive_Nijigasaki_VITS/ONNXVITS_inference.py
deleted file mode 100644
index 258b618cd338322365dfa25bec468a0a3f70ccd1..0000000000000000000000000000000000000000
--- a/spaces/Mahiruoshi/Lovelive_Nijigasaki_VITS/ONNXVITS_inference.py
+++ /dev/null
@@ -1,36 +0,0 @@
-import logging
-logging.getLogger('numba').setLevel(logging.WARNING)
-import IPython.display as ipd
-import torch
-import commons
-import utils
-import ONNXVITS_infer
-from text import text_to_sequence
-
-def get_text(text, hps):
- text_norm = text_to_sequence(text, hps.symbols, hps.data.text_cleaners)
- if hps.data.add_blank:
- text_norm = commons.intersperse(text_norm, 0)
- text_norm = torch.LongTensor(text_norm)
- return text_norm
-
-hps = utils.get_hparams_from_file("../vits/pretrained_models/uma87.json")
-
-net_g = ONNXVITS_infer.SynthesizerTrn(
- len(hps.symbols),
- hps.data.filter_length // 2 + 1,
- hps.train.segment_size // hps.data.hop_length,
- n_speakers=hps.data.n_speakers,
- **hps.model)
-_ = net_g.eval()
-
-_ = utils.load_checkpoint("../vits/pretrained_models/uma_1153000.pth", net_g)
-
-text1 = get_text("おはようございます。", hps)
-stn_tst = text1
-with torch.no_grad():
- x_tst = stn_tst.unsqueeze(0)
- x_tst_lengths = torch.LongTensor([stn_tst.size(0)])
- sid = torch.LongTensor([0])
- audio = net_g.infer(x_tst, x_tst_lengths, sid=sid, noise_scale=.667, noise_scale_w=0.8, length_scale=1)[0][0,0].data.cpu().float().numpy()
-print(audio)
\ No newline at end of file
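
The script above ends by printing the raw waveform; a minimal sketch of persisting it instead (the output path is illustrative, and soundfile plus hps.data.sampling_rate are assumed to be available):

    import soundfile

    # audio: 1-D float numpy array returned by net_g.infer(...) above
    soundfile.write("output.wav", audio, hps.data.sampling_rate)
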
diff --git a/spaces/Marshalls/testmtd/analysis/pymo/data.py b/spaces/Marshalls/testmtd/analysis/pymo/data.py
deleted file mode 100644
index 448fbb0cdc30d80b7b6276b0e89a013052c77c35..0000000000000000000000000000000000000000
--- a/spaces/Marshalls/testmtd/analysis/pymo/data.py
+++ /dev/null
@@ -1,53 +0,0 @@
-import numpy as np
-
-class Joint():
- def __init__(self, name, parent=None, children=None):
- self.name = name
- self.parent = parent
- self.children = children
-
-class MocapData():
- def __init__(self):
- self.skeleton = {}
- self.values = None
- self.channel_names = []
- self.framerate = 0.0
- self.root_name = ''
-
- def traverse(self, j=None):
- stack = [self.root_name]
- while stack:
- joint = stack.pop()
- yield joint
- for c in self.skeleton[joint]['children']:
- stack.append(c)
-
- def clone(self):
- import copy
- new_data = MocapData()
- new_data.skeleton = copy.deepcopy(self.skeleton)
- new_data.values = copy.deepcopy(self.values)
- new_data.channel_names = copy.deepcopy(self.channel_names)
- new_data.root_name = copy.deepcopy(self.root_name)
- new_data.framerate = copy.deepcopy(self.framerate)
- return new_data
-
- def get_all_channels(self):
- '''Returns all of the channels parsed from the file as a 2D numpy array'''
-
- frames = [f[1] for f in self.values]
- return np.asarray([[channel[2] for channel in frame] for frame in frames])
-
- def get_skeleton_tree(self):
- # NOTE: tree construction was left unfinished upstream; return the root
- # joint (children not populated) so callers at least get a usable handle
- root_key = [j for j in self.skeleton if self.skeleton[j]['parent'] is None][0]
- return Joint(root_key)
-
- def get_empty_channels(self):
- #TODO
- pass
-
- def get_constant_channels(self):
- #TODO
- pass
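
For context, MocapData.traverse() above walks the skeleton dict depth-first starting from root_name. A self-contained sketch of its behaviour (the two-joint skeleton is invented for illustration):

    data = MocapData()
    data.skeleton = {
        'Hips': {'parent': None, 'children': ['Spine']},
        'Spine': {'parent': 'Hips', 'children': []},
    }
    data.root_name = 'Hips'
    print(list(data.traverse()))  # ['Hips', 'Spine']
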
diff --git a/spaces/Mashir0/pximg/utils/PixivClient.js b/spaces/Mashir0/pximg/utils/PixivClient.js
deleted file mode 100644
index 1dacaa1cfae69f7bc9313f0b09c11f1b1cb598ae..0000000000000000000000000000000000000000
--- a/spaces/Mashir0/pximg/utils/PixivClient.js
+++ /dev/null
@@ -1,69 +0,0 @@
-const PixivApi = require('pixiv-api-client');
-const AwaitLock = require('await-lock').default;
-
-const loginLock = new AwaitLock();
-
-module.exports = class PixivClient {
- /**
- * @param {string} refreshToken
- */
- constructor(refreshToken) {
- this.api = new PixivApi();
- this.expireTime = 0;
- this.refreshToken = refreshToken;
- }
-
- get loginExpired() {
- return Date.now() / 1000 > this.expireTime - 60;
- }
-
- async login() {
- if (!this.loginExpired) return;
- await loginLock.acquireAsync();
- if (!this.loginExpired) {
- loginLock.release();
- return;
- }
- try {
- const { expires_in: expiresIn } = await this.api.refreshAccessToken(this.refreshToken);
- this.expireTime = Date.now() / 1000 + expiresIn;
- } finally {
- loginLock.release();
- }
- }
-
- /**
- * @param {string} id
- * @param {string} [language]
- * @returns {PixivClientIllust}
- */
- async illustDetail(id, language) {
- await this.login();
- if (language) this.api.setLanguage(language);
- try {
- const { illust } = await this.api.illustDetail(id);
- if (!illust.title || !illust.user.name) {
- throw new Error('unexpected illust result');
- }
- return illust;
- } catch (error) {
- throw error.error || error;
- }
- }
-
- /**
- * @param {string} id
- * @param {string} [language]
- */
- async illustPages(id, language) {
- const illust = await this.illustDetail(id, language);
- if (illust.meta_pages.length) {
- return illust.meta_pages.map(p => p.image_urls.original);
- }
- return [illust.meta_single_page.original_image_url];
- }
-
- static getClient(token) {
- return token ? new PixivClient(token) : null;
- }
-};
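
The login() method above uses double-checked locking: it tests loginExpired, waits on the lock, then tests again so only one concurrent caller actually refreshes the token. A rough Python analogue of the same pattern (TokenClient and the hard-coded expires_in are illustrative, not part of this module):

    import asyncio
    import time

    class TokenClient:
        def __init__(self):
            self._lock = asyncio.Lock()
            self.expire_time = 0.0  # epoch seconds

        @property
        def login_expired(self):
            return time.time() > self.expire_time - 60  # refresh a minute early

        async def login(self):
            if not self.login_expired:
                return
            async with self._lock:
                if not self.login_expired:  # someone refreshed while we waited
                    return
                expires_in = 3600  # stand-in for a real refresh-token call
                self.expire_time = time.time() + expires_in
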
diff --git a/spaces/MetaWabbit/Auto-GPT/autogpt/json_utils/utilities.py b/spaces/MetaWabbit/Auto-GPT/autogpt/json_utils/utilities.py
deleted file mode 100644
index eb9bb687750460fed2f4547b67e41f8e8c877a41..0000000000000000000000000000000000000000
--- a/spaces/MetaWabbit/Auto-GPT/autogpt/json_utils/utilities.py
+++ /dev/null
@@ -1,54 +0,0 @@
-"""Utilities for the json_fixes package."""
-import json
-import re
-
-from jsonschema import Draft7Validator
-
-from autogpt.config import Config
-from autogpt.logs import logger
-
-CFG = Config()
-
-
-def extract_char_position(error_message: str) -> int:
- """Extract the character position from the JSONDecodeError message.
-
- Args:
- error_message (str): The error message from the JSONDecodeError
- exception.
-
- Returns:
- int: The character position.
- """
-
- char_pattern = re.compile(r"\(char (\d+)\)")
- if match := char_pattern.search(error_message):
- return int(match[1])
- else:
- raise ValueError("Character position not found in the error message.")
-
-
-def validate_json(json_object: object, schema_name: object) -> object:
- """Validate a JSON object against autogpt/json_utils/<schema_name>.json.
-
- Args:
- json_object (object): The JSON object to validate.
- schema_name (object): Name of the schema file, without the .json extension.
-
- Returns:
- object: The json_object that was passed in, whether or not it validated.
- """
- with open(f"autogpt/json_utils/{schema_name}.json", "r") as f:
- schema = json.load(f)
- validator = Draft7Validator(schema)
-
- if errors := sorted(validator.iter_errors(json_object), key=lambda e: e.path):
- logger.error("The JSON object is invalid.")
- if CFG.debug_mode:
- logger.error(
- json.dumps(json_object, indent=4)
- )
- logger.error("The following issues were found:")
-
- for error in errors:
- logger.error(f"Error: {error.message}")
- elif CFG.debug_mode:
- print("The JSON object is valid.")
-
- return json_object
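
extract_char_position() above depends on the "(char N)" suffix that json.JSONDecodeError includes in its message. A quick check against a representative error (assuming the function is importable):

    import json

    try:
        json.loads('{"a": }')
    except json.JSONDecodeError as e:
        print(str(e))                         # Expecting value: line 1 column 7 (char 6)
        print(extract_char_position(str(e)))  # 6
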
diff --git a/spaces/MiguelVGP/bearclassifier/README.md b/spaces/MiguelVGP/bearclassifier/README.md
deleted file mode 100644
index eaf8ba06b5599aef4219d07a438735f9bf9d09dd..0000000000000000000000000000000000000000
--- a/spaces/MiguelVGP/bearclassifier/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Bearclassifier
-emoji: 💻
-colorFrom: pink
-colorTo: red
-sdk: gradio
-sdk_version: 3.19.1
-app_file: app.py
-pinned: false
-license: apache-2.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/MirageML/sjc/sd1/ldm/__init__.py b/spaces/MirageML/sjc/sd1/ldm/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/Miuzarte/SUI-svc-3.0/models.py b/spaces/Miuzarte/SUI-svc-3.0/models.py
deleted file mode 100644
index 5d8f154887a43a5c5f67cf6340f74268398e32d5..0000000000000000000000000000000000000000
--- a/spaces/Miuzarte/SUI-svc-3.0/models.py
+++ /dev/null
@@ -1,351 +0,0 @@
-import copy
-import math
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-import attentions
-import commons
-import modules
-
-from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
-from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
-from commons import init_weights, get_padding
-from vdecoder.hifigan.models import Generator
-from utils import f0_to_coarse
-
-class ResidualCouplingBlock(nn.Module):
- def __init__(self,
- channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- n_flows=4,
- gin_channels=0):
- super().__init__()
- self.channels = channels
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.n_flows = n_flows
- self.gin_channels = gin_channels
-
- self.flows = nn.ModuleList()
- for i in range(n_flows):
- self.flows.append(modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True))
- self.flows.append(modules.Flip())
-
- def forward(self, x, x_mask, g=None, reverse=False):
- if not reverse:
- for flow in self.flows:
- x, _ = flow(x, x_mask, g=g, reverse=reverse)
- else:
- for flow in reversed(self.flows):
- x = flow(x, x_mask, g=g, reverse=reverse)
- return x
-
-
-class Encoder(nn.Module):
- def __init__(self,
- in_channels,
- out_channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- gin_channels=0):
- super().__init__()
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.gin_channels = gin_channels
-
- self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
- self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels)
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
-
- def forward(self, x, x_lengths, g=None):
- # print(x.shape,x_lengths.shape)
- x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
- x = self.pre(x) * x_mask
- x = self.enc(x, x_mask, g=g)
- stats = self.proj(x) * x_mask
- m, logs = torch.split(stats, self.out_channels, dim=1)
- z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
- return z, m, logs, x_mask
-
-
-class TextEncoder(nn.Module):
- def __init__(self,
- in_channels,
- out_channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- gin_channels=0,
- filter_channels=None,
- n_heads=None,
- p_dropout=None):
- super().__init__()
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.gin_channels = gin_channels
- self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
- self.f0_emb = nn.Embedding(256, hidden_channels)
-
- self.enc_ = attentions.Encoder(
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout)
-
- def forward(self, x, x_lengths, f0=None):
- x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
- x = self.pre(x) * x_mask
- x = x + self.f0_emb(f0).transpose(1,2)
- x = self.enc_(x * x_mask, x_mask)
- stats = self.proj(x) * x_mask
- m, logs = torch.split(stats, self.out_channels, dim=1)
- z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
-
- return z, m, logs, x_mask
-
-
-
-class DiscriminatorP(torch.nn.Module):
- def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
- super(DiscriminatorP, self).__init__()
- self.period = period
- self.use_spectral_norm = use_spectral_norm
- norm_f = spectral_norm if use_spectral_norm else weight_norm
- self.convs = nn.ModuleList([
- norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
- norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
- norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
- norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
- norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))),
- ])
- self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
-
- def forward(self, x):
- fmap = []
-
- # 1d to 2d
- b, c, t = x.shape
- if t % self.period != 0: # pad first
- n_pad = self.period - (t % self.period)
- x = F.pad(x, (0, n_pad), "reflect")
- t = t + n_pad
- x = x.view(b, c, t // self.period, self.period)
-
- for l in self.convs:
- x = l(x)
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- fmap.append(x)
- x = self.conv_post(x)
- fmap.append(x)
- x = torch.flatten(x, 1, -1)
-
- return x, fmap
-
-
-class DiscriminatorS(torch.nn.Module):
- def __init__(self, use_spectral_norm=False):
- super(DiscriminatorS, self).__init__()
- norm_f = spectral_norm if use_spectral_norm else weight_norm
- self.convs = nn.ModuleList([
- norm_f(Conv1d(1, 16, 15, 1, padding=7)),
- norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
- norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
- norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
- norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
- norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
- ])
- self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
-
- def forward(self, x):
- fmap = []
-
- for l in self.convs:
- x = l(x)
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- fmap.append(x)
- x = self.conv_post(x)
- fmap.append(x)
- x = torch.flatten(x, 1, -1)
-
- return x, fmap
-
-
-class MultiPeriodDiscriminator(torch.nn.Module):
- def __init__(self, use_spectral_norm=False):
- super(MultiPeriodDiscriminator, self).__init__()
- periods = [2,3,5,7,11]
-
- discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
- discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods]
- self.discriminators = nn.ModuleList(discs)
-
- def forward(self, y, y_hat):
- y_d_rs = []
- y_d_gs = []
- fmap_rs = []
- fmap_gs = []
- for i, d in enumerate(self.discriminators):
- y_d_r, fmap_r = d(y)
- y_d_g, fmap_g = d(y_hat)
- y_d_rs.append(y_d_r)
- y_d_gs.append(y_d_g)
- fmap_rs.append(fmap_r)
- fmap_gs.append(fmap_g)
-
- return y_d_rs, y_d_gs, fmap_rs, fmap_gs
-
-
-class SpeakerEncoder(torch.nn.Module):
- def __init__(self, mel_n_channels=80, model_num_layers=3, model_hidden_size=256, model_embedding_size=256):
- super(SpeakerEncoder, self).__init__()
- self.lstm = nn.LSTM(mel_n_channels, model_hidden_size, model_num_layers, batch_first=True)
- self.linear = nn.Linear(model_hidden_size, model_embedding_size)
- self.relu = nn.ReLU()
-
- def forward(self, mels):
- self.lstm.flatten_parameters()
- _, (hidden, _) = self.lstm(mels)
- embeds_raw = self.relu(self.linear(hidden[-1]))
- return embeds_raw / torch.norm(embeds_raw, dim=1, keepdim=True)
-
- def compute_partial_slices(self, total_frames, partial_frames, partial_hop):
- mel_slices = []
- for i in range(0, total_frames-partial_frames, partial_hop):
- mel_range = torch.arange(i, i+partial_frames)
- mel_slices.append(mel_range)
-
- return mel_slices
-
- def embed_utterance(self, mel, partial_frames=128, partial_hop=64):
- mel_len = mel.size(1)
- last_mel = mel[:,-partial_frames:]
-
- if mel_len > partial_frames:
- mel_slices = self.compute_partial_slices(mel_len, partial_frames, partial_hop)
- mels = list(mel[:,s] for s in mel_slices)
- mels.append(last_mel)
- mels = torch.stack(tuple(mels), 0).squeeze(1)
-
- with torch.no_grad():
- partial_embeds = self(mels)
- embed = torch.mean(partial_embeds, axis=0).unsqueeze(0)
- #embed = embed / torch.linalg.norm(embed, 2)
- else:
- with torch.no_grad():
- embed = self(last_mel)
-
- return embed
-
-
-class SynthesizerTrn(nn.Module):
- """
- Synthesizer for Training
- """
-
- def __init__(self,
- spec_channels,
- segment_size,
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- gin_channels,
- ssl_dim,
- n_speakers,
- **kwargs):
-
- super().__init__()
- self.spec_channels = spec_channels
- self.inter_channels = inter_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.resblock = resblock
- self.resblock_kernel_sizes = resblock_kernel_sizes
- self.resblock_dilation_sizes = resblock_dilation_sizes
- self.upsample_rates = upsample_rates
- self.upsample_initial_channel = upsample_initial_channel
- self.upsample_kernel_sizes = upsample_kernel_sizes
- self.segment_size = segment_size
- self.gin_channels = gin_channels
- self.ssl_dim = ssl_dim
- self.emb_g = nn.Embedding(n_speakers, gin_channels)
-
- self.enc_p_ = TextEncoder(ssl_dim, inter_channels, hidden_channels, 5, 1, 16, 0, filter_channels, n_heads, p_dropout)
- hps = {
- "sampling_rate": 48000,
- "inter_channels": 192,
- "resblock": "1",
- "resblock_kernel_sizes": [3, 7, 11],
- "resblock_dilation_sizes": [[1, 3, 5], [1, 3, 5], [1, 3, 5]],
- "upsample_rates": [10, 8, 2, 2],
- "upsample_initial_channel": 512,
- "upsample_kernel_sizes": [16, 16, 4, 4],
- "gin_channels": 256,
- }
- self.dec = Generator(h=hps)
- self.enc_q = Encoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels)
- self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels)
-
- def forward(self, c, f0, spec, g=None, mel=None, c_lengths=None, spec_lengths=None):
- if c_lengths is None:
- c_lengths = (torch.ones(c.size(0)) * c.size(-1)).to(c.device)
- if spec_lengths is None:
- spec_lengths = (torch.ones(spec.size(0)) * spec.size(-1)).to(spec.device)
-
- g = self.emb_g(g).transpose(1,2)
-
- z_ptemp, m_p, logs_p, _ = self.enc_p_(c, c_lengths, f0=f0_to_coarse(f0))
- z, m_q, logs_q, spec_mask = self.enc_q(spec, spec_lengths, g=g)
-
- z_p = self.flow(z, spec_mask, g=g)
- z_slice, pitch_slice, ids_slice = commons.rand_slice_segments_with_pitch(z, f0, spec_lengths, self.segment_size)
-
- # o = self.dec(z_slice, g=g)
- o = self.dec(z_slice, g=g, f0=pitch_slice)
-
- return o, ids_slice, spec_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
-
- def infer(self, c, f0, g=None, mel=None, c_lengths=None):
- if c_lengths is None:
- c_lengths = (torch.ones(c.size(0)) * c.size(-1)).to(c.device)
- g = self.emb_g(g).transpose(1,2)
-
- z_p, m_p, logs_p, c_mask = self.enc_p_(c, c_lengths, f0=f0_to_coarse(f0))
- z = self.flow(z_p, c_mask, g=g, reverse=True)
-
- o = self.dec(z * c_mask, g=g, f0=f0)
-
- return o
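
For reference, the "1d to 2d" step in DiscriminatorP.forward above reflection-pads the time axis up to a multiple of the period before viewing it as a 2-D map. A toy illustration of the arithmetic:

    import torch
    import torch.nn.functional as F

    x = torch.randn(1, 1, 100)       # (batch, channels, samples)
    period = 3
    b, c, t = x.shape
    n_pad = period - (t % period)    # 100 % 3 == 1, so pad 2 samples
    x = F.pad(x, (0, n_pad), "reflect")
    x = x.view(b, c, (t + n_pad) // period, period)
    print(x.shape)                   # torch.Size([1, 1, 34, 3])
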
diff --git a/spaces/Miuzarte/SUI-svc-4.0/inference/infer_tool.py b/spaces/Miuzarte/SUI-svc-4.0/inference/infer_tool.py
deleted file mode 100644
index dbaff46f4f6eb792808e0a0cbb37fb86cb8372e2..0000000000000000000000000000000000000000
--- a/spaces/Miuzarte/SUI-svc-4.0/inference/infer_tool.py
+++ /dev/null
@@ -1,233 +0,0 @@
-import hashlib
-import io
-import json
-import logging
-import os
-import time
-from pathlib import Path
-from inference import slicer
-
-import librosa
-import numpy as np
-# import onnxruntime
-import parselmouth
-import soundfile
-import torch
-import torchaudio
-
-import cluster
-from hubert import hubert_model
-import utils
-from models import SynthesizerTrn
-
-logging.getLogger('matplotlib').setLevel(logging.WARNING)
-
-
-def read_temp(file_name):
- if not os.path.exists(file_name):
- with open(file_name, "w") as f:
- f.write(json.dumps({"info": "temp_dict"}))
- return {}
- else:
- try:
- with open(file_name, "r") as f:
- data = f.read()
- data_dict = json.loads(data)
- if os.path.getsize(file_name) > 50 * 1024 * 1024:
- f_name = file_name.replace("\\", "/").split("/")[-1]
- print(f"clean {f_name}")
- for wav_hash in list(data_dict.keys()):
- if int(time.time()) - int(data_dict[wav_hash]["time"]) > 14 * 24 * 3600:
- del data_dict[wav_hash]
- except Exception as e:
- print(e)
- print(f"{file_name} error,auto rebuild file")
- data_dict = {"info": "temp_dict"}
- return data_dict
-
-
-def write_temp(file_name, data):
- with open(file_name, "w") as f:
- f.write(json.dumps(data))
-
-
-def timeit(func):
- def run(*args, **kwargs):
- t = time.time()
- res = func(*args, **kwargs)
- print('executing \'%s\' costed %.3fs' % (func.__name__, time.time() - t))
- return res
-
- return run
-
-
-def format_wav(audio_path):
- if Path(audio_path).suffix == '.wav':
- return
- raw_audio, raw_sample_rate = librosa.load(audio_path, mono=True, sr=None)
- soundfile.write(Path(audio_path).with_suffix(".wav"), raw_audio, raw_sample_rate)
-
-
-def get_end_file(dir_path, end):
- file_lists = []
- for root, dirs, files in os.walk(dir_path):
- files = [f for f in files if f[0] != '.']
- dirs[:] = [d for d in dirs if d[0] != '.']
- for f_file in files:
- if f_file.endswith(end):
- file_lists.append(os.path.join(root, f_file).replace("\\", "/"))
- return file_lists
-
-
-def get_md5(content):
- return hashlib.new("md5", content).hexdigest()
-
-def fill_a_to_b(a, b):
- if len(a) < len(b):
- for _ in range(0, len(b) - len(a)):
- a.append(a[0])
-
-def mkdir(paths: list):
- for path in paths:
- if not os.path.exists(path):
- os.mkdir(path)
-
-
-class Svc(object):
- def __init__(self, net_g_path, config_path,
- device=None,
- cluster_model_path="logs/44k/kmeans_10000.pt"):
- self.net_g_path = net_g_path
- if device is None:
- self.dev = torch.device("cuda" if torch.cuda.is_available() else "cpu")
- else:
- self.dev = torch.device(device)
- self.net_g_ms = None
- self.hps_ms = utils.get_hparams_from_file(config_path)
- self.target_sample = self.hps_ms.data.sampling_rate
- self.hop_size = self.hps_ms.data.hop_length
- self.spk2id = self.hps_ms.spk
- # load the HuBERT content encoder
- self.hubert_model = utils.get_hubert_model().to(self.dev)
- self.load_model()
- if os.path.exists(cluster_model_path):
- self.cluster_model = cluster.get_cluster_model(cluster_model_path)
-
- def load_model(self):
- # get model configuration
- self.net_g_ms = SynthesizerTrn(
- self.hps_ms.data.filter_length // 2 + 1,
- self.hps_ms.train.segment_size // self.hps_ms.data.hop_length,
- **self.hps_ms.model)
- _ = utils.load_checkpoint(self.net_g_path, self.net_g_ms, None)
- if "half" in self.net_g_path and torch.cuda.is_available():
- _ = self.net_g_ms.half().eval().to(self.dev)
- else:
- _ = self.net_g_ms.eval().to(self.dev)
-
-
-
- def get_unit_f0(self, in_path, tran, cluster_infer_ratio, speaker):
-
- wav, sr = librosa.load(in_path, sr=self.target_sample)
-
- f0 = utils.compute_f0_parselmouth(wav, sampling_rate=self.target_sample, hop_length=self.hop_size)
- f0, uv = utils.interpolate_f0(f0)
- f0 = torch.FloatTensor(f0)
- uv = torch.FloatTensor(uv)
- f0 = f0 * 2 ** (tran / 12)
- f0 = f0.unsqueeze(0).to(self.dev)
- uv = uv.unsqueeze(0).to(self.dev)
-
- wav16k = librosa.resample(wav, orig_sr=self.target_sample, target_sr=16000)
- wav16k = torch.from_numpy(wav16k).to(self.dev)
- c = utils.get_hubert_content(self.hubert_model, wav_16k_tensor=wav16k)
- c = utils.repeat_expand_2d(c.squeeze(0), f0.shape[1])
-
- if cluster_infer_ratio != 0:
- # move c to CPU before .numpy(), and put the cluster centers back on the
- # inference device so the blend below does not mix devices
- cluster_c = cluster.get_cluster_center_result(self.cluster_model, c.cpu().numpy().T, speaker).T
- cluster_c = torch.FloatTensor(cluster_c).to(self.dev)
- c = cluster_infer_ratio * cluster_c + (1 - cluster_infer_ratio) * c
-
- c = c.unsqueeze(0)
- return c, f0, uv
-
- def infer(self, speaker, tran, raw_path,
- cluster_infer_ratio=0,
- auto_predict_f0=False,
- noice_scale=0.4):
- speaker_id = self.spk2id[speaker]
- sid = torch.LongTensor([int(speaker_id)]).to(self.dev).unsqueeze(0)
- c, f0, uv = self.get_unit_f0(raw_path, tran, cluster_infer_ratio, speaker)
- if "half" in self.net_g_path and torch.cuda.is_available():
- c = c.half()
- with torch.no_grad():
- start = time.time()
- audio = self.net_g_ms.infer(c, f0=f0, g=sid, uv=uv, predict_f0=auto_predict_f0, noice_scale=noice_scale)[0,0].data.float()
- use_time = time.time() - start
- print("vits use time:{}".format(use_time))
- return audio, audio.shape[-1]
-
- def slice_inference(self,raw_audio_path, spk, tran, slice_db,cluster_infer_ratio, auto_predict_f0,noice_scale, pad_seconds=0.5):
- wav_path = raw_audio_path
- chunks = slicer.cut(wav_path, db_thresh=slice_db)
- audio_data, audio_sr = slicer.chunks2audio(wav_path, chunks)
-
- audio = []
- for (slice_tag, data) in audio_data:
- print(f'#=====segment start, {round(len(data) / audio_sr, 3)}s======')
- # pad both ends of the chunk
- pad_len = int(audio_sr * pad_seconds)
- data = np.concatenate([np.zeros([pad_len]), data, np.zeros([pad_len])])
- length = int(np.ceil(len(data) / audio_sr * self.target_sample))
- raw_path = io.BytesIO()
- soundfile.write(raw_path, data, audio_sr, format="wav")
- raw_path.seek(0)
- if slice_tag:
- print('skip empty segment')
- _audio = np.zeros(length)
- else:
- out_audio, out_sr = self.infer(spk, tran, raw_path,
- cluster_infer_ratio=cluster_infer_ratio,
- auto_predict_f0=auto_predict_f0,
- noice_scale=noice_scale
- )
- _audio = out_audio.cpu().numpy()
-
- pad_len = int(self.target_sample * pad_seconds)
- _audio = _audio[pad_len:-pad_len]
- audio.extend(list(_audio))
- return np.array(audio)
-
-
-class RealTimeVC:
- def __init__(self):
- self.last_chunk = None
- self.last_o = None
- self.chunk_len = 16000 # chunk length
- self.pre_len = 3840 # crossfade length, a multiple of 640
-
- """Both input and output are 1-D numpy waveform arrays."""
-
- def process(self, svc_model, speaker_id, f_pitch_change, input_wav_path):
- import maad
- audio, sr = torchaudio.load(input_wav_path)
- audio = audio.cpu().numpy()[0]
- temp_wav = io.BytesIO()
- if self.last_chunk is None:
- input_wav_path.seek(0)
- audio, sr = svc_model.infer(speaker_id, f_pitch_change, input_wav_path)
- audio = audio.cpu().numpy()
- self.last_chunk = audio[-self.pre_len:]
- self.last_o = audio
- return audio[-self.chunk_len:]
- else:
- audio = np.concatenate([self.last_chunk, audio])
- soundfile.write(temp_wav, audio, sr, format="wav")
- temp_wav.seek(0)
- audio, sr = svc_model.infer(speaker_id, f_pitch_change, temp_wav)
- audio = audio.cpu().numpy()
- ret = maad.util.crossfade(self.last_o, audio, self.pre_len)
- self.last_chunk = audio[-self.pre_len:]
- self.last_o = audio
- return ret[self.chunk_len:2 * self.chunk_len]
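
The pitch shift in get_unit_f0() above, f0 * 2 ** (tran / 12), is standard equal-temperament transposition: each semitone scales F0 by 2^(1/12). A worked check:

    f0 = 220.0  # A3 in Hz
    for tran in (0, 12, -12):
        print(tran, f0 * 2 ** (tran / 12))  # 220.0, then 440.0, then 110.0
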
diff --git a/spaces/MohamedRafik/Password_Generator/README.md b/spaces/MohamedRafik/Password_Generator/README.md
deleted file mode 100644
index ebf6fac8ef39209014333e3280c3f3dd64487f9c..0000000000000000000000000000000000000000
--- a/spaces/MohamedRafik/Password_Generator/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Password Generator
-emoji: 🔥
-colorFrom: indigo
-colorTo: indigo
-sdk: gradio
-sdk_version: 3.0.24
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/MoonQiu/LongerCrafter/scripts/evaluation/inference_freenoise.py b/spaces/MoonQiu/LongerCrafter/scripts/evaluation/inference_freenoise.py
deleted file mode 100644
index 2694fc6d68221059355b5d24fa40a8ec5f96cf5f..0000000000000000000000000000000000000000
--- a/spaces/MoonQiu/LongerCrafter/scripts/evaluation/inference_freenoise.py
+++ /dev/null
@@ -1,147 +0,0 @@
-import argparse, os, sys, glob, yaml, math, random
-import datetime, time
-import numpy as np
-from omegaconf import OmegaConf
-from collections import OrderedDict
-from tqdm import trange, tqdm
-from einops import repeat
-from einops import rearrange, repeat
-from functools import partial
-import torch
-from pytorch_lightning import seed_everything
-
-from funcs import load_model_checkpoint, load_prompts, load_image_batch, get_filelist, save_videos
-from funcs import batch_ddim_sampling_freenoise
-from utils.utils import instantiate_from_config
-
-
-def get_parser():
- parser = argparse.ArgumentParser()
- parser.add_argument("--seed", type=int, default=20230211, help="seed for seed_everything")
- parser.add_argument("--mode", default="base", type=str, help="which kind of inference mode: {'base', 'i2v'}")
- parser.add_argument("--ckpt_path", type=str, default=None, help="checkpoint path")
- parser.add_argument("--config", type=str, help="config (yaml) path")
- parser.add_argument("--prompt_file", type=str, default=None, help="a text file containing many prompts")
- parser.add_argument("--savedir", type=str, default=None, help="results saving path")
- parser.add_argument("--savefps", type=str, default=10, help="video fps to generate")
- parser.add_argument("--n_samples", type=int, default=1, help="num of samples per prompt",)
- parser.add_argument("--ddim_steps", type=int, default=50, help="steps of ddim if positive, otherwise use DDPM",)
- parser.add_argument("--ddim_eta", type=float, default=1.0, help="eta for ddim sampling (0.0 yields deterministic sampling)",)
- parser.add_argument("--bs", type=int, default=1, help="batch size for inference")
- parser.add_argument("--height", type=int, default=512, help="image height, in pixel space")
- parser.add_argument("--width", type=int, default=512, help="image width, in pixel space")
- parser.add_argument("--frames", type=int, default=-1, help="frames num to inference")
- parser.add_argument("--fps", type=int, default=24)
- parser.add_argument("--unconditional_guidance_scale", type=float, default=1.0, help="prompt classifier-free guidance")
- parser.add_argument("--unconditional_guidance_scale_temporal", type=float, default=None, help="temporal consistency guidance")
- ## for conditional i2v only
- parser.add_argument("--cond_input", type=str, default=None, help="data dir of conditional input")
- ## for freenoise only
- parser.add_argument("--window_size", type=int, default=16, help="window_size")
- parser.add_argument("--window_stride", type=int, default=4, help="window_stride")
- return parser
-
-
-def run_inference(args, gpu_num, gpu_no, **kwargs):
- ## step 1: model config
- ## -----------------------------------------------------------------
- config = OmegaConf.load(args.config)
- #data_config = config.pop("data", OmegaConf.create())
- model_config = config.pop("model", OmegaConf.create())
- model = instantiate_from_config(model_config)
- model = model.cuda(gpu_no)
- assert os.path.exists(args.ckpt_path), f"Error: checkpoint [{args.ckpt_path}] Not Found!"
- model = load_model_checkpoint(model, args.ckpt_path)
- model.eval()
-
- ## sample shape
- assert (args.height % 16 == 0) and (args.width % 16 == 0), "Error: image size [h,w] should be multiples of 16!"
- ## latent noise shape
- h, w = args.height // 8, args.width // 8
- frames = model.temporal_length if args.frames < 0 else args.frames
- channels = model.channels
-
- ## saving folders
- os.makedirs(args.savedir, exist_ok=True)
-
- ## step 2: load data
- ## -----------------------------------------------------------------
- assert os.path.exists(args.prompt_file), "Error: prompt file NOT Found!"
- prompt_list = load_prompts(args.prompt_file)
- num_samples = len(prompt_list)
- filename_list = [f"{id+1:04d}" for id in range(num_samples)]
-
- samples_split = num_samples // gpu_num
- residual_tail = num_samples % gpu_num
- print(f'[rank:{gpu_no}] {samples_split}/{num_samples} samples loaded.')
- indices = list(range(samples_split*gpu_no, samples_split*(gpu_no+1)))
- if gpu_no == 0 and residual_tail != 0:
- indices = indices + list(range(num_samples-residual_tail, num_samples))
- prompt_list_rank = [prompt_list[i] for i in indices]
-
- ## conditional input
- if args.mode == "i2v":
- ## each video or frames dir per prompt
- cond_inputs = get_filelist(args.cond_input, ext='[mpj][pn][4gj]') # suffix pattern matching mp4 / png / jpg
- assert len(cond_inputs) == num_samples, f"Error: conditional input ({len(cond_inputs)}) NOT match prompt ({num_samples})!"
- filename_list = [f"{os.path.split(cond_inputs[id])[-1][:-4]}" for id in range(num_samples)]
- cond_inputs_rank = [cond_inputs[i] for i in indices]
-
- filename_list_rank = [filename_list[i] for i in indices]
-
- ## step 3: run over samples
- ## -----------------------------------------------------------------
- start = time.time()
- n_rounds = len(prompt_list_rank) // args.bs
- n_rounds = n_rounds+1 if len(prompt_list_rank) % args.bs != 0 else n_rounds
-
- x_T_total = torch.randn([args.n_samples, 1, channels, frames, h, w], device=model.device).repeat(1, args.bs, 1, 1, 1, 1)
- for frame_index in range(args.window_size, args.frames, args.window_stride):
- list_index = list(range(frame_index-args.window_size, frame_index+args.window_stride-args.window_size))
- random.shuffle(list_index)
- x_T_total[:, :, :, frame_index:frame_index+args.window_stride] = x_T_total[:, :, :, list_index]
-
- for idx in range(0, n_rounds):
- print(f'[rank:{gpu_no}] batch-{idx+1} ({args.bs})x{args.n_samples} ...')
- idx_s = idx*args.bs
- idx_e = min(idx_s+args.bs, len(prompt_list_rank))
- batch_size = idx_e - idx_s
- filenames = filename_list_rank[idx_s:idx_e]
- noise_shape = [batch_size, channels, frames, h, w]
- fps = torch.tensor([args.fps]*batch_size).to(model.device).long()
-
- prompts = prompt_list_rank[idx_s:idx_e]
- if isinstance(prompts, str):
- prompts = [prompts]
- #prompts = batch_size * [""]
- text_emb = model.get_learned_conditioning(prompts)
-
- if args.mode == 'base':
- cond = {"c_crossattn": [text_emb], "fps": fps}
- elif args.mode == 'i2v':
- #cond_images = torch.zeros(noise_shape[0],3,224,224).to(model.device)
- cond_images = load_image_batch(cond_inputs_rank[idx_s:idx_e], (args.height, args.width))
- cond_images = cond_images.to(model.device)
- img_emb = model.get_image_embeds(cond_images)
- imtext_cond = torch.cat([text_emb, img_emb], dim=1)
- cond = {"c_crossattn": [imtext_cond], "fps": fps}
- else:
- raise NotImplementedError
-
- ## inference
- batch_samples = batch_ddim_sampling_freenoise(model, cond, noise_shape, args.n_samples, \
- args.ddim_steps, args.ddim_eta, args.unconditional_guidance_scale, args=args, x_T_total=x_T_total, **kwargs)
- ## b,samples,c,t,h,w
- save_videos(batch_samples, args.savedir, filenames, fps=args.savefps)
-
- print(f"Saved in {args.savedir}. Time used: {(time.time() - start):.2f} seconds")
-
-
-if __name__ == '__main__':
- now = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
- print("@CoLVDM Inference: %s"%now)
- parser = get_parser()
- args = parser.parse_args()
- seed_everything(args.seed)
- rank, gpu_num = 0, 1
- run_inference(args, gpu_num, rank)
\ No newline at end of file
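The sliding-window shuffle over `frame_index` above is the core FreeNoise trick: frames beyond the first window reuse earlier initial noise in a locally shuffled order, so a long video is denoised from temporally correlated noise instead of fresh noise per frame. A minimal standalone sketch of that rescheduling step, assuming the `[..., C, T, H, W]` layout of `x_T_total`; the helper name `shuffle_initial_noise` is illustrative, not from the script:

```python
import torch

def shuffle_initial_noise(x_T, window_size, window_stride, total_frames, seed=None):
    """Fill frames past `window_size` with shuffled copies of earlier noise.

    x_T: initial noise shaped [..., C, T, H, W]; modified in place and returned.
    """
    gen = torch.Generator().manual_seed(seed) if seed is not None else None
    for frame_index in range(window_size, total_frames, window_stride):
        # the window_stride frames that sit one window back from frame_index
        src = list(range(frame_index - window_size,
                         frame_index - window_size + window_stride))
        perm = torch.randperm(len(src), generator=gen).tolist()
        src = [src[i] for i in perm]
        # copy the shuffled source frames into the next stride of frames
        x_T[..., frame_index:frame_index + window_stride, :, :] = x_T[..., src, :, :]
    return x_T

# e.g. 64-frame noise for a 4-channel latent at 8x8
noise = shuffle_initial_noise(torch.randn(1, 4, 64, 8, 8),
                              window_size=16, window_stride=4, total_frames=64)
```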
diff --git a/spaces/Mountchicken/MAERec-Gradio/mmocr/models/textrecog/backbones/resnet.py b/spaces/Mountchicken/MAERec-Gradio/mmocr/models/textrecog/backbones/resnet.py
deleted file mode 100644
index bb17a8cdcbb732cc04674106fc043560555bec2e..0000000000000000000000000000000000000000
--- a/spaces/Mountchicken/MAERec-Gradio/mmocr/models/textrecog/backbones/resnet.py
+++ /dev/null
@@ -1,264 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-from typing import Dict, List, Optional, Tuple, Union
-
-import torch
-from mmcv.cnn import ConvModule, build_plugin_layer
-from mmengine.model import BaseModule, Sequential
-
-import mmocr.utils as utils
-from mmocr.models.textrecog.layers import BasicBlock
-from mmocr.registry import MODELS
-
-
-@MODELS.register_module()
-class ResNet(BaseModule):
- """
- Args:
- in_channels (int): Number of channels of input image tensor.
- stem_channels (int or list[int]): Channels in each stem layer. E.g.,
- [64, 128] stands for 64 and 128 channels in the first and second
- stem layers. If an int, a single stem layer is created.
- block_cfgs (dict): Configs of the residual blocks.
- arch_layers (list[int]): List of Block number for each stage.
- arch_channels (list[int]): List of channels for each stage.
- strides (Sequence[int] or Sequence[tuple]): Strides of the first block
- of each stage.
- out_indices (Sequence[int], optional): Indices of output stages. If not
- specified, only the last stage will be returned.
- plugins (dict, optional): Configs of stage plugins
- init_cfg (dict or list[dict], optional): Initialization config dict.
- """
-
- def __init__(self,
- in_channels: int,
- stem_channels: List[int],
- block_cfgs: dict,
- arch_layers: List[int],
- arch_channels: List[int],
- strides: Union[List[int], List[Tuple]],
- out_indices: Optional[List[int]] = None,
- plugins: Optional[Dict] = None,
- init_cfg: Optional[Union[Dict, List[Dict]]] = [
- dict(type='Xavier', layer='Conv2d'),
- dict(type='Constant', val=1, layer='BatchNorm2d'),
- ]):
- super().__init__(init_cfg=init_cfg)
- assert isinstance(in_channels, int)
- assert isinstance(stem_channels, int) or utils.is_type_list(
- stem_channels, int)
- assert utils.is_type_list(arch_layers, int)
- assert utils.is_type_list(arch_channels, int)
- assert utils.is_type_list(strides, tuple) or utils.is_type_list(
- strides, int)
- assert len(arch_layers) == len(arch_channels) == len(strides)
- assert out_indices is None or isinstance(out_indices, (list, tuple))
-
- self.out_indices = out_indices
- self._make_stem_layer(in_channels, stem_channels)
- self.num_stages = len(arch_layers)
- self.use_plugins = False
- self.arch_channels = arch_channels
- self.res_layers = []
- if plugins is not None:
- self.plugin_ahead_names = []
- self.plugin_after_names = []
- self.use_plugins = True
- for i, num_blocks in enumerate(arch_layers):
- stride = strides[i]
- channel = arch_channels[i]
-
- if self.use_plugins:
- self._make_stage_plugins(plugins, stage_idx=i)
-
- res_layer = self._make_layer(
- block_cfgs=block_cfgs,
- inplanes=self.inplanes,
- planes=channel,
- blocks=num_blocks,
- stride=stride,
- )
- self.inplanes = channel
- layer_name = f'layer{i + 1}'
- self.add_module(layer_name, res_layer)
- self.res_layers.append(layer_name)
-
- def _make_layer(self, block_cfgs: Dict, inplanes: int, planes: int,
- blocks: int, stride: int) -> Sequential:
- """Build resnet layer.
-
- Args:
- block_cfgs (dict): Configs of blocks.
- inplanes (int): Number of input channels.
- planes (int): Number of output channels.
- blocks (int): Number of blocks.
- stride (int): Stride of the first block.
-
- Returns:
- Sequential: A sequence of blocks.
- """
- layers = []
- downsample = None
- block_cfgs_ = block_cfgs.copy()
- if isinstance(stride, int):
- stride = (stride, stride)
-
- if stride[0] != 1 or stride[1] != 1 or inplanes != planes:
- downsample = ConvModule(
- inplanes,
- planes,
- 1,
- stride,
- norm_cfg=dict(type='BN'),
- act_cfg=None)
-
- if block_cfgs_['type'] == 'BasicBlock':
- block = BasicBlock
- block_cfgs_.pop('type')
- else:
- raise ValueError('{} not implemented yet'.format(block_cfgs_['type']))
-
- layers.append(
- block(
- inplanes,
- planes,
- stride=stride,
- downsample=downsample,
- **block_cfgs_))
- inplanes = planes
- for _ in range(1, blocks):
- layers.append(block(inplanes, planes, **block_cfgs_))
-
- return Sequential(*layers)
-
- def _make_stem_layer(self, in_channels: int,
- stem_channels: Union[int, List[int]]) -> None:
- """Make stem layers.
-
- Args:
- in_channels (int): Number of input channels.
- stem_channels (list[int] or int): List of channels in each stem
- layer. If int, only one stem layer will be created.
- """
- if isinstance(stem_channels, int):
- stem_channels = [stem_channels]
- stem_layers = []
- for _, channels in enumerate(stem_channels):
- stem_layer = ConvModule(
- in_channels,
- channels,
- kernel_size=3,
- stride=1,
- padding=1,
- bias=False,
- norm_cfg=dict(type='BN'),
- act_cfg=dict(type='ReLU'))
- in_channels = channels
- stem_layers.append(stem_layer)
- self.stem_layers = Sequential(*stem_layers)
- self.inplanes = stem_channels[-1]
-
- def _make_stage_plugins(self, plugins: List[Dict], stage_idx: int) -> None:
- """Make plugins for ResNet ``stage_idx``th stage.
-
- Currently we support inserting ``nn.Maxpooling``,
- ``mmcv.cnn.Convmodule``into the backbone. Originally designed
- for ResNet31-like architectures.
-
- Examples:
- >>> plugins=[
- ... dict(cfg=dict(type="Maxpooling", arg=(2,2)),
- ... stages=(True, True, False, False),
- ... position='before_stage'),
- ... dict(cfg=dict(type="Maxpooling", arg=(2,1)),
- ... stages=(False, False, True, False),
- ... position='before_stage'),
- ... dict(cfg=dict(
- ... type='ConvModule',
- ... kernel_size=3,
- ... stride=1,
- ... padding=1,
- ... norm_cfg=dict(type='BN'),
- ... act_cfg=dict(type='ReLU')),
- ... stages=(True, True, True, True),
- ... position='after_stage')]
-
- Suppose ``stage_idx=1``, the structure of stage would be:
-
- .. code-block:: none
-
- Maxpooling -> A set of Basicblocks -> ConvModule
-
- Args:
- plugins (list[dict]): List of plugin configs to build.
- stage_idx (int): Index of stage to build
- """
- in_channels = self.arch_channels[stage_idx]
- self.plugin_ahead_names.append([])
- self.plugin_after_names.append([])
- for plugin in plugins:
- plugin = plugin.copy()
- stages = plugin.pop('stages', None)
- position = plugin.pop('position', None)
- assert stages is None or len(stages) == self.num_stages
- if stages is None or stages[stage_idx]:
- if position == 'before_stage':
- name, layer = build_plugin_layer(
- plugin['cfg'],
- f'_before_stage_{stage_idx+1}',
- in_channels=in_channels,
- out_channels=in_channels)
- self.plugin_ahead_names[stage_idx].append(name)
- self.add_module(name, layer)
- elif position == 'after_stage':
- name, layer = build_plugin_layer(
- plugin['cfg'],
- f'_after_stage_{stage_idx+1}',
- in_channels=in_channels,
- out_channels=in_channels)
- self.plugin_after_names[stage_idx].append(name)
- self.add_module(name, layer)
- else:
- raise ValueError('incorrect plugin position')
-
- def forward_plugin(self, x: torch.Tensor,
- plugin_name: List[str]) -> torch.Tensor:
- """Forward tensor through plugin.
-
- Args:
- x (torch.Tensor): Input tensor.
- plugin_name (list[str]): Name of plugins.
-
- Returns:
- torch.Tensor: Output tensor.
- """
- out = x
- for name in plugin_name:
- out = getattr(self, name)(out)
- return out
-
- def forward(self,
- x: torch.Tensor) -> Union[torch.Tensor, List[torch.Tensor]]:
- """
- Args:
- x (Tensor): Image tensor of shape :math:`(N, 3, H, W)`.
-
- Returns:
- Tensor or list[Tensor]: Feature tensor. It can be a list of
- feature outputs at specific layers if ``out_indices`` is specified.
- """
- x = self.stem_layers(x)
-
- outs = []
- for i, layer_name in enumerate(self.res_layers):
- res_layer = getattr(self, layer_name)
- if not self.use_plugins:
- x = res_layer(x)
- if self.out_indices and i in self.out_indices:
- outs.append(x)
- else:
- x = self.forward_plugin(x, self.plugin_ahead_names[i])
- x = res_layer(x)
- x = self.forward_plugin(x, self.plugin_after_names[i])
- if self.out_indices and i in self.out_indices:
- outs.append(x)
-
- return tuple(outs) if self.out_indices else x
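For orientation, a config of the shape this backbone expects might look like the sketch below, assuming an MMOCR installation; the layer and channel numbers are illustrative, loosely in the spirit of ResNet31-style text-recognition backbones rather than an official preset:

```python
import torch
from mmocr.registry import MODELS  # assumes mmocr is installed

resnet_cfg = dict(
    type='ResNet',
    in_channels=3,
    stem_channels=[64, 128],             # two 3x3 stem convs
    block_cfgs=dict(type='BasicBlock'),  # only BasicBlock is implemented
    arch_layers=[1, 2, 5, 3],            # blocks per stage
    arch_channels=[256, 256, 512, 512],  # channels per stage
    strides=[1, 1, 1, 1],                # first-block stride per stage
    out_indices=None)                    # None -> return last stage only

backbone = MODELS.build(resnet_cfg)
feats = backbone(torch.randn(1, 3, 32, 100))  # e.g. a 32x100 text crop
```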
diff --git a/spaces/MrBodean/VoiceClone/vocoder/inference.py b/spaces/MrBodean/VoiceClone/vocoder/inference.py
deleted file mode 100644
index 7e546845da0b8cdb18b34fbd332b9aaa39cea55c..0000000000000000000000000000000000000000
--- a/spaces/MrBodean/VoiceClone/vocoder/inference.py
+++ /dev/null
@@ -1,64 +0,0 @@
-from vocoder.models.fatchord_version import WaveRNN
-from vocoder import hparams as hp
-import torch
-
-
-_model = None # type: WaveRNN
-
-def load_model(weights_fpath, verbose=True):
- global _model, _device
-
- if verbose:
- print("Building Wave-RNN")
- _model = WaveRNN(
- rnn_dims=hp.voc_rnn_dims,
- fc_dims=hp.voc_fc_dims,
- bits=hp.bits,
- pad=hp.voc_pad,
- upsample_factors=hp.voc_upsample_factors,
- feat_dims=hp.num_mels,
- compute_dims=hp.voc_compute_dims,
- res_out_dims=hp.voc_res_out_dims,
- res_blocks=hp.voc_res_blocks,
- hop_length=hp.hop_length,
- sample_rate=hp.sample_rate,
- mode=hp.voc_mode
- )
-
- if torch.cuda.is_available():
- _model = _model.cuda()
- _device = torch.device('cuda')
- else:
- _device = torch.device('cpu')
-
- if verbose:
- print("Loading model weights at %s" % weights_fpath)
- checkpoint = torch.load(weights_fpath, _device)
- _model.load_state_dict(checkpoint['model_state'])
- _model.eval()
-
-
-def is_loaded():
- return _model is not None
-
-
-def infer_waveform(mel, normalize=True, batched=True, target=8000, overlap=800,
- progress_callback=None):
- """
- Infers the waveform of a mel spectrogram output by the synthesizer (the format must match
- that of the synthesizer!)
-
- :param normalize: if True, scale the mel by hp.mel_max_abs_value before inference
- :param batched: if True, generate the waveform in overlapping batches (faster)
- :param target: number of samples per batch entry when batched
- :param overlap: number of overlapping samples between consecutive batch entries
- :return: the inferred waveform
- """
- if _model is None:
- raise Exception("Please load Wave-RNN in memory before using it")
-
- if normalize:
- mel = mel / hp.mel_max_abs_value
- mel = torch.from_numpy(mel[None, ...])
- wav = _model.generate(mel, batched, target, overlap, hp.mu_law, progress_callback)
- return wav
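Typical use of this module might look like the following sketch; the checkpoint and mel paths are placeholders, and the mel is a `[num_mels, T]` float array matching the synthesizer's output:

```python
import numpy as np
from vocoder import inference as vocoder

vocoder.load_model("saved_models/vocoder.pt")  # placeholder path
assert vocoder.is_loaded()

mel = np.load("example_mel.npy")  # placeholder file, shape [num_mels, T]
wav = vocoder.infer_waveform(mel, batched=True, target=8000, overlap=800)
print(len(wav))
```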
diff --git a/spaces/Mysterykey/todd/greeting.md b/spaces/Mysterykey/todd/greeting.md
deleted file mode 100644
index 3d5afd2f997fde45d6fd898c50c15da765f6e5b5..0000000000000000000000000000000000000000
--- a/spaces/Mysterykey/todd/greeting.md
+++ /dev/null
@@ -1 +0,0 @@
-
\ No newline at end of file
diff --git a/spaces/NCTCMumbai/NCTC/models/official/nlp/transformer/data_pipeline.py b/spaces/NCTCMumbai/NCTC/models/official/nlp/transformer/data_pipeline.py
deleted file mode 100644
index cedd2c309d3194a07841610f8f1039a1a1e7ac51..0000000000000000000000000000000000000000
--- a/spaces/NCTCMumbai/NCTC/models/official/nlp/transformer/data_pipeline.py
+++ /dev/null
@@ -1,316 +0,0 @@
-# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""Input pipeline for the transformer model to read, filter, and batch examples.
-
-Two things to note in the pipeline:
-
-1. Batching scheme
-
- The examples encoded in the TFRecord files contain data in the format:
- {"inputs": [variable length array of integers],
- "targets": [variable length array of integers]}
- Where integers in the arrays refer to tokens in the English and German vocab
- file (named `vocab.ende.32768`).
-
- Prior to batching, elements in the dataset are grouped by length (max between
- "inputs" and "targets" length). Each group is then batched such that:
- group_batch_size * length <= batch_size.
-
- Another way to view batch_size is the maximum number of tokens in each batch.
-
- Once batched, each element in the dataset will have the shape:
- {"inputs": [group_batch_size, padded_input_length],
- "targets": [group_batch_size, padded_target_length]}
- Lengths are padded to the longest "inputs" or "targets" sequence in the batch
- (padded_input_length and padded_target_length can be different).
-
- This batching scheme decreases the fraction of padding tokens per training
- batch, thus improving the training speed significantly.
-
-2. Shuffling
-
- While training, the dataset is shuffled in two places in the code. The first
- is the list of training files. Second, while reading records using
- `parallel_interleave`, the `sloppy` argument is used to generate randomness
- in the order of the examples.
-"""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import os
-
-from absl import logging
-import tensorflow as tf
-
-from official.utils.misc import model_helpers
-
-# Buffer size for reading records from a TFRecord file. Each training file is
-# 7.2 MB, so 8 MB allows an entire file to be kept in memory.
-_READ_RECORD_BUFFER = 8 * 1000 * 1000
-
-# Example grouping constants. Defines length boundaries for each group.
-# These values are the defaults used in Tensor2Tensor.
-_MIN_BOUNDARY = 8
-_BOUNDARY_SCALE = 1.1
-
-
-def _load_records(filename):
- """Read file and return a dataset of tf.Examples."""
- return tf.data.TFRecordDataset(filename, buffer_size=_READ_RECORD_BUFFER)
-
-
-def _parse_example(serialized_example):
- """Return inputs and targets Tensors from a serialized tf.Example."""
- data_fields = {
- "inputs": tf.io.VarLenFeature(tf.int64),
- "targets": tf.io.VarLenFeature(tf.int64)
- }
- parsed = tf.io.parse_single_example(serialized_example, data_fields)
- inputs = tf.sparse.to_dense(parsed["inputs"])
- targets = tf.sparse.to_dense(parsed["targets"])
- return inputs, targets
-
-
-def _filter_max_length(example, max_length=256):
- """Indicates whether the example's length is lower than the maximum length."""
- return tf.logical_and(tf.size(example[0]) <= max_length,
- tf.size(example[1]) <= max_length)
-
-
-def _get_example_length(example):
- """Returns the maximum length between the example inputs and targets."""
- length = tf.maximum(tf.shape(example[0])[0], tf.shape(example[1])[0])
- return length
-
-
-def _create_min_max_boundaries(
- max_length, min_boundary=_MIN_BOUNDARY, boundary_scale=_BOUNDARY_SCALE):
- """Create min and max boundary lists up to max_length.
-
- For example, when max_length=24, min_boundary=4 and boundary_scale=2, the
- returned values will be:
- buckets_min = [0, 4, 8, 16]
- buckets_max = [4, 8, 16, 25]
-
- Args:
- max_length: The maximum length of example in dataset.
- min_boundary: Minimum length in boundary.
- boundary_scale: Amount to scale consecutive boundaries in the list.
-
- Returns:
- min and max boundary lists
-
- """
- # Create bucket boundaries list by scaling the previous boundary or adding 1
- # (to ensure increasing boundary sizes).
- bucket_boundaries = []
- x = min_boundary
- while x < max_length:
- bucket_boundaries.append(x)
- x = max(x + 1, int(x * boundary_scale))
-
- # Create min and max boundary lists from the initial list.
- buckets_min = [0] + bucket_boundaries
- buckets_max = bucket_boundaries + [max_length + 1]
- return buckets_min, buckets_max
-
-
-def _batch_examples(dataset, batch_size, max_length):
- """Group examples by similar lengths, and return batched dataset.
-
- Each batch of similar-length examples are padded to the same length, and may
- have different number of elements in each batch, such that:
- group_batch_size * padded_length <= batch_size.
-
- This decreases the number of padding tokens per batch, which improves the
- training speed.
-
- Args:
- dataset: Dataset of unbatched examples.
- batch_size: Max number of tokens per batch of examples.
- max_length: Max number of tokens in an example input or target sequence.
-
- Returns:
- Dataset of batched examples with similar lengths.
- """
- # Get min and max boundary lists for each example. These are used to calculate
- # the `bucket_id`, which is the index at which:
- # buckets_min[bucket_id] <= len(example) < buckets_max[bucket_id]
- # Note that using both min and max lists improves the performance.
- buckets_min, buckets_max = _create_min_max_boundaries(max_length)
-
- # Create list of batch sizes for each bucket_id, so that
- # bucket_batch_size[bucket_id] * buckets_max[bucket_id] <= batch_size
- bucket_batch_sizes = [int(batch_size) // x for x in buckets_max]
- # bucket_id will be a tensor, so convert this list to a tensor as well.
- bucket_batch_sizes = tf.constant(bucket_batch_sizes, dtype=tf.int64)
-
- def example_to_bucket_id(example_input, example_target):
- """Return int64 bucket id for this example, calculated based on length."""
- seq_length = _get_example_length((example_input, example_target))
-
- # TODO(xunkai): investigate if removing code branching improves performance.
- conditions_c = tf.logical_and(
- tf.less_equal(buckets_min, seq_length),
- tf.less(seq_length, buckets_max))
- bucket_id = tf.reduce_min(tf.where(conditions_c))
- return bucket_id
-
- def window_size_fn(bucket_id):
- """Return number of examples to be grouped when given a bucket id."""
- return bucket_batch_sizes[bucket_id]
-
- def batching_fn(bucket_id, grouped_dataset):
- """Batch and add padding to a dataset of elements with similar lengths."""
- bucket_batch_size = window_size_fn(bucket_id)
-
- # Batch the dataset and add padding so that all input sequences in the
- # examples have the same length, and all target sequences have the same
- # lengths as well. Resulting lengths of inputs and targets can differ.
- return grouped_dataset.padded_batch(bucket_batch_size, ([None], [None]))
-
- return dataset.apply(tf.data.experimental.group_by_window(
- key_func=example_to_bucket_id,
- reduce_func=batching_fn,
- window_size=None,
- window_size_func=window_size_fn))
-
-
-def _read_and_batch_from_files(
- file_pattern, batch_size, max_length, max_io_parallelism, shuffle, repeat,
- static_batch=False, num_replicas=1, ctx=None):
- """Create dataset where each item is a dict of "inputs" and "targets".
-
- Args:
- file_pattern: String used to match the input TFRecord files.
- batch_size: Maximum number of tokens per global batch of examples.
- max_length: Maximum number of tokens per example
- max_io_parallelism: Max number of cpu cores for parallel input processing.
- shuffle: If true, randomizes order of elements.
- repeat: Number of times to repeat the dataset. If None, the dataset is
- repeated forever.
- static_batch: Whether the batches in the dataset should have static shapes.
- If True, the input is batched so that every batch has the
- shape [batch_size // max_length, max_length]. If False, the input is
- grouped by length, and batched so that batches may have different
- shapes [N, M], where:
- N * M <= batch_size
- M <= max_length
- In general, this setting should be False. Dynamic shapes allow the inputs
- to be grouped so that the number of padding tokens is minimized, and helps
- model training. In cases where the input shape must be static
- (e.g. running on TPU), this setting should be set to True.
- num_replicas: Number of GPUs or other workers. We will generate global
- batches, and each global batch is equally divisible by number of replicas.
- Currently it is only effective when static_batch==True. TODO: make it
- effective when static_batch=False.
- ctx: Input context.
-
- Returns:
- tf.data.Dataset object containing examples loaded from the files.
- """
- dataset = tf.data.Dataset.list_files(file_pattern, shuffle=shuffle)
-
- if ctx and ctx.num_input_pipelines > 1:
- logging.info("Shard %d of the dataset.", ctx.input_pipeline_id)
- dataset = dataset.shard(ctx.num_input_pipelines, ctx.input_pipeline_id)
-
- # Read files and interleave results. When training, the order of the examples
- # will be non-deterministic.
- options = tf.data.Options()
- options.experimental_deterministic = False
- dataset = dataset.interleave(
- _load_records,
- cycle_length=max_io_parallelism,
- num_parallel_calls=tf.data.experimental.AUTOTUNE).with_options(options)
-
- # Parse each tf.Example into a dictionary
- # TODO: Look into prefetch_input_elements for performance optimization.
- dataset = dataset.map(_parse_example,
- num_parallel_calls=tf.data.experimental.AUTOTUNE)
-
- # Remove examples where the input or target length exceeds the maximum length.
- dataset = dataset.filter(lambda x, y: _filter_max_length((x, y), max_length))
-
- if static_batch:
- dataset = dataset.padded_batch(
- # First calculate the batch size (token count) per worker, then divide it
- # into sentences, and finally expand to a global batch. This keeps the
- # global batch size divisible by the number of replicas for the
- # distribution strategy.
- int(batch_size // num_replicas // max_length * num_replicas),
- ([max_length], [max_length]), drop_remainder=True)
- else:
- # Group and batch such that each batch has examples of similar length.
- # TODO(xunkai): _batch_examples might need to do something special for
- # num_replicas.
- dataset = _batch_examples(dataset, batch_size, max_length)
-
- dataset = dataset.repeat(repeat)
-
- # Prefetch the next element to improve speed of input pipeline.
- dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
- return dataset
-
-
-def _generate_synthetic_data(params):
- """Create synthetic data based on the parameter batch size."""
- batch_size = int(params["batch_size"] // params["max_length"])
- length = params["max_length"]
- dataset = model_helpers.generate_synthetic_data(
- input_shape=tf.TensorShape([length]),
- input_value=1,
- input_dtype=tf.int64,
- label_shape=tf.TensorShape([length]),
- label_value=1,
- label_dtype=tf.int64,
- )
- if params["static_batch"]:
- dataset = dataset.batch(batch_size, drop_remainder=True)
- else:
- dataset = dataset.padded_batch(batch_size, ([None], [None]))
- return dataset
-
-
-def train_input_fn(params, ctx=None):
- """Load and return dataset of batched examples for use during training."""
- file_pattern = os.path.join(params["data_dir"] or "", "*train*")
- if params["use_synthetic_data"]:
- return _generate_synthetic_data(params)
- return _read_and_batch_from_files(
- file_pattern, params["batch_size"], params["max_length"],
- params["max_io_parallelism"], shuffle=True,
- repeat=params["repeat_dataset"], static_batch=params["static_batch"],
- num_replicas=params["num_gpus"], ctx=ctx)
-
-
-def eval_input_fn(params, ctx=None):
- """Load and return dataset of batched examples for use during evaluation."""
- file_pattern = os.path.join(params["data_dir"] or "", "*dev*")
- if params["use_synthetic_data"]:
- return _generate_synthetic_data(params)
- return _read_and_batch_from_files(
- file_pattern, params["batch_size"], params["max_length"],
- params["max_io_parallelism"], shuffle=False, repeat=1,
- static_batch=params["static_batch"], num_replicas=params["num_gpus"],
- ctx=ctx)
-
-
-def map_data_for_transformer_fn(x, y):
- """Maps data for training, and handles weried behaviors for different vers."""
- # Will transform input x and targets y into tuple(x, y) as new model inputs.
- # For TF v2, the 2nd parameter is omitted to make Keras training work.
- return ((x, y),)
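To see the bucketing arithmetic concretely, here is a self-contained re-derivation of the boundary and per-bucket batch-size lists, duplicating only the arithmetic of `_create_min_max_boundaries` with the docstring's toy numbers:

```python
def boundaries(max_length, min_boundary=8, boundary_scale=1.1):
    bucket_boundaries, x = [], min_boundary
    while x < max_length:
        bucket_boundaries.append(x)
        x = max(x + 1, int(x * boundary_scale))
    return [0] + bucket_boundaries, bucket_boundaries + [max_length + 1]

buckets_min, buckets_max = boundaries(24, min_boundary=4, boundary_scale=2)
print(buckets_min)  # [0, 4, 8, 16]
print(buckets_max)  # [4, 8, 16, 25]

# Per-bucket batch sizes: as many examples as fit in batch_size tokens
# when every sequence is padded to the bucket's max length.
batch_size = 4096
print([batch_size // x for x in buckets_max])  # [1024, 512, 256, 163]
```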
diff --git a/spaces/NCTCMumbai/NCTC/models/official/recommendation/data_preprocessing.py b/spaces/NCTCMumbai/NCTC/models/official/recommendation/data_preprocessing.py
deleted file mode 100644
index 3d7a3f856a7d8de45ff00ff3a0e1a6e6eacadd3a..0000000000000000000000000000000000000000
--- a/spaces/NCTCMumbai/NCTC/models/official/recommendation/data_preprocessing.py
+++ /dev/null
@@ -1,265 +0,0 @@
-# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""Preprocess dataset and construct any necessary artifacts."""
-
-from __future__ import absolute_import
-from __future__ import division
-# from __future__ import google_type_annotations
-from __future__ import print_function
-
-import os
-import pickle
-import time
-import timeit
-
-# pylint: disable=wrong-import-order
-from absl import logging
-import numpy as np
-import pandas as pd
-import tensorflow as tf
-import typing
-from typing import Dict, Text, Tuple
-# pylint: enable=wrong-import-order
-
-from official.recommendation import constants as rconst
-from official.recommendation import data_pipeline
-from official.recommendation import movielens
-
-
-_EXPECTED_CACHE_KEYS = (
- rconst.TRAIN_USER_KEY, rconst.TRAIN_ITEM_KEY, rconst.EVAL_USER_KEY,
- rconst.EVAL_ITEM_KEY, rconst.USER_MAP, rconst.ITEM_MAP)
-
-
-def read_dataframe(
- raw_rating_path: Text
-) -> Tuple[Dict[int, int], Dict[int, int], pd.DataFrame]:
- """Read in data CSV, and output DataFrame for downstream processing.
-
- This function reads in the raw CSV of positive items, and performs three
- preprocessing transformations:
-
- 1) Filter out all users who have not rated at least a certain number
- of items. (Typically 20 items)
-
- 2) Zero index the users and items such that the largest user_id is
- `num_users - 1` and the largest item_id is `num_items - 1`
-
- 3) Sort the dataframe by user_id, with timestamp as a secondary sort key.
- This allows the dataframe to be sliced by user in-place, and for the last
- item to be selected simply by calling the `-1` index of a user's slice.
-
- Args:
- raw_rating_path: The path to the CSV which contains the raw dataset.
-
- Returns:
- A dict mapping raw user IDs to regularized user IDs, a dict mapping raw
- item IDs to regularized item IDs, and a filtered, zero-index remapped,
- sorted dataframe.
- """
- with tf.io.gfile.GFile(raw_rating_path) as f:
- df = pd.read_csv(f)
-
- # Keep only users who have rated at least MIN_NUM_RATINGS (typically 20) items
- grouped = df.groupby(movielens.USER_COLUMN)
- df = grouped.filter(
- lambda x: len(x) >= rconst.MIN_NUM_RATINGS) # type: pd.DataFrame
-
- original_users = df[movielens.USER_COLUMN].unique()
- original_items = df[movielens.ITEM_COLUMN].unique()
-
- # Map user and item ids to zero-based indices for downstream processing
- logging.info("Generating user_map and item_map...")
- user_map = {user: index for index, user in enumerate(original_users)}
- item_map = {item: index for index, item in enumerate(original_items)}
-
- df[movielens.USER_COLUMN] = df[movielens.USER_COLUMN].apply(
- lambda user: user_map[user])
- df[movielens.ITEM_COLUMN] = df[movielens.ITEM_COLUMN].apply(
- lambda item: item_map[item])
-
- num_users = len(original_users)
- num_items = len(original_items)
-
- assert num_users <= np.iinfo(rconst.USER_DTYPE).max
- assert num_items <= np.iinfo(rconst.ITEM_DTYPE).max
- assert df[movielens.USER_COLUMN].max() == num_users - 1
- assert df[movielens.ITEM_COLUMN].max() == num_items - 1
-
- # This sort is used to shard the dataframe by user, and later to select
- # the last item for a user to be used in validation.
- logging.info("Sorting by user, timestamp...")
-
- # This sort is equivalent to
- # df.sort_values([movielens.USER_COLUMN, movielens.TIMESTAMP_COLUMN],
- # inplace=True)
- # except that the order of items with the same user and timestamp are
- # sometimes different. For some reason, this sort results in a better
- # hit-rate during evaluation, matching the performance of the MLPerf
- # reference implementation.
- df.sort_values(by=movielens.TIMESTAMP_COLUMN, inplace=True)
- df.sort_values([movielens.USER_COLUMN, movielens.TIMESTAMP_COLUMN],
- inplace=True,
- kind="mergesort")
-
- # The dataframe does not reconstruct indices in the sort or filter steps.
- return user_map, item_map, df.reset_index()
-
-
-def _filter_index_sort(raw_rating_path: Text,
- cache_path: Text) -> Tuple[pd.DataFrame, bool]:
- """Read in data CSV, and output structured data.
-
- This function reads in the raw CSV of positive items, and performs three
- preprocessing transformations:
-
- 1) Filter out all users who have not rated at least a certain number
- of items. (Typically 20 items)
-
- 2) Zero index the users and items such that the largest user_id is
- `num_users - 1` and the largest item_id is `num_items - 1`
-
- 3) Sort the dataframe by user_id, with timestamp as a secondary sort key.
- This allows the dataframe to be sliced by user in-place, and for the last
- item to be selected simply by calling the `-1` index of a user's slice.
-
- While all of these transformations are performed by Pandas (and are therefore
- single-threaded), they only take ~2 minutes, and the overhead of applying a
- MapReduce pattern to process the dataset in parallel adds significant
- complexity for no computational gain. For a larger dataset, parallelizing
- this preprocessing could yield speedups. (Also, this preprocessing step is
- only performed once per run.)
-
- Args:
- raw_rating_path: The path to the CSV which contains the raw dataset.
- cache_path: The path to the file where results of this function are saved.
-
- Returns:
- A dict containing the cached train/eval user and item arrays plus the
- user and item ID maps, and a bool indicating whether a valid cache was
- found on disk.
- """
- valid_cache = tf.io.gfile.exists(cache_path)
- if valid_cache:
- with tf.io.gfile.GFile(cache_path, "rb") as f:
- cached_data = pickle.load(f)
-
- # (nnigania)disabled this check as the dataset is not expected to change
- # cache_age = time.time() - cached_data.get("create_time", 0)
- # if cache_age > rconst.CACHE_INVALIDATION_SEC:
- # valid_cache = False
-
- for key in _EXPECTED_CACHE_KEYS:
- if key not in cached_data:
- valid_cache = False
-
- if not valid_cache:
- logging.info("Removing stale raw data cache file.")
- tf.io.gfile.remove(cache_path)
-
- if valid_cache:
- data = cached_data
- else:
- user_map, item_map, df = read_dataframe(raw_rating_path)
-
- grouped = df.groupby(movielens.USER_COLUMN, group_keys=False)
- eval_df, train_df = grouped.tail(1), grouped.apply(lambda x: x.iloc[:-1])
-
- data = {
- rconst.TRAIN_USER_KEY: train_df[movielens.USER_COLUMN]
- .values.astype(rconst.USER_DTYPE),
- rconst.TRAIN_ITEM_KEY: train_df[movielens.ITEM_COLUMN]
- .values.astype(rconst.ITEM_DTYPE),
- rconst.EVAL_USER_KEY: eval_df[movielens.USER_COLUMN]
- .values.astype(rconst.USER_DTYPE),
- rconst.EVAL_ITEM_KEY: eval_df[movielens.ITEM_COLUMN]
- .values.astype(rconst.ITEM_DTYPE),
- rconst.USER_MAP: user_map,
- rconst.ITEM_MAP: item_map,
- "create_time": time.time(),
- }
-
- logging.info("Writing raw data cache.")
- with tf.io.gfile.GFile(cache_path, "wb") as f:
- pickle.dump(data, f, protocol=pickle.HIGHEST_PROTOCOL)
-
- # TODO(robieta): MLPerf cache clear.
- return data, valid_cache
-
-
-def instantiate_pipeline(dataset,
- data_dir,
- params,
- constructor_type=None,
- deterministic=False,
- epoch_dir=None,
- generate_data_offline=False):
- # type: (str, str, dict, typing.Optional[str], bool, typing.Optional[str], bool) -> (int, int, data_pipeline.BaseDataConstructor)
- """Load and digest data CSV into a usable form.
-
- Args:
- dataset: The name of the dataset to be used.
- data_dir: The root directory of the dataset.
- params: dict of parameters for the run.
- constructor_type: The name of the constructor subclass that should be used
- for the input pipeline.
- deterministic: Tell the data constructor to produce deterministically.
- epoch_dir: Directory in which to store the training epochs.
- generate_data_offline: Boolean, whether current pipeline is done offline
- or while training.
- """
- logging.info("Beginning data preprocessing.")
-
- st = timeit.default_timer()
- raw_rating_path = os.path.join(data_dir, dataset, movielens.RATINGS_FILE)
- cache_path = os.path.join(data_dir, dataset, rconst.RAW_CACHE_FILE)
-
- raw_data, _ = _filter_index_sort(raw_rating_path, cache_path)
- user_map, item_map = raw_data["user_map"], raw_data["item_map"]
- num_users, num_items = movielens.DATASET_TO_NUM_USERS_AND_ITEMS[dataset]
-
- if num_users != len(user_map):
- raise ValueError("Expected to find {} users, but found {}".format(
- num_users, len(user_map)))
- if num_items != len(item_map):
- raise ValueError("Expected to find {} items, but found {}".format(
- num_items, len(item_map)))
-
- producer = data_pipeline.get_constructor(constructor_type or "materialized")(
- maximum_number_epochs=params["train_epochs"],
- num_users=num_users,
- num_items=num_items,
- user_map=user_map,
- item_map=item_map,
- train_pos_users=raw_data[rconst.TRAIN_USER_KEY],
- train_pos_items=raw_data[rconst.TRAIN_ITEM_KEY],
- train_batch_size=params["batch_size"],
- batches_per_train_step=params["batches_per_step"],
- num_train_negatives=params["num_neg"],
- eval_pos_users=raw_data[rconst.EVAL_USER_KEY],
- eval_pos_items=raw_data[rconst.EVAL_ITEM_KEY],
- eval_batch_size=params["eval_batch_size"],
- batches_per_eval_step=params["batches_per_step"],
- stream_files=params["stream_files"],
- deterministic=deterministic,
- epoch_dir=epoch_dir,
- create_data_offline=generate_data_offline)
-
- run_time = timeit.default_timer() - st
- logging.info("Data preprocessing complete. Time: {:.1f} sec."
- .format(run_time))
-
- print(producer)
- return num_users, num_items, producer
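The split buried in `_filter_index_sort` is a leave-one-out protocol: after the stable sort by (user, timestamp), each user's most recent rating becomes the eval positive and the rest go to training. A toy pandas illustration of that `groupby` idiom (made-up data, not MovieLens):

```python
import pandas as pd

df = pd.DataFrame({
    "user_id":   [0, 0, 0, 1, 1],
    "item_id":   [10, 11, 12, 20, 21],
    "timestamp": [1, 2, 3, 1, 2],
}).sort_values(["user_id", "timestamp"], kind="mergesort")

grouped = df.groupby("user_id", group_keys=False)
eval_df = grouped.tail(1)                        # newest rating per user
train_df = grouped.apply(lambda x: x.iloc[:-1])  # all earlier ratings

print(eval_df["item_id"].tolist())   # [12, 21]
print(train_df["item_id"].tolist())  # [10, 11, 20]
```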
diff --git a/spaces/NCTCMumbai/NCTC/models/research/audioset/yamnet/features.py b/spaces/NCTCMumbai/NCTC/models/research/audioset/yamnet/features.py
deleted file mode 100644
index 98661124787c1b3f672185483c5715edb375cb2a..0000000000000000000000000000000000000000
--- a/spaces/NCTCMumbai/NCTC/models/research/audioset/yamnet/features.py
+++ /dev/null
@@ -1,79 +0,0 @@
-# Copyright 2019 The TensorFlow Authors All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-"""Feature computation for YAMNet."""
-
-import numpy as np
-import tensorflow as tf
-
-
-def waveform_to_log_mel_spectrogram(waveform, params):
- """Compute log mel spectrogram of a 1-D waveform."""
- with tf.name_scope('log_mel_features'):
- # waveform has shape [<# samples>]
-
- # Convert waveform into spectrogram using a Short-Time Fourier Transform.
- # Note that tf.signal.stft() uses a periodic Hann window by default.
- window_length_samples = int(
- round(params.SAMPLE_RATE * params.STFT_WINDOW_SECONDS))
- hop_length_samples = int(
- round(params.SAMPLE_RATE * params.STFT_HOP_SECONDS))
- fft_length = 2 ** int(np.ceil(np.log(window_length_samples) / np.log(2.0)))
- num_spectrogram_bins = fft_length // 2 + 1
- magnitude_spectrogram = tf.abs(tf.signal.stft(
- signals=waveform,
- frame_length=window_length_samples,
- frame_step=hop_length_samples,
- fft_length=fft_length))
- # magnitude_spectrogram has shape [<# STFT frames>, num_spectrogram_bins]
-
- # Convert spectrogram into log mel spectrogram.
- linear_to_mel_weight_matrix = tf.signal.linear_to_mel_weight_matrix(
- num_mel_bins=params.MEL_BANDS,
- num_spectrogram_bins=num_spectrogram_bins,
- sample_rate=params.SAMPLE_RATE,
- lower_edge_hertz=params.MEL_MIN_HZ,
- upper_edge_hertz=params.MEL_MAX_HZ)
- mel_spectrogram = tf.matmul(
- magnitude_spectrogram, linear_to_mel_weight_matrix)
- log_mel_spectrogram = tf.math.log(mel_spectrogram + params.LOG_OFFSET)
- # log_mel_spectrogram has shape [<# STFT frames>, MEL_BANDS]
-
- return log_mel_spectrogram
-
-
-def spectrogram_to_patches(spectrogram, params):
- """Break up a spectrogram into a stack of fixed-size patches."""
- with tf.name_scope('feature_patches'):
- # Frame spectrogram (shape [<# STFT frames>, MEL_BANDS]) into patches
- # (the input examples).
- # Only complete frames are emitted, so if there is less than
- # PATCH_WINDOW_SECONDS of waveform then nothing is emitted
- # (to avoid this, zero-pad before processing).
- hop_length_samples = int(
- round(params.SAMPLE_RATE * params.STFT_HOP_SECONDS))
- spectrogram_sr = params.SAMPLE_RATE / hop_length_samples
- patch_window_length_samples = int(
- round(spectrogram_sr * params.PATCH_WINDOW_SECONDS))
- patch_hop_length_samples = int(
- round(spectrogram_sr * params.PATCH_HOP_SECONDS))
- features = tf.signal.frame(
- signal=spectrogram,
- frame_length=patch_window_length_samples,
- frame_step=patch_hop_length_samples,
- axis=0)
- # features has shape [<# patches>, <# STFT frames in a patch>, MEL_BANDS]
-
- return features
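Chained together, the two functions above map a waveform to model-ready patches. The sketch below runs them with a stand-in `params` object; the constants mirror YAMNet's published defaults but are assumptions here:

```python
import types
import numpy as np
import tensorflow as tf

params = types.SimpleNamespace(
    SAMPLE_RATE=16000, STFT_WINDOW_SECONDS=0.025, STFT_HOP_SECONDS=0.010,
    MEL_BANDS=64, MEL_MIN_HZ=125, MEL_MAX_HZ=7500, LOG_OFFSET=0.001,
    PATCH_WINDOW_SECONDS=0.96, PATCH_HOP_SECONDS=0.48)

waveform = tf.constant(np.random.uniform(-1, 1, 3 * 16000), tf.float32)  # 3 s
spectrogram = waveform_to_log_mel_spectrogram(waveform, params)
patches = spectrogram_to_patches(spectrogram, params)
print(spectrogram.shape)  # (298, 64): one frame per 10 ms hop
print(patches.shape)      # (5, 96, 64): 0.96 s patches, hopped every 0.48 s
```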
diff --git a/spaces/Nee001/bing0/src/components/ui/dialog.tsx b/spaces/Nee001/bing0/src/components/ui/dialog.tsx
deleted file mode 100644
index 925e77fe7858fb218b5115b4e225174a886e0f02..0000000000000000000000000000000000000000
--- a/spaces/Nee001/bing0/src/components/ui/dialog.tsx
+++ /dev/null
@@ -1,128 +0,0 @@
-'use client'
-
-import * as React from 'react'
-import * as DialogPrimitive from '@radix-ui/react-dialog'
-
-import { cn } from '@/lib/utils'
-import { IconClose } from '@/components/ui/icons'
-
-const Dialog = DialogPrimitive.Root
-
-const DialogTrigger = DialogPrimitive.Trigger
-
-const DialogPortal = ({
-  className,
-  children,
-  ...props
-}: DialogPrimitive.DialogPortalProps) => (
-  <DialogPrimitive.Portal className={cn(className)} {...props}>
-    <div className="fixed inset-0 z-50 flex items-start justify-center sm:items-center">
-      {children}
-    </div>
-  </DialogPrimitive.Portal>
-)
-DialogPortal.displayName = DialogPrimitive.Portal.displayName
-
-const DialogOverlay = React.forwardRef<
-  React.ElementRef<typeof DialogPrimitive.Overlay>,
-  React.ComponentPropsWithoutRef<typeof DialogPrimitive.Overlay>
->(({ className, ...props }, ref) => (
-  <DialogPrimitive.Overlay
-    ref={ref}
-    className={cn(
-      'fixed inset-0 z-50 bg-background/80 backdrop-blur-sm transition-all duration-100',
-      className
-    )}
-    {...props}
-  />
-))
-DialogOverlay.displayName = DialogPrimitive.Overlay.displayName
-
-const DialogContent = React.forwardRef<
-  React.ElementRef<typeof DialogPrimitive.Content>,
-  React.ComponentPropsWithoutRef<typeof DialogPrimitive.Content>
->(({ className, children, ...props }, ref) => (
-  <DialogPortal>
-    <DialogOverlay />
-    <DialogPrimitive.Content
-      ref={ref}
-      className={cn(
-        'fixed z-50 grid w-full gap-4 rounded-b-lg border bg-background p-6 shadow-sm sm:max-w-lg sm:rounded-lg',
-        className
-      )}
-      {...props}
-    >
-      {children}
-      <DialogPrimitive.Close className="absolute right-4 top-4 rounded-sm opacity-70 transition-opacity hover:opacity-100 focus:outline-none disabled:pointer-events-none">
-        <IconClose />
-        <span className="sr-only">Close</span>
-      </DialogPrimitive.Close>
-    </DialogPrimitive.Content>
-  </DialogPortal>
-))
-DialogContent.displayName = DialogPrimitive.Content.displayName
-
-const DialogHeader = ({
-  className,
-  ...props
-}: React.HTMLAttributes<HTMLDivElement>) => (
-  <div
-    className={cn('flex flex-col space-y-1.5 text-center sm:text-left', className)}
-    {...props}
-  />
-)
-DialogHeader.displayName = 'DialogHeader'
-
-const DialogFooter = ({
-  className,
-  ...props
-}: React.HTMLAttributes<HTMLDivElement>) => (
-  <div
-    className={cn('flex flex-col-reverse sm:flex-row sm:justify-end sm:space-x-2', className)}
-    {...props}
-  />
-)
-DialogFooter.displayName = 'DialogFooter'
-
-const DialogTitle = React.forwardRef<
-  React.ElementRef<typeof DialogPrimitive.Title>,
-  React.ComponentPropsWithoutRef<typeof DialogPrimitive.Title>
->(({ className, ...props }, ref) => (
-  <DialogPrimitive.Title
-    ref={ref}
-    className={cn('text-lg font-semibold leading-none tracking-tight', className)}
-    {...props}
-  />
-))
-DialogTitle.displayName = DialogPrimitive.Title.displayName
-
-const DialogDescription = React.forwardRef<
-  React.ElementRef<typeof DialogPrimitive.Description>,
-  React.ComponentPropsWithoutRef<typeof DialogPrimitive.Description>
->(({ className, ...props }, ref) => (
-  <DialogPrimitive.Description
-    ref={ref}
-    className={cn('text-sm text-muted-foreground', className)}
-    {...props}
-  />
-))
-DialogDescription.displayName = DialogPrimitive.Description.displayName
-
-export {
- Dialog,
- DialogTrigger,
- DialogContent,
- DialogHeader,
- DialogFooter,
- DialogTitle,
- DialogDescription
-}
diff --git a/spaces/NeuML/baseball/README.md b/spaces/NeuML/baseball/README.md
deleted file mode 100644
index fc80112a0c900352097c63a935d09b85587607f7..0000000000000000000000000000000000000000
--- a/spaces/NeuML/baseball/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Baseball
-emoji: ⚾
-colorFrom: blue
-colorTo: blue
-sdk: streamlit
-sdk_version: 1.24.0
-app_file: app.py
-pinned: false
-license: apache-2.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/OAOA/DifFace/basicsr/losses/loss_util.py b/spaces/OAOA/DifFace/basicsr/losses/loss_util.py
deleted file mode 100644
index fd293ff9e6a22814e5aeff6ae11fb54d2e4bafff..0000000000000000000000000000000000000000
--- a/spaces/OAOA/DifFace/basicsr/losses/loss_util.py
+++ /dev/null
@@ -1,145 +0,0 @@
-import functools
-import torch
-from torch.nn import functional as F
-
-
-def reduce_loss(loss, reduction):
- """Reduce loss as specified.
-
- Args:
- loss (Tensor): Elementwise loss tensor.
- reduction (str): Options are 'none', 'mean' and 'sum'.
-
- Returns:
- Tensor: Reduced loss tensor.
- """
- reduction_enum = F._Reduction.get_enum(reduction)
- # none: 0, elementwise_mean:1, sum: 2
- if reduction_enum == 0:
- return loss
- elif reduction_enum == 1:
- return loss.mean()
- else:
- return loss.sum()
-
-
-def weight_reduce_loss(loss, weight=None, reduction='mean'):
- """Apply element-wise weight and reduce loss.
-
- Args:
- loss (Tensor): Element-wise loss.
- weight (Tensor): Element-wise weights. Default: None.
- reduction (str): Same as built-in losses of PyTorch. Options are
- 'none', 'mean' and 'sum'. Default: 'mean'.
-
- Returns:
- Tensor: Loss values.
- """
- # if weight is specified, apply element-wise weight
- if weight is not None:
- assert weight.dim() == loss.dim()
- assert weight.size(1) == 1 or weight.size(1) == loss.size(1)
- loss = loss * weight
-
- # if weight is not specified or reduction is sum, just reduce the loss
- if weight is None or reduction == 'sum':
- loss = reduce_loss(loss, reduction)
- # if reduction is mean, then compute mean over weight region
- elif reduction == 'mean':
- if weight.size(1) > 1:
- weight = weight.sum()
- else:
- weight = weight.sum() * loss.size(1)
- loss = loss.sum() / weight
-
- return loss
-
-
-def weighted_loss(loss_func):
- """Create a weighted version of a given loss function.
-
- To use this decorator, the loss function must have the signature like
- `loss_func(pred, target, **kwargs)`. The function only needs to compute
- element-wise loss without any reduction. This decorator will add weight
- and reduction arguments to the function. The decorated function will have
- the signature like `loss_func(pred, target, weight=None, reduction='mean',
- **kwargs)`.
-
- :Example:
-
- >>> import torch
- >>> @weighted_loss
- >>> def l1_loss(pred, target):
- >>> return (pred - target).abs()
-
- >>> pred = torch.Tensor([0, 2, 3])
- >>> target = torch.Tensor([1, 1, 1])
- >>> weight = torch.Tensor([1, 0, 1])
-
- >>> l1_loss(pred, target)
- tensor(1.3333)
- >>> l1_loss(pred, target, weight)
- tensor(1.5000)
- >>> l1_loss(pred, target, reduction='none')
- tensor([1., 1., 2.])
- >>> l1_loss(pred, target, weight, reduction='sum')
- tensor(3.)
- """
-
- @functools.wraps(loss_func)
- def wrapper(pred, target, weight=None, reduction='mean', **kwargs):
- # get element-wise loss
- loss = loss_func(pred, target, **kwargs)
- loss = weight_reduce_loss(loss, weight, reduction)
- return loss
-
- return wrapper
-
-
-def get_local_weights(residual, ksize):
- """Get local weights for generating the artifact map of LDL.
-
- It is only called by the `get_refined_artifact_map` function.
-
- Args:
- residual (Tensor): Residual between predicted and ground truth images.
- ksize (Int): size of the local window.
-
- Returns:
- Tensor: weight for each pixel to be discriminated as an artifact pixel
- """
-
- pad = (ksize - 1) // 2
- residual_pad = F.pad(residual, pad=[pad, pad, pad, pad], mode='reflect')
-
- unfolded_residual = residual_pad.unfold(2, ksize, 1).unfold(3, ksize, 1)
- pixel_level_weight = torch.var(unfolded_residual, dim=(-1, -2), unbiased=True, keepdim=True).squeeze(-1).squeeze(-1)
-
- return pixel_level_weight
-
-
-def get_refined_artifact_map(img_gt, img_output, img_ema, ksize):
- """Calculate the artifact map of LDL
- (Details or Artifacts: A Locally Discriminative Learning Approach to Realistic Image Super-Resolution. In CVPR 2022)
-
- Args:
- img_gt (Tensor): ground truth images.
- img_output (Tensor): output images given by the optimizing model.
- img_ema (Tensor): output images given by the ema model.
- ksize (Int): size of the local window.
-
- Returns:
- overall_weight: weight for each pixel to be discriminated as an artifact pixel
- (calculated based on both local and global observations).
- """
-
- residual_ema = torch.sum(torch.abs(img_gt - img_ema), 1, keepdim=True)
- residual_sr = torch.sum(torch.abs(img_gt - img_output), 1, keepdim=True)
-
- patch_level_weight = torch.var(residual_sr.clone(), dim=(-1, -2, -3), keepdim=True)**(1 / 5)
- pixel_level_weight = get_local_weights(residual_sr.clone(), ksize)
- overall_weight = patch_level_weight * pixel_level_weight
-
- overall_weight[residual_sr < residual_ema] = 0
-
- return overall_weight
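A usage sketch for the LDL helpers above, with random tensors standing in for real images (in training, `img_output` comes from the optimizing generator and `img_ema` from its EMA copy); the weighted L1 term at the end is one plausible way to consume the map, not the only one:

```python
import torch

b, c, h, w = 2, 3, 64, 64
img_gt = torch.rand(b, c, h, w)
img_output = torch.rand(b, c, h, w)  # stand-in for the current generator
img_ema = torch.rand(b, c, h, w)     # stand-in for the EMA generator

weight = get_refined_artifact_map(img_gt, img_output, img_ema, ksize=7)
print(weight.shape)  # torch.Size([2, 1, 64, 64])

# Artifact-weighted L1 term: artifact-prone pixels are penalized harder.
ldl_term = torch.mean(weight * torch.abs(img_output - img_gt))
```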
diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/optim/adagrad.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/optim/adagrad.py
deleted file mode 100644
index 4f539541c1c91d8c822f7ce624fa6eabf744f60e..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/optim/adagrad.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import torch.optim
-
-from . import LegacyFairseqOptimizer, register_optimizer
-
-
-@register_optimizer("adagrad")
-class Adagrad(LegacyFairseqOptimizer):
- def __init__(self, args, params):
- super().__init__(args)
- self._optimizer = torch.optim.Adagrad(params, **self.optimizer_config)
-
- @staticmethod
- def add_args(parser):
- """Add optimizer-specific arguments to the parser."""
- # fmt: off
- parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD',
- help='weight decay')
- # fmt: on
-
- @property
- def optimizer_config(self):
- """
- Return a kwarg dictionary that will be used to override optimizer
- args stored in checkpoints. This allows us to load a checkpoint and
- resume training using a different set of optimizer args, e.g., with a
- different learning rate.
- """
- return {
- "lr": self.args.lr[0],
- "weight_decay": self.args.weight_decay,
- }
-
- @property
- def supports_flat_params(self):
- return False
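The point of `optimizer_config` is that hyperparameters are re-read from the current args rather than from the checkpoint, so resuming with, say, a different learning rate works. A rough sketch of the pattern with a simplified `args` namespace (not the fairseq CLI):

```python
import types
import torch

args = types.SimpleNamespace(lr=[0.01], weight_decay=0.0)
params = [torch.nn.Parameter(torch.zeros(3))]

opt = Adagrad(args, params)
state = opt._optimizer.state_dict()     # what a checkpoint would store

args = types.SimpleNamespace(lr=[0.001], weight_decay=0.0)  # resume with new LR
opt2 = Adagrad(args, params)
opt2._optimizer.load_state_dict(state)  # restores accumulator state...
print(opt2.optimizer_config)            # ...while config follows the new args:
                                        # {'lr': 0.001, 'weight_decay': 0.0}
```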
diff --git a/spaces/ORI-Muchim/MarinTTS/modules.py b/spaces/ORI-Muchim/MarinTTS/modules.py
deleted file mode 100644
index 9c7fd9cd6eb8b7e0ec0e08957e970744a374a924..0000000000000000000000000000000000000000
--- a/spaces/ORI-Muchim/MarinTTS/modules.py
+++ /dev/null
@@ -1,390 +0,0 @@
-import copy
-import math
-import numpy as np
-import scipy
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
-from torch.nn.utils import weight_norm, remove_weight_norm
-
-import commons
-from commons import init_weights, get_padding
-from transforms import piecewise_rational_quadratic_transform
-
-
-LRELU_SLOPE = 0.1
-
-
-class LayerNorm(nn.Module):
- def __init__(self, channels, eps=1e-5):
- super().__init__()
- self.channels = channels
- self.eps = eps
-
- self.gamma = nn.Parameter(torch.ones(channels))
- self.beta = nn.Parameter(torch.zeros(channels))
-
- def forward(self, x):
- x = x.transpose(1, -1)
- x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
- return x.transpose(1, -1)
-
-
-class ConvReluNorm(nn.Module):
- def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout):
- super().__init__()
- self.in_channels = in_channels
- self.hidden_channels = hidden_channels
- self.out_channels = out_channels
- self.kernel_size = kernel_size
- self.n_layers = n_layers
- self.p_dropout = p_dropout
- assert n_layers > 1, "Number of layers should be larger than 1."
-
- self.conv_layers = nn.ModuleList()
- self.norm_layers = nn.ModuleList()
- self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size//2))
- self.norm_layers.append(LayerNorm(hidden_channels))
- self.relu_drop = nn.Sequential(
- nn.ReLU(),
- nn.Dropout(p_dropout))
- for _ in range(n_layers-1):
- self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size//2))
- self.norm_layers.append(LayerNorm(hidden_channels))
- self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
- self.proj.weight.data.zero_()
- self.proj.bias.data.zero_()
-
- def forward(self, x, x_mask):
- x_org = x
- for i in range(self.n_layers):
- x = self.conv_layers[i](x * x_mask)
- x = self.norm_layers[i](x)
- x = self.relu_drop(x)
- x = x_org + self.proj(x)
- return x * x_mask
-
-
-class DDSConv(nn.Module):
- """
- Dilated and Depth-Separable Convolution
- """
- def __init__(self, channels, kernel_size, n_layers, p_dropout=0.):
- super().__init__()
- self.channels = channels
- self.kernel_size = kernel_size
- self.n_layers = n_layers
- self.p_dropout = p_dropout
-
- self.drop = nn.Dropout(p_dropout)
- self.convs_sep = nn.ModuleList()
- self.convs_1x1 = nn.ModuleList()
- self.norms_1 = nn.ModuleList()
- self.norms_2 = nn.ModuleList()
- for i in range(n_layers):
- dilation = kernel_size ** i
- padding = (kernel_size * dilation - dilation) // 2
- self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size,
- groups=channels, dilation=dilation, padding=padding
- ))
- self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
- self.norms_1.append(LayerNorm(channels))
- self.norms_2.append(LayerNorm(channels))
-
- def forward(self, x, x_mask, g=None):
- if g is not None:
- x = x + g
- for i in range(self.n_layers):
- y = self.convs_sep[i](x * x_mask)
- y = self.norms_1[i](y)
- y = F.gelu(y)
- y = self.convs_1x1[i](y)
- y = self.norms_2[i](y)
- y = F.gelu(y)
- y = self.drop(y)
- x = x + y
- return x * x_mask
-
-
-class WN(torch.nn.Module):
- def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0):
- super(WN, self).__init__()
- assert(kernel_size % 2 == 1)
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.gin_channels = gin_channels
- self.p_dropout = p_dropout
-
- self.in_layers = torch.nn.ModuleList()
- self.res_skip_layers = torch.nn.ModuleList()
- self.drop = nn.Dropout(p_dropout)
-
- if gin_channels != 0:
- cond_layer = torch.nn.Conv1d(gin_channels, 2*hidden_channels*n_layers, 1)
- self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')
-
- for i in range(n_layers):
- dilation = dilation_rate ** i
- padding = int((kernel_size * dilation - dilation) / 2)
- in_layer = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, kernel_size,
- dilation=dilation, padding=padding)
- in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')
- self.in_layers.append(in_layer)
-
- # last one is not necessary
- if i < n_layers - 1:
- res_skip_channels = 2 * hidden_channels
- else:
- res_skip_channels = hidden_channels
-
- res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
- res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight')
- self.res_skip_layers.append(res_skip_layer)
-
- def forward(self, x, x_mask, g=None, **kwargs):
- output = torch.zeros_like(x)
- n_channels_tensor = torch.IntTensor([self.hidden_channels])
-
- if g is not None:
- g = self.cond_layer(g)
-
- for i in range(self.n_layers):
- x_in = self.in_layers[i](x)
- if g is not None:
- cond_offset = i * 2 * self.hidden_channels
- g_l = g[:,cond_offset:cond_offset+2*self.hidden_channels,:]
- else:
- g_l = torch.zeros_like(x_in)
-
- acts = commons.fused_add_tanh_sigmoid_multiply(
- x_in,
- g_l,
- n_channels_tensor)
- acts = self.drop(acts)
-
- res_skip_acts = self.res_skip_layers[i](acts)
- if i < self.n_layers - 1:
- res_acts = res_skip_acts[:,:self.hidden_channels,:]
- x = (x + res_acts) * x_mask
- output = output + res_skip_acts[:,self.hidden_channels:,:]
- else:
- output = output + res_skip_acts
- return output * x_mask
-
- def remove_weight_norm(self):
- if self.gin_channels != 0:
- torch.nn.utils.remove_weight_norm(self.cond_layer)
- for l in self.in_layers:
- torch.nn.utils.remove_weight_norm(l)
- for l in self.res_skip_layers:
- torch.nn.utils.remove_weight_norm(l)
-
-
-class ResBlock1(torch.nn.Module):
- def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
- super(ResBlock1, self).__init__()
- self.convs1 = nn.ModuleList([
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
- padding=get_padding(kernel_size, dilation[0]))),
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
- padding=get_padding(kernel_size, dilation[1]))),
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
- padding=get_padding(kernel_size, dilation[2])))
- ])
- self.convs1.apply(init_weights)
-
- self.convs2 = nn.ModuleList([
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
- padding=get_padding(kernel_size, 1))),
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
- padding=get_padding(kernel_size, 1))),
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
- padding=get_padding(kernel_size, 1)))
- ])
- self.convs2.apply(init_weights)
-
- def forward(self, x, x_mask=None):
- for c1, c2 in zip(self.convs1, self.convs2):
- xt = F.leaky_relu(x, LRELU_SLOPE)
- if x_mask is not None:
- xt = xt * x_mask
- xt = c1(xt)
- xt = F.leaky_relu(xt, LRELU_SLOPE)
- if x_mask is not None:
- xt = xt * x_mask
- xt = c2(xt)
- x = xt + x
- if x_mask is not None:
- x = x * x_mask
- return x
-
- def remove_weight_norm(self):
- for l in self.convs1:
- remove_weight_norm(l)
- for l in self.convs2:
- remove_weight_norm(l)
-
-
-class ResBlock2(torch.nn.Module):
- def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
- super(ResBlock2, self).__init__()
- self.convs = nn.ModuleList([
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
- padding=get_padding(kernel_size, dilation[0]))),
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
- padding=get_padding(kernel_size, dilation[1])))
- ])
- self.convs.apply(init_weights)
-
- def forward(self, x, x_mask=None):
- for c in self.convs:
- xt = F.leaky_relu(x, LRELU_SLOPE)
- if x_mask is not None:
- xt = xt * x_mask
- xt = c(xt)
- x = xt + x
- if x_mask is not None:
- x = x * x_mask
- return x
-
- def remove_weight_norm(self):
- for l in self.convs:
- remove_weight_norm(l)
-
-
-class Log(nn.Module):
- def forward(self, x, x_mask, reverse=False, **kwargs):
- if not reverse:
- y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask
- logdet = torch.sum(-y, [1, 2])
- return y, logdet
- else:
- x = torch.exp(x) * x_mask
- return x
-
-
-class Flip(nn.Module):
- def forward(self, x, *args, reverse=False, **kwargs):
- x = torch.flip(x, [1])
- if not reverse:
- logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device)
- return x, logdet
- else:
- return x
-
-
-class ElementwiseAffine(nn.Module):
- def __init__(self, channels):
- super().__init__()
- self.channels = channels
- self.m = nn.Parameter(torch.zeros(channels,1))
- self.logs = nn.Parameter(torch.zeros(channels,1))
-
- def forward(self, x, x_mask, reverse=False, **kwargs):
- if not reverse:
- y = self.m + torch.exp(self.logs) * x
- y = y * x_mask
- logdet = torch.sum(self.logs * x_mask, [1,2])
- return y, logdet
- else:
- x = (x - self.m) * torch.exp(-self.logs) * x_mask
- return x
-
-
-class ResidualCouplingLayer(nn.Module):
- def __init__(self,
- channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- p_dropout=0,
- gin_channels=0,
- mean_only=False):
- assert channels % 2 == 0, "channels should be divisible by 2"
- super().__init__()
- self.channels = channels
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.half_channels = channels // 2
- self.mean_only = mean_only
-
- self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
- self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels)
- self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
- self.post.weight.data.zero_()
- self.post.bias.data.zero_()
-
- def forward(self, x, x_mask, g=None, reverse=False):
- x0, x1 = torch.split(x, [self.half_channels]*2, 1)
- h = self.pre(x0) * x_mask
- h = self.enc(h, x_mask, g=g)
- stats = self.post(h) * x_mask
- if not self.mean_only:
- m, logs = torch.split(stats, [self.half_channels]*2, 1)
- else:
- m = stats
- logs = torch.zeros_like(m)
-
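- # Affine coupling: x0 passes through unchanged and parameterises the
- # transform of x1, so the Jacobian is triangular and logdet = sum(logs)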
- if not reverse:
- x1 = m + x1 * torch.exp(logs) * x_mask
- x = torch.cat([x0, x1], 1)
- logdet = torch.sum(logs, [1,2])
- return x, logdet
- else:
- x1 = (x1 - m) * torch.exp(-logs) * x_mask
- x = torch.cat([x0, x1], 1)
- return x
-
-
-class ConvFlow(nn.Module):
- def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0):
- super().__init__()
- self.in_channels = in_channels
- self.filter_channels = filter_channels
- self.kernel_size = kernel_size
- self.n_layers = n_layers
- self.num_bins = num_bins
- self.tail_bound = tail_bound
- self.half_channels = in_channels // 2
-
- self.pre = nn.Conv1d(self.half_channels, filter_channels, 1)
- self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.)
- self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1)
- self.proj.weight.data.zero_()
- self.proj.bias.data.zero_()
-
- def forward(self, x, x_mask, g=None, reverse=False):
- x0, x1 = torch.split(x, [self.half_channels]*2, 1)
- h = self.pre(x0)
- h = self.convs(h, x_mask, g=g)
- h = self.proj(h) * x_mask
-
- b, c, t = x0.shape
- h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, c*(num_bins*3-1), t] -> [b, c, t, num_bins*3-1]
-
- unnormalized_widths = h[..., :self.num_bins] / math.sqrt(self.filter_channels)
- unnormalized_heights = h[..., self.num_bins:2*self.num_bins] / math.sqrt(self.filter_channels)
- unnormalized_derivatives = h[..., 2 * self.num_bins:]
-
- x1, logabsdet = piecewise_rational_quadratic_transform(x1,
- unnormalized_widths,
- unnormalized_heights,
- unnormalized_derivatives,
- inverse=reverse,
- tails='linear',
- tail_bound=self.tail_bound
- )
-
- x = torch.cat([x0, x1], 1) * x_mask
- logdet = torch.sum(logabsdet * x_mask, [1,2])
- if not reverse:
- return x, logdet
- else:
- return x
diff --git a/spaces/OpenGVLab/InternGPT/third-party/lama/saicinpainting/training/trainers/default.py b/spaces/OpenGVLab/InternGPT/third-party/lama/saicinpainting/training/trainers/default.py
deleted file mode 100644
index 86c7f0fab42924bfc93a031e851117634c70f593..0000000000000000000000000000000000000000
--- a/spaces/OpenGVLab/InternGPT/third-party/lama/saicinpainting/training/trainers/default.py
+++ /dev/null
@@ -1,175 +0,0 @@
-import logging
-
-import torch
-import torch.nn.functional as F
-from omegaconf import OmegaConf
-
-from saicinpainting.training.data.datasets import make_constant_area_crop_params
-from saicinpainting.training.losses.distance_weighting import make_mask_distance_weighter
-from saicinpainting.training.losses.feature_matching import feature_matching_loss, masked_l1_loss
-from saicinpainting.training.modules.fake_fakes import FakeFakesGenerator
-from saicinpainting.training.trainers.base import BaseInpaintingTrainingModule, make_multiscale_noise
-from saicinpainting.utils import add_prefix_to_keys, get_ramp
-
-LOGGER = logging.getLogger(__name__)
-
-
-def make_constant_area_crop_batch(batch, **kwargs):
- crop_y, crop_x, crop_height, crop_width = make_constant_area_crop_params(img_height=batch['image'].shape[2],
- img_width=batch['image'].shape[3],
- **kwargs)
- batch['image'] = batch['image'][:, :, crop_y : crop_y + crop_height, crop_x : crop_x + crop_width]
- batch['mask'] = batch['mask'][:, :, crop_y: crop_y + crop_height, crop_x: crop_x + crop_width]
- return batch
-
-
-class DefaultInpaintingTrainingModule(BaseInpaintingTrainingModule):
- def __init__(self, *args, concat_mask=True, rescale_scheduler_kwargs=None, image_to_discriminator='predicted_image',
- add_noise_kwargs=None, noise_fill_hole=False, const_area_crop_kwargs=None,
- distance_weighter_kwargs=None, distance_weighted_mask_for_discr=False,
- fake_fakes_proba=0, fake_fakes_generator_kwargs=None,
- **kwargs):
- super().__init__(*args, **kwargs)
- self.concat_mask = concat_mask
- self.rescale_size_getter = get_ramp(**rescale_scheduler_kwargs) if rescale_scheduler_kwargs is not None else None
- self.image_to_discriminator = image_to_discriminator
- self.add_noise_kwargs = add_noise_kwargs
- self.noise_fill_hole = noise_fill_hole
- self.const_area_crop_kwargs = const_area_crop_kwargs
- self.refine_mask_for_losses = make_mask_distance_weighter(**distance_weighter_kwargs) \
- if distance_weighter_kwargs is not None else None
- self.distance_weighted_mask_for_discr = distance_weighted_mask_for_discr
-
- self.fake_fakes_proba = fake_fakes_proba
- if self.fake_fakes_proba > 1e-3:
- self.fake_fakes_gen = FakeFakesGenerator(**(fake_fakes_generator_kwargs or {}))
-
- def forward(self, batch):
- if self.training and self.rescale_size_getter is not None:
- cur_size = self.rescale_size_getter(self.global_step)
- batch['image'] = F.interpolate(batch['image'], size=cur_size, mode='bilinear', align_corners=False)
- batch['mask'] = F.interpolate(batch['mask'], size=cur_size, mode='nearest')
-
- if self.training and self.const_area_crop_kwargs is not None:
- batch = make_constant_area_crop_batch(batch, **self.const_area_crop_kwargs)
-
- img = batch['image']
- mask = batch['mask']
-
- masked_img = img * (1 - mask)
-
- if self.add_noise_kwargs is not None:
- noise = make_multiscale_noise(masked_img, **self.add_noise_kwargs)
- if self.noise_fill_hole:
- masked_img = masked_img + mask * noise[:, :masked_img.shape[1]]
- masked_img = torch.cat([masked_img, noise], dim=1)
-
- if self.concat_mask:
- masked_img = torch.cat([masked_img, mask], dim=1)
-
- batch['predicted_image'] = self.generator(masked_img)
- batch['inpainted'] = mask * batch['predicted_image'] + (1 - mask) * batch['image']
-
- if self.fake_fakes_proba > 1e-3:
- if self.training and torch.rand(1).item() < self.fake_fakes_proba:
- batch['fake_fakes'], batch['fake_fakes_masks'] = self.fake_fakes_gen(img, mask)
- batch['use_fake_fakes'] = True
- else:
- batch['fake_fakes'] = torch.zeros_like(img)
- batch['fake_fakes_masks'] = torch.zeros_like(mask)
- batch['use_fake_fakes'] = False
-
- batch['mask_for_losses'] = self.refine_mask_for_losses(img, batch['predicted_image'], mask) \
- if self.refine_mask_for_losses is not None and self.training \
- else mask
-
- return batch
-
- def generator_loss(self, batch):
- img = batch['image']
- predicted_img = batch[self.image_to_discriminator]
- original_mask = batch['mask']
- supervised_mask = batch['mask_for_losses']
-
- # L1
- l1_value = masked_l1_loss(predicted_img, img, supervised_mask,
- self.config.losses.l1.weight_known,
- self.config.losses.l1.weight_missing)
-
- total_loss = l1_value
- metrics = dict(gen_l1=l1_value)
-
- # vgg-based perceptual loss
- if self.config.losses.perceptual.weight > 0:
- pl_value = self.loss_pl(predicted_img, img, mask=supervised_mask).sum() * self.config.losses.perceptual.weight
- total_loss = total_loss + pl_value
- metrics['gen_pl'] = pl_value
-
- # discriminator
- # adversarial_loss calls backward by itself
- mask_for_discr = supervised_mask if self.distance_weighted_mask_for_discr else original_mask
- self.adversarial_loss.pre_generator_step(real_batch=img, fake_batch=predicted_img,
- generator=self.generator, discriminator=self.discriminator)
- discr_real_pred, discr_real_features = self.discriminator(img)
- discr_fake_pred, discr_fake_features = self.discriminator(predicted_img)
- adv_gen_loss, adv_metrics = self.adversarial_loss.generator_loss(real_batch=img,
- fake_batch=predicted_img,
- discr_real_pred=discr_real_pred,
- discr_fake_pred=discr_fake_pred,
- mask=mask_for_discr)
- total_loss = total_loss + adv_gen_loss
- metrics['gen_adv'] = adv_gen_loss
- metrics.update(add_prefix_to_keys(adv_metrics, 'adv_'))
-
- # feature matching
- if self.config.losses.feature_matching.weight > 0:
- need_mask_in_fm = OmegaConf.to_container(self.config.losses.feature_matching).get('pass_mask', False)
- mask_for_fm = supervised_mask if need_mask_in_fm else None
- fm_value = feature_matching_loss(discr_fake_features, discr_real_features,
- mask=mask_for_fm) * self.config.losses.feature_matching.weight
- total_loss = total_loss + fm_value
- metrics['gen_fm'] = fm_value
-
- if self.loss_resnet_pl is not None:
- resnet_pl_value = self.loss_resnet_pl(predicted_img, img)
- total_loss = total_loss + resnet_pl_value
- metrics['gen_resnet_pl'] = resnet_pl_value
-
- return total_loss, metrics
-
- def discriminator_loss(self, batch):
- total_loss = 0
- metrics = {}
-
- predicted_img = batch[self.image_to_discriminator].detach()
- self.adversarial_loss.pre_discriminator_step(real_batch=batch['image'], fake_batch=predicted_img,
- generator=self.generator, discriminator=self.discriminator)
- discr_real_pred, discr_real_features = self.discriminator(batch['image'])
- discr_fake_pred, discr_fake_features = self.discriminator(predicted_img)
- adv_discr_loss, adv_metrics = self.adversarial_loss.discriminator_loss(real_batch=batch['image'],
- fake_batch=predicted_img,
- discr_real_pred=discr_real_pred,
- discr_fake_pred=discr_fake_pred,
- mask=batch['mask'])
- total_loss = total_loss + adv_discr_loss
- metrics['discr_adv'] = adv_discr_loss
- metrics.update(add_prefix_to_keys(adv_metrics, 'adv_'))
-
-
- if batch.get('use_fake_fakes', False):
- fake_fakes = batch['fake_fakes']
- self.adversarial_loss.pre_discriminator_step(real_batch=batch['image'], fake_batch=fake_fakes,
- generator=self.generator, discriminator=self.discriminator)
- discr_fake_fakes_pred, _ = self.discriminator(fake_fakes)
- fake_fakes_adv_discr_loss, fake_fakes_adv_metrics = self.adversarial_loss.discriminator_loss(
- real_batch=batch['image'],
- fake_batch=fake_fakes,
- discr_real_pred=discr_real_pred,
- discr_fake_pred=discr_fake_fakes_pred,
- mask=batch['mask']
- )
- total_loss = total_loss + fake_fakes_adv_discr_loss
- metrics['discr_adv_fake_fakes'] = fake_fakes_adv_discr_loss
- metrics.update(add_prefix_to_keys(fake_fakes_adv_metrics, 'adv_'))
-
- return total_loss, metrics
diff --git a/spaces/OpenMotionLab/MotionGPT/mGPT/archs/mgpt_lm.py b/spaces/OpenMotionLab/MotionGPT/mGPT/archs/mgpt_lm.py
deleted file mode 100644
index c30307adbcdbc9c8137f8e021c991fac783dbf0f..0000000000000000000000000000000000000000
--- a/spaces/OpenMotionLab/MotionGPT/mGPT/archs/mgpt_lm.py
+++ /dev/null
@@ -1,592 +0,0 @@
-import os
-from typing import List, Union
-import numpy as np
-import math
-import time
-import heapq
-import torch
-from torch import Tensor, nn
-from torch.distributions.distribution import Distribution
-from transformers import AutoModelForSeq2SeqLM, T5ForConditionalGeneration, T5Tokenizer, AutoTokenizer, GPT2LMHeadModel, GPT2Tokenizer
-import random
-from typing import Optional
-from .tools.token_emb import NewTokenEmb
-
-
-class MLM(nn.Module):
-
- def __init__(
- self,
- model_path: str,
- model_type: str = "t5",
- stage: str = "lm_pretrain",
- new_token_type: str = "insert",
- motion_codebook_size: int = 512,
- framerate: float = 20.0,
- down_t: int = 4,
- predict_ratio: float = 0.2,
- inbetween_ratio: float = 0.25,
- max_length: int = 256,
- lora: bool = False,
- quota_ratio: float = 0.5,
- noise_density: float = 0.15,
- mean_noise_span_length: int = 3,
- **kwargs,
- ) -> None:
-
- super().__init__()
-
- # Parameters
- self.m_codebook_size = motion_codebook_size
- self.max_length = max_length
- self.framerate = framerate
- self.down_t = down_t
- self.predict_ratio = predict_ratio
- self.inbetween_ratio = inbetween_ratio
- self.noise_density = noise_density
- self.mean_noise_span_length = mean_noise_span_length
- self.quota_ratio = quota_ratio
- self.stage = stage
-
- # Instantiate language model
- self.tokenizer = AutoTokenizer.from_pretrained(model_path, legacy=True)
- if model_type == "t5":
- self.language_model = T5ForConditionalGeneration.from_pretrained(
- model_path)
- self.lm_type = 'encdec'
- elif model_type == "gpt2":
- self.language_model = GPT2LMHeadModel.from_pretrained(model_path)
- self.lm_type = 'dec'
- else:
- raise ValueError('model_type must be either "t5" or "gpt2"')
-
- if self.lm_type == 'dec':
- self.tokenizer.pad_token = self.tokenizer.eos_token
-
- # Add motion tokens
- self.tokenizer.add_tokens(
- [f'<motion_id_{i}>' for i in range(self.m_codebook_size + 3)])
-
- if new_token_type == "insert":
- self.language_model.resize_token_embeddings(len(self.tokenizer))
- elif new_token_type == "mlp":
- shared = NewTokenEmb(self.language_model.shared,
- self.m_codebook_size + 3)
- # lm_head = NewTokenEmb(self.language_model.lm_head,
- # self.m_codebook_size + 3)
- self.language_model.resize_token_embeddings(len(self.tokenizer))
- self.language_model.shared = shared
- # self.language_model.lm_head = lm_head
-
- # Lora
- if lora:
- from peft import LoraConfig, TaskType, get_peft_model, get_peft_model_state_dict
- from peft.utils.other import fsdp_auto_wrap_policy
- peft_config = LoraConfig(
- bias="none",
- task_type="CAUSAL_LM",
- # inference_mode=False,
- r=8,
- lora_alpha=16,
- lora_dropout=0.05)
- self.language_model = get_peft_model(self.language_model,
- peft_config)
-
- def forward(self, texts: List[str], motion_tokens: Tensor,
- lengths: List[int], tasks: dict):
- if self.lm_type == 'encdec':
- return self.forward_encdec(texts, motion_tokens, lengths, tasks)
- elif self.lm_type == 'dec':
- return self.forward_dec(texts, motion_tokens, lengths, tasks)
- else:
- raise NotImplementedError("Only conditional_multitask supported")
-
- def forward_encdec(
- self,
- texts: List[str],
- motion_tokens: Tensor,
- lengths: List[int],
- tasks: dict,
- ):
-
- # Tensor to string
- motion_strings = self.motion_token_to_string(motion_tokens, lengths)
-
- # Supervised or unsupervised
- # condition = random.choice(
- # ['text', 'motion', 'supervised', 'supervised', 'supervised'])
- condition = random.choice(['supervised', 'supervised', 'supervised'])
-
- if condition == 'text':
- inputs = texts
- outputs = texts
- elif condition == 'motion':
- inputs = motion_strings
- outputs = motion_strings
- else:
- inputs, outputs = self.template_fulfill(tasks, lengths,
- motion_strings, texts)
-
- # Tokenize
- source_encoding = self.tokenizer(inputs,
- padding='max_length',
- max_length=self.max_length,
- truncation=True,
- return_attention_mask=True,
- add_special_tokens=True,
- return_tensors="pt")
-
- source_attention_mask = source_encoding.attention_mask.to(
- motion_tokens.device)
- source_input_ids = source_encoding.input_ids.to(motion_tokens.device)
-
- if condition in ['text', 'motion']:
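- # T5-style span corruption: mask random spans of the input and train
- # the model to reconstruct them, with sentinel ids marking each span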
- batch_size, expanded_input_length = source_input_ids.shape
- mask_indices = np.asarray([
- self.random_spans_noise_mask(expanded_input_length)
- for i in range(batch_size)
- ])
- target_mask = ~mask_indices
- input_ids_sentinel = self.create_sentinel_ids(
- mask_indices.astype(np.int8))
- target_sentinel = self.create_sentinel_ids(
- target_mask.astype(np.int8))
-
- labels_input_ids = self.filter_input_ids(source_input_ids,
- target_sentinel)
- source_input_ids = self.filter_input_ids(source_input_ids,
- input_ids_sentinel)
-
- else:
- target_inputs = self.tokenizer(outputs,
- padding='max_length',
- max_length=self.max_length,
- truncation=True,
- return_attention_mask=True,
- add_special_tokens=True,
- return_tensors="pt")
-
- labels_input_ids = target_inputs.input_ids.to(motion_tokens.device)
- labels_attention_mask = target_inputs.attention_mask.to(
- motion_tokens.device)
-
- labels_input_ids[labels_input_ids == 0] = -100
- outputs = self.language_model(
- input_ids=source_input_ids,
- attention_mask=source_attention_mask
- if condition == 'supervised' else None,
- labels=labels_input_ids,
- decoder_attention_mask=labels_attention_mask
- if condition == 'supervised' else None,
- )
-
- return outputs
-
- def forward_dec(
- self,
- texts: List[str],
- motion_tokens: Tensor,
- lengths: List[int],
- tasks: dict,
- ):
- self.tokenizer.padding_side = "right"
-
- # Tensor to string
- motion_strings = self.motion_token_to_string(motion_tokens, lengths)
-
- # Supervised or unsupervised
- condition = random.choice(
- ['text', 'motion', 'supervised', 'supervised', 'supervised'])
-
- if condition == 'text':
- labels = texts
- elif condition == 'motion':
- labels = motion_strings
- else:
- inputs, outputs = self.template_fulfill(tasks, lengths,
- motion_strings, texts)
- labels = []
- for i in range(len(inputs)):
- labels.append(inputs[i] + ' \n ' + outputs[i] +
- self.tokenizer.eos_token)
-
- # Tokenize
- inputs = self.tokenizer(labels,
- padding='max_length',
- max_length=self.max_length,
- truncation=True,
- return_attention_mask=True,
- return_tensors="pt")
-
- labels_input_ids = inputs.input_ids.to(motion_tokens.device)
- labels_attention_mask = inputs.attention_mask.to(motion_tokens.device)
-
- # print(labels_input_ids[0:5])
-
- outputs = self.language_model(input_ids=labels_input_ids,
- attention_mask=labels_attention_mask,
- labels=inputs["input_ids"])
-
- return outputs
-
- def generate_direct(self,
- texts: List[str],
- max_length: int = 256,
- num_beams: int = 1,
- do_sample: bool = True,
- bad_words_ids: List[int] = None):
-
- # Device
- self.device = self.language_model.device
-
- # Tokenize
- if self.lm_type == 'dec':
- texts = [text + " \n " for text in texts]
-
- source_encoding = self.tokenizer(texts,
- padding='max_length',
- max_length=self.max_length,
- truncation=True,
- return_attention_mask=True,
- add_special_tokens=True,
- return_tensors="pt")
-
- source_input_ids = source_encoding.input_ids.to(self.device)
- source_attention_mask = source_encoding.attention_mask.to(self.device)
-
- if self.lm_type == 'encdec':
- outputs = self.language_model.generate(
- source_input_ids,
- max_length=max_length,
- num_beams=num_beams,
- do_sample=do_sample,
- bad_words_ids=bad_words_ids,
- )
- elif self.lm_type == 'dec':
- outputs = self.language_model.generate(
- input_ids=source_input_ids,
- attention_mask=source_attention_mask,
- pad_token_id=self.tokenizer.pad_token_id,
- do_sample=do_sample,
- max_new_tokens=max_length)
- self.tokenizer.padding_side = 'left'
-
- outputs_string = self.tokenizer.batch_decode(outputs,
- skip_special_tokens=True)
-
- print(texts[:2])
- print(outputs_string[:2])
-
- outputs_tokens, cleaned_text = self.motion_string_to_token(
- outputs_string)
-
- return outputs_tokens, cleaned_text
-
- def generate_conditional(self,
- texts: Optional[List[str]] = None,
- motion_tokens: Optional[Tensor] = None,
- lengths: Optional[List[int]] = None,
- task: str = "t2m",
- with_len: bool = False,
- stage: str = 'train',
- tasks: dict = None):
-
- self.device = self.language_model.device
-
- if task in ["t2m", "m2m", "pred", "inbetween"]:
-
- if task == "t2m":
- assert texts is not None
- motion_strings = [''] * len(texts)
- if not with_len:
- if tasks is None:
- tasks = [{
- 'input':
- ['Generate motion: <Caption_Placeholder>'],
- 'output': ['']
- }] * len(texts)
-
- lengths = [0] * len(texts)
- else:
- tasks = [{
- 'input': [
- 'Generate motion with <Frame_Placeholder> frames: <Caption_Placeholder>'
- ],
- 'output': ['']
- }] * len(texts)
-
- elif task == "pred":
- assert motion_tokens is not None and lengths is not None
- texts = [''] * len(lengths)
- tasks = [{
- 'input': ['Predict motion: <Motion_Placeholder_s1>'],
- 'output': ['']
- }] * len(lengths)
-
- motion_strings_old = self.motion_token_to_string(
- motion_tokens, lengths)
- motion_strings = []
- for i, length in enumerate(lengths):
- split = length // 5
- motion_strings.append(
- '>'.join(motion_strings_old[i].split('>')[:split]) +
- '>')
-
- elif task == "inbetween":
- assert motion_tokens is not None and lengths is not None
- texts = [''] * len(lengths)
- tasks = [{
- 'input': [
- "Complete the masked motion: "
- ],
- 'output': ['']
- }] * len(lengths)
- motion_strings = self.motion_token_to_string(
- motion_tokens, lengths)
-
- inputs, outputs = self.template_fulfill(tasks, lengths,
- motion_strings, texts,
- stage)
-
- outputs_tokens, cleaned_text = self.generate_direct(inputs,
- max_length=128,
- num_beams=1,
- do_sample=True)
-
- return outputs_tokens
-
- elif task == "m2t":
- assert motion_tokens is not None and lengths is not None
-
- motion_strings = self.motion_token_to_string(
- motion_tokens, lengths)
-
- if not with_len:
- tasks = [{
- 'input': ['Generate text: <Motion_Placeholder>'],
- 'output': ['']
- }] * len(lengths)
- else:
- tasks = [{
- 'input': [
- 'Generate text with <Frame_Placeholder> frames: <Motion_Placeholder>'
- ],
- 'output': ['']
- }] * len(lengths)
-
- texts = [''] * len(lengths)
-
- inputs, outputs = self.template_fulfill(tasks, lengths,
- motion_strings, texts)
- outputs_tokens, cleaned_text = self.generate_direct(
- inputs,
- max_length=40,
- num_beams=1,
- do_sample=False,
- # bad_words_ids=self.bad_words_ids
- )
- return cleaned_text
-
- def motion_token_to_string(self, motion_token: Tensor, lengths: List[int]):
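- # Produces strings such as '<motion_id_512><motion_id_12><motion_id_513>'
- # (illustrative, assuming m_codebook_size=512): a start token, one token
- # per motion code, and an end token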
- motion_string = []
- for i in range(len(motion_token)):
- motion_i = motion_token[i].cpu(
- ) if motion_token[i].device.type == 'cuda' else motion_token[i]
- motion_list = motion_i.tolist()[:lengths[i]]
- motion_string.append(
- (f'<motion_id_{self.m_codebook_size}>' +
- ''.join([f'<motion_id_{int(token)}>' for token in motion_list]) +
- f'<motion_id_{self.m_codebook_size + 1}>'))
- return motion_string
-
- def motion_token_list_to_string(self, motion_token: Tensor):
- motion_string = []
- for i in range(len(motion_token)):
- motion_i = motion_token[i].cpu(
- ) if motion_token[i].device.type == 'cuda' else motion_token[i]
- motion_list = motion_i.tolist()
- motion_string.append(
- (f'<motion_id_{self.m_codebook_size}>' +
- ''.join([f'<motion_id_{int(token)}>' for token in motion_list]) +
- f'<motion_id_{self.m_codebook_size + 1}>'))
- return motion_string
-
- def motion_string_to_token(self, motion_string: List[str]):
- motion_tokens = []
- output_string = []
- for i in range(len(motion_string)):
- string = self.get_middle_str(
- motion_string[i], f'<motion_id_{self.m_codebook_size}>',
- f'<motion_id_{self.m_codebook_size + 1}>')
- string_list = string.split('><')
- token_list = [
- int(i.split('_')[-1].replace('>', ''))
- for i in string_list[1:-1]
- ]
- if len(token_list) == 0:
- token_list = [0]
- token_list_padded = torch.tensor(token_list,
- dtype=int).to(self.device)
- motion_tokens.append(token_list_padded)
- output_string.append(motion_string[i].replace(
- string, '<Motion_Placeholder>'))
-
- return motion_tokens, output_string
-
- def placeholder_fulfill(self, prompt: str, length: int, motion_string: str,
- text: str):
-
- seconds = math.floor(length / self.framerate)
- motion_splited = motion_string.split('>')
- token_length = length / self.down_t
- predict_head = int(token_length * self.predict_ratio + 1)
- masked_head = int(token_length * self.inbetween_ratio + 1)
- masked_tail = int(token_length * (1 - self.inbetween_ratio) + 1)
-
- motion_predict_head = '>'.join(
- motion_splited[:predict_head]
- ) + f'><motion_id_{self.m_codebook_size + 1}>'
- motion_predict_last = f'<motion_id_{self.m_codebook_size}>' + '>'.join(
- motion_splited[predict_head:])
-
- motion_masked = '>'.join(
- motion_splited[:masked_head]
- ) + '>' + f'<motion_id_{self.m_codebook_size + 2}>' * (
- masked_tail - masked_head) + '>'.join(motion_splited[masked_tail:])
-
- if random.random() < self.quota_ratio:
- text = f'\"{text}\"'
-
- prompt = prompt.replace('<Caption_Placeholder>', text).replace(
- '<Motion_Placeholder>',
- motion_string).replace('<Frame_Placeholder>', f'{length}').replace(
- '<Second_Placeholder>', '%.1f' % seconds).replace(
- '<Motion_Placeholder_s1>', motion_predict_head).replace(
- '<Motion_Placeholder_s2>',
- motion_predict_last).replace(
- '<Motion_Placeholder_Masked>', motion_masked)
-
- return prompt
-
- def template_fulfill(self,
- tasks,
- lengths,
- motion_strings,
- texts,
- stage='test'):
- inputs = []
- outputs = []
- for i in range(len(lengths)):
- input_template = random.choice(tasks[i]['input'])
- output_template = random.choice(tasks[i]['output'])
- length = lengths[i]
- inputs.append(
- self.placeholder_fulfill(input_template, length,
- motion_strings[i], texts[i]))
- outputs.append(
- self.placeholder_fulfill(output_template, length,
- motion_strings[i], texts[i]))
-
- return inputs, outputs
-
- def get_middle_str(self, content, startStr, endStr):
- try:
- startIndex = content.index(startStr)
- if startIndex >= 0:
- startIndex += len(startStr)
- endIndex = content.index(endStr)
- except ValueError:
- return f'<motion_id_{self.m_codebook_size}><motion_id_0><motion_id_{self.m_codebook_size + 1}>'
-
- return f'<motion_id_{self.m_codebook_size}>' + content[
- startIndex:endIndex] + f'<motion_id_{self.m_codebook_size + 1}>'
-
- def random_spans_noise_mask(self, length):
- # From https://github.com/google-research/text-to-text-transfer-transformer/blob/84f8bcc14b5f2c03de51bd3587609ba8f6bbd1cd/t5/data/preprocessors.py
-
- orig_length = length
-
- num_noise_tokens = int(np.round(length * self.noise_density))
- # avoid degeneracy by ensuring positive numbers of noise and nonnoise tokens.
- num_noise_tokens = min(max(num_noise_tokens, 1), length - 1)
- num_noise_spans = int(
- np.round(num_noise_tokens / self.mean_noise_span_length))
-
- # avoid degeneracy by ensuring positive number of noise spans
- num_noise_spans = max(num_noise_spans, 1)
- num_nonnoise_tokens = length - num_noise_tokens
-
- # pick the lengths of the noise spans and the non-noise spans
- def _random_segmentation(num_items, num_segments):
- """Partition a sequence of items randomly into non-empty segments.
- Args:
- num_items: an integer scalar > 0
- num_segments: an integer scalar in [1, num_items]
- Returns:
- a Tensor with shape [num_segments] containing positive integers that add
- up to num_items
- """
- mask_indices = np.arange(num_items - 1) < (num_segments - 1)
- np.random.shuffle(mask_indices)
- first_in_segment = np.pad(mask_indices, [[1, 0]])
- segment_id = np.cumsum(first_in_segment)
- # count length of sub segments assuming that list is sorted
- _, segment_length = np.unique(segment_id, return_counts=True)
- return segment_length
-
- noise_span_lengths = _random_segmentation(num_noise_tokens,
- num_noise_spans)
- nonnoise_span_lengths = _random_segmentation(num_nonnoise_tokens,
- num_noise_spans)
-
- interleaved_span_lengths = np.reshape(
- np.stack([nonnoise_span_lengths, noise_span_lengths], axis=1),
- [num_noise_spans * 2],
- )
- span_starts = np.cumsum(interleaved_span_lengths)[:-1]
- span_start_indicator = np.zeros((length, ), dtype=np.int8)
- span_start_indicator[span_starts] = True
- span_num = np.cumsum(span_start_indicator)
- is_noise = np.equal(span_num % 2, 1)
-
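- # e.g. length=12, noise_density=0.15 -> one 2-token noise span, such as
- # [F,F,F,F,T,T,F,F,F,F,F,F] (span placement is random; illustrative only)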
- return is_noise[:orig_length]
-
- def create_sentinel_ids(self, mask_indices):
- # From https://github.com/huggingface/transformers/blob/main/examples/flax/language-modeling/run_t5_mlm_flax.py
- start_indices = mask_indices - np.roll(mask_indices, 1,
- axis=-1) * mask_indices
- start_indices[:, 0] = mask_indices[:, 0]
-
- sentinel_ids = np.where(start_indices != 0,
- np.cumsum(start_indices, axis=-1),
- start_indices)
- sentinel_ids = np.where(sentinel_ids != 0,
- (len(self.tokenizer) - sentinel_ids), 0)
- sentinel_ids -= mask_indices - start_indices
-
- return sentinel_ids
-
- def filter_input_ids(self, input_ids, sentinel_ids):
- # From https://github.com/huggingface/transformers/blob/main/examples/flax/language-modeling/run_t5_mlm_flax.py
- batch_size = input_ids.shape[0]
-
- input_ids_full = np.where(sentinel_ids != 0, sentinel_ids,
- input_ids.to('cpu'))
-
- # input_ids tokens and sentinel tokens are >= 0, tokens < 0 are
- # masked tokens coming after sentinel tokens and should be removed
- input_ids = input_ids_full[input_ids_full >= 0].reshape(
- (batch_size, -1))
- input_ids = np.concatenate(
- [
- input_ids,
- np.full((batch_size, 1),
- self.tokenizer.eos_token_id,
- dtype=np.int32),
- ],
- axis=-1,
- )
-
- input_ids = torch.tensor(input_ids, device=self.device)
-
- return input_ids
diff --git a/spaces/PAIR/PAIR-Diffusion/ldm/modules/midas/utils.py b/spaces/PAIR/PAIR-Diffusion/ldm/modules/midas/utils.py
deleted file mode 100644
index 9a9d3b5b66370fa98da9e067ba53ead848ea9a59..0000000000000000000000000000000000000000
--- a/spaces/PAIR/PAIR-Diffusion/ldm/modules/midas/utils.py
+++ /dev/null
@@ -1,189 +0,0 @@
-"""Utils for monoDepth."""
-import sys
-import re
-import numpy as np
-import cv2
-import torch
-
-
-def read_pfm(path):
- """Read pfm file.
-
- Args:
- path (str): path to file
-
- Returns:
- tuple: (data, scale)
- """
- with open(path, "rb") as file:
-
- color = None
- width = None
- height = None
- scale = None
- endian = None
-
- header = file.readline().rstrip()
- if header.decode("ascii") == "PF":
- color = True
- elif header.decode("ascii") == "Pf":
- color = False
- else:
- raise Exception("Not a PFM file: " + path)
-
- dim_match = re.match(r"^(\d+)\s(\d+)\s$", file.readline().decode("ascii"))
- if dim_match:
- width, height = list(map(int, dim_match.groups()))
- else:
- raise Exception("Malformed PFM header.")
-
- scale = float(file.readline().decode("ascii").rstrip())
- if scale < 0:
- # little-endian
- endian = "<"
- scale = -scale
- else:
- # big-endian
- endian = ">"
-
- data = np.fromfile(file, endian + "f")
- shape = (height, width, 3) if color else (height, width)
-
- data = np.reshape(data, shape)
- data = np.flipud(data)
-
- return data, scale
-
-
-def write_pfm(path, image, scale=1):
- """Write pfm file.
-
- Args:
- path (str): path to file
- image (array): data
- scale (int, optional): Scale. Defaults to 1.
- """
-
- with open(path, "wb") as file:
- color = None
-
- if image.dtype.name != "float32":
- raise Exception("Image dtype must be float32.")
-
- image = np.flipud(image)
-
- if len(image.shape) == 3 and image.shape[2] == 3: # color image
- color = True
- elif (
- len(image.shape) == 2 or len(image.shape) == 3 and image.shape[2] == 1
- ): # greyscale
- color = False
- else:
- raise Exception("Image must have H x W x 3, H x W x 1 or H x W dimensions.")
-
- file.write("PF\n" if color else "Pf\n".encode())
- file.write("%d %d\n".encode() % (image.shape[1], image.shape[0]))
-
- endian = image.dtype.byteorder
-
- if endian == "<" or endian == "=" and sys.byteorder == "little":
- scale = -scale
-
- file.write("%f\n".encode() % scale)
-
- image.tofile(file)
-
-
-def read_image(path):
- """Read image and output RGB image (0-1).
-
- Args:
- path (str): path to file
-
- Returns:
- array: RGB image (0-1)
- """
- img = cv2.imread(path)
-
- if img.ndim == 2:
- img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
-
- img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) / 255.0
-
- return img
-
-
-def resize_image(img):
- """Resize image and make it fit for network.
-
- Args:
- img (array): image
-
- Returns:
- tensor: data ready for network
- """
- height_orig = img.shape[0]
- width_orig = img.shape[1]
-
- if width_orig > height_orig:
- scale = width_orig / 384
- else:
- scale = height_orig / 384
-
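- # e.g. a 1920x1080 input: scale = 1920/384 = 5.0, so
- # height = ceil(1080/5.0/32)*32 = 224 and width = ceil(1920/5.0/32)*32 = 384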
- height = (np.ceil(height_orig / scale / 32) * 32).astype(int)
- width = (np.ceil(width_orig / scale / 32) * 32).astype(int)
-
- img_resized = cv2.resize(img, (width, height), interpolation=cv2.INTER_AREA)
-
- img_resized = (
- torch.from_numpy(np.transpose(img_resized, (2, 0, 1))).contiguous().float()
- )
- img_resized = img_resized.unsqueeze(0)
-
- return img_resized
-
-
-def resize_depth(depth, width, height):
- """Resize depth map and bring to CPU (numpy).
-
- Args:
- depth (tensor): depth
- width (int): image width
- height (int): image height
-
- Returns:
- array: processed depth
- """
- depth = torch.squeeze(depth[0, :, :, :]).to("cpu")
-
- depth_resized = cv2.resize(
- depth.numpy(), (width, height), interpolation=cv2.INTER_CUBIC
- )
-
- return depth_resized
-
-def write_depth(path, depth, bits=1):
- """Write depth map to pfm and png file.
-
- Args:
- path (str): filepath without extension
- depth (array): depth
- bits (int, optional): bytes per output value; 1 writes an 8-bit PNG,
- 2 a 16-bit PNG. Defaults to 1.
- """
- write_pfm(path + ".pfm", depth.astype(np.float32))
-
- depth_min = depth.min()
- depth_max = depth.max()
-
- max_val = (2**(8*bits))-1
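- # e.g. bits=1 -> max_val = 255 (8-bit PNG); bits=2 -> max_val = 65535 (16-bit PNG)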
-
- if depth_max - depth_min > np.finfo("float").eps:
- out = max_val * (depth - depth_min) / (depth_max - depth_min)
- else:
- out = np.zeros(depth.shape, dtype=depth.dtype)
-
- if bits == 1:
- cv2.imwrite(path + ".png", out.astype("uint8"))
- elif bits == 2:
- cv2.imwrite(path + ".png", out.astype("uint16"))
-
- return
diff --git a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmseg/models/utils/res_layer.py b/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmseg/models/utils/res_layer.py
deleted file mode 100644
index b2c07b47007e92e4c3945b989e79f9d50306f5fe..0000000000000000000000000000000000000000
--- a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmseg/models/utils/res_layer.py
+++ /dev/null
@@ -1,94 +0,0 @@
-from annotator.uniformer.mmcv.cnn import build_conv_layer, build_norm_layer
-from torch import nn as nn
-
-
-class ResLayer(nn.Sequential):
- """ResLayer to build ResNet style backbone.
-
- Args:
- block (nn.Module): block used to build ResLayer.
- inplanes (int): inplanes of block.
- planes (int): planes of block.
- num_blocks (int): number of blocks.
- stride (int): stride of the first block. Default: 1
- dilation (int): dilation used by the blocks. Default: 1
- avg_down (bool): Use AvgPool instead of stride conv when
- downsampling in the bottleneck. Default: False
- conv_cfg (dict): dictionary to construct and config conv layer.
- Default: None
- norm_cfg (dict): dictionary to construct and config norm layer.
- Default: dict(type='BN')
- multi_grid (tuple[int] | None): Multi grid dilation rates of last
- stage. Default: None
- contract_dilation (bool): Whether to contract the first dilation of
- each layer. Default: False
- """
-
- def __init__(self,
- block,
- inplanes,
- planes,
- num_blocks,
- stride=1,
- dilation=1,
- avg_down=False,
- conv_cfg=None,
- norm_cfg=dict(type='BN'),
- multi_grid=None,
- contract_dilation=False,
- **kwargs):
- self.block = block
-
- downsample = None
- if stride != 1 or inplanes != planes * block.expansion:
- downsample = []
- conv_stride = stride
- if avg_down:
- conv_stride = 1
- downsample.append(
- nn.AvgPool2d(
- kernel_size=stride,
- stride=stride,
- ceil_mode=True,
- count_include_pad=False))
- downsample.extend([
- build_conv_layer(
- conv_cfg,
- inplanes,
- planes * block.expansion,
- kernel_size=1,
- stride=conv_stride,
- bias=False),
- build_norm_layer(norm_cfg, planes * block.expansion)[1]
- ])
- downsample = nn.Sequential(*downsample)
-
- layers = []
- if multi_grid is None:
- if dilation > 1 and contract_dilation:
- first_dilation = dilation // 2
- else:
- first_dilation = dilation
- else:
- first_dilation = multi_grid[0]
- layers.append(
- block(
- inplanes=inplanes,
- planes=planes,
- stride=stride,
- dilation=first_dilation,
- downsample=downsample,
- conv_cfg=conv_cfg,
- norm_cfg=norm_cfg,
- **kwargs))
- inplanes = planes * block.expansion
- for i in range(1, num_blocks):
- layers.append(
- block(
- inplanes=inplanes,
- planes=planes,
- stride=1,
- dilation=dilation if multi_grid is None else multi_grid[i],
- conv_cfg=conv_cfg,
- norm_cfg=norm_cfg,
- **kwargs))
- super(ResLayer, self).__init__(*layers)
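-
-
-# A minimal usage sketch (illustrative only; `Bottleneck` stands in for any
-# mmseg residual block class exposing an `expansion` attribute):
-#   res_layer = ResLayer(Bottleneck, inplanes=256, planes=128, num_blocks=4,
-#                        stride=2, norm_cfg=dict(type='BN'))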
diff --git a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/language/elisp/spec.go b/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/language/elisp/spec.go
deleted file mode 100644
index f7feb45b8415e001a46bd7632bbdcf1d594cf163..0000000000000000000000000000000000000000
Binary files a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/language/elisp/spec.go and /dev/null differ
diff --git a/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/modeling/detector/__init__.py b/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/modeling/detector/__init__.py
deleted file mode 100644
index 4ccccd580d9470841c4529d797aa9b635271718e..0000000000000000000000000000000000000000
--- a/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/modeling/detector/__init__.py
+++ /dev/null
@@ -1,11 +0,0 @@
-from .generalized_rcnn import GeneralizedRCNN
-from .generalized_vl_rcnn import GeneralizedVLRCNN
-
-_DETECTION_META_ARCHITECTURES = {"GeneralizedRCNN": GeneralizedRCNN,
- "GeneralizedVLRCNN": GeneralizedVLRCNN
- }
-
-
-def build_detection_model(cfg):
- meta_arch = _DETECTION_META_ARCHITECTURES[cfg.MODEL.META_ARCHITECTURE]
- return meta_arch(cfg)
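-
-
-# Illustrative usage (assumes a yacs-style config object):
-#   cfg.MODEL.META_ARCHITECTURE = "GeneralizedRCNN"
-#   model = build_detection_model(cfg)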
diff --git a/spaces/Priyanka-Kumavat/Object-Detection/app.py b/spaces/Priyanka-Kumavat/Object-Detection/app.py
deleted file mode 100644
index 35f2b5119c73a21bd2919f6c8a52ae6bb1055294..0000000000000000000000000000000000000000
--- a/spaces/Priyanka-Kumavat/Object-Detection/app.py
+++ /dev/null
@@ -1,140 +0,0 @@
-import streamlit as st
-import torch
-import torchvision
-import torchvision.transforms as transforms
-from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
-from torchvision.transforms import ToTensor
-from PIL import Image, ImageDraw
-import cv2
-import numpy as np
-import pandas as pd
-import os
-
-import tempfile
-from tempfile import NamedTemporaryFile
-
-# Create an FRCNN model instance with the same structure as the saved model
-model = torchvision.models.detection.fasterrcnn_resnet50_fpn(num_classes=91)
-
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-
- # Load the saved parameters into the model and move it to the selected device
- model.load_state_dict(torch.load("frcnn_model.pth", map_location=device))
- model.to(device)
-
-# Define the classes for object detection
-classes = [
- '__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
- 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'N/A', 'stop sign',
- 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
- 'elephant', 'bear', 'zebra', 'giraffe', 'N/A', 'backpack', 'umbrella', 'N/A',
- 'N/A', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard',
- 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard',
- 'surfboard', 'tennis racket', 'bottle', 'N/A', 'wine glass', 'cup', 'fork',
- 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange',
- 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
- 'potted plant', 'bed', 'N/A', 'dining table', 'N/A', 'N/A', 'toilet', 'N/A',
- 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',
- 'oven', 'toaster', 'sink', 'refrigerator', 'N/A', 'book', 'clock', 'vase',
- 'scissors', 'teddy bear', 'hair drier', 'toothbrush'
- ]
-
- # Confidence-score threshold for keeping a detection (not an IoU threshold)
- threshold = 0.5
-
-st.title(""" Image Object Detections """)
-
-# st.subheader("Prediction of Object Detection")
-
-st.write(""" The Faster R-CNN (Region-based Convolutional Neural Network) is a cutting-edge object detection model that combines deep
- learning with region proposal networks to achieve highly accurate object detection in images.
- It is trained on a large dataset of images and can detect a wide range of objects with high precision and recall.
- The model is based on the ResNet-50 architecture, which allows it to capture complex visual features from the input image.
- It uses a two-stage approach, first proposing regions of interest (RoIs) in the image and then classifying and refining the
- object boundaries within these RoIs. This approach makes it extremely efficient and accurate in detecting multiple objects
- in a single image.
- """)
-
-
-# images = ["test2.jpg","img7.jpg","img20.jpg","img23.jpg"]
-# with st.sidebar:
-# st.write("Choose an image")
-# selected_image = st.selectbox("Select an image", images)
-
-
-images = ["test2.jpg","img7.jpg","img20.jpg","img23.jpg"]
-with st.sidebar:
- st.write("Choose an image")
- st.image(images)
-
-
-# define the function to perform object detection on an image
-def detect_objects(image_path):
- # load the image
- image = Image.open(image_path).convert('RGB')
-
- # convert the image to a tensor
- image_tensor = ToTensor()(image).to(device)
-
- # run the image through the model to get the predictions
- model.eval()
- with torch.no_grad():
- predictions = model([image_tensor])
-
- # filter out the predictions below the threshold
- scores = predictions[0]['scores'].cpu().numpy()
- boxes = predictions[0]['boxes'].cpu().numpy()
- labels = predictions[0]['labels'].cpu().numpy()
- mask = scores > threshold
- scores = scores[mask]
- boxes = boxes[mask]
- labels = labels[mask]
-
- # create a new image with the predicted objects outlined in rectangles
- draw = ImageDraw.Draw(image)
- for box, label in zip(boxes, labels):
-
- # draw the rectangle around the object
- draw.rectangle([(box[0], box[1]), (box[2], box[3])], outline='red')
-
- # write the object class above the rectangle
- class_name = classes[label]
- draw.text((box[0], box[1]), class_name, fill='yellow')
-
- # show the image
- st.write("Obects detected in the image are: ")
- st.image(image, use_column_width=True)
- # st.image.show()
-
-
-file = st.file_uploader('Upload an Image', type=(["jpeg", "jpg", "png"]))
-
-if file is None:
- st.write("Please upload an image file")
-else:
- image = Image.open(file)
- st.write("Input Image")
- st.image(image, use_column_width=True)
- with NamedTemporaryFile(dir='.', suffix='.jpeg') as f:
- # Write the upload to a temporary file so that detect_objects()
- # can be given a real file path
- f.write(file.getbuffer())
- detect_objects(f.name)
-
-st.write(""" This Streamlit app provides a user-friendly interface for uploading an image and visualizing the output of the Faster R-CNN
- model. It displays the uploaded image along with the predicted objects highlighted with bounding box overlays. The app allows
- users to explore the detected objects in the image, providing valuable insights and understanding of the model's predictions.
- It can be used for a wide range of applications, such as object recognition, image analysis, and visual storytelling.
- Whether it's identifying objects in real-world images or understanding the capabilities of state-of-the-art object detection
- models, this Streamlit app powered by Faster R-CNN is a powerful tool for computer vision tasks.
- """)
diff --git a/spaces/Rakot2223/faster-whisper-webui/README.md b/spaces/Rakot2223/faster-whisper-webui/README.md
deleted file mode 100644
index ea2c81a62fc4216c3d5ac1c110e8abdedad0cafc..0000000000000000000000000000000000000000
--- a/spaces/Rakot2223/faster-whisper-webui/README.md
+++ /dev/null
@@ -1,179 +0,0 @@
----
-title: Faster Whisper Webui
-emoji: 🚀
-colorFrom: indigo
-colorTo: blue
-sdk: gradio
-sdk_version: 3.23.0
-app_file: app.py
-pinned: false
-license: apache-2.0
-duplicated_from: aadnk/faster-whisper-webui
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
-
-# Running Locally
-
-To run this program locally, first install Python 3.9+ and Git. Then install PyTorch 1.10.1+ and all the other dependencies:
-```
-pip install -r requirements.txt
-```
-
-You can find detailed instructions for how to install this on Windows 10/11 [here (PDF)](docs/windows/install_win10_win11.pdf).
-
-Finally, run the full version (no audio length restrictions) of the app with parallel CPU/GPU enabled:
-```
-python app.py --input_audio_max_duration -1 --server_name 127.0.0.1 --auto_parallel True
-```
-
-You can also run the CLI interface, which is similar to Whisper's own CLI but also supports the following additional arguments:
-```
-python cli.py \
-[--vad {none,silero-vad,silero-vad-skip-gaps,silero-vad-expand-into-gaps,periodic-vad}] \
-[--vad_merge_window VAD_MERGE_WINDOW] \
-[--vad_max_merge_size VAD_MAX_MERGE_SIZE] \
-[--vad_padding VAD_PADDING] \
-[--vad_prompt_window VAD_PROMPT_WINDOW]
-[--vad_cpu_cores NUMBER_OF_CORES]
-[--vad_parallel_devices COMMA_DELIMITED_DEVICES]
-[--auto_parallel BOOLEAN]
-```
-You may also use URLs in addition to file paths as input.
-```
-python cli.py --model large --vad silero-vad --language Japanese "https://www.youtube.com/watch?v=4cICErqqRSM"
-```
-
-Rather than supplying arguments to `app.py` or `cli.py`, you can also use the configuration file [config.json5](config.json5). See that file for more information.
-If you want to use a different configuration file, you can use the `WHISPER_WEBUI_CONFIG` environment variable to specify the path to another file.
-
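-For example, to point the app at an alternative configuration file (the path below is only illustrative):
-```
-WHISPER_WEBUI_CONFIG=/path/to/my-config.json5 python app.py
-```
-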
-### Multiple Files
-
-You can upload multiple files either through the "Upload files" option, or as a playlist on YouTube.
-Each audio file will then be processed in turn, and the resulting SRT/VTT/Transcript will be made available in the "Download" section.
-When more than one file is processed, the UI will also generate a "All_Output" zip file containing all the text output files.
-
-## Whisper Implementation
-
-You can choose between using `whisper` or `faster-whisper`. [Faster Whisper](https://github.com/guillaumekln/faster-whisper) is a drop-in replacement for the
-default Whisper which achieves up to a 4x speedup and a 2x reduction in memory usage.
-
-You can install the requirements for a specific Whisper implementation in `requirements-fastWhisper.txt`
-or `requirements-whisper.txt`:
-```
-pip install -r requirements-fastWhisper.txt
-```
-And then run the App or the CLI with the `--whisper_implementation fast-whisper` flag:
-```
-python app.py --whisper_implementation fast-whisper --input_audio_max_duration -1 --server_name 127.0.0.1 --auto_parallel True
-```
-You can also select the whisper implementation in `config.json5`:
-```json5
-{
- "whisper_implementation": "fast-whisper"
-}
-```
-### GPU Acceleration
-
-In order to use GPU acceleration with Faster Whisper, both CUDA 11.2 and cuDNN 8 must be installed. You may want to install it in a virtual environment like Anaconda.
-
-## Google Colab
-
-You can also run this Web UI directly on [Google Colab](https://colab.research.google.com/drive/1qeTSvi7Bt_5RMm88ipW4fkcsMOKlDDss?usp=sharing), if you haven't got a GPU powerful enough to run the larger models.
-
-See the [colab documentation](docs/colab.md) for more information.
-
-## Parallel Execution
-
-You can also run both the Web-UI or the CLI on multiple GPUs in parallel, using the `vad_parallel_devices` option. This takes a comma-delimited list of
-device IDs (0, 1, etc.) that Whisper should be distributed to and run on concurrently:
-```
-python cli.py --model large --vad silero-vad --language Japanese \
---vad_parallel_devices 0,1 "https://www.youtube.com/watch?v=4cICErqqRSM"
-```
-
-Note that this requires a VAD to function properly, otherwise only the first GPU will be used. Though you could use `periodic-vad` to avoid taking the hit
-of running Silero VAD, at a slight cost to accuracy.
-
-This is achieved by creating N child processes (where N is the number of selected devices), where Whisper is run concurrently. In `app.py`, you can also
-set the `vad_process_timeout` option. This configures the number of seconds until a process is killed due to inactivity, freeing RAM and video memory.
-The default value is 30 minutes.
-
-```
-python app.py --input_audio_max_duration -1 --vad_parallel_devices 0,1 --vad_process_timeout 3600
-```
-
-To execute the Silero VAD itself in parallel, use the `vad_cpu_cores` option:
-```
-python app.py --input_audio_max_duration -1 --vad_parallel_devices 0,1 --vad_process_timeout 3600 --vad_cpu_cores 4
-```
-
-You may also use `vad_process_timeout` with a single device (`--vad_parallel_devices 0`), if you prefer to always free video memory after a period of time.
-
-### Auto Parallel
-
-You can also set `auto_parallel` to `True`. This will set `vad_parallel_devices` to use all the GPU devices on the system, and `vad_cpu_cores` to be equal to the number of
-cores (up to 8):
-```
-python app.py --input_audio_max_duration -1 --auto_parallel True
-```
-
-# Docker
-
-To run it in Docker, first install Docker and optionally the NVIDIA Container Toolkit in order to use the GPU.
-Then either use the GitLab hosted container below, or check out this repository and build an image:
-```
-sudo docker build -t whisper-webui:1 .
-```
-
-You can then start the WebUI with GPU support like so:
-```
-sudo docker run -d --gpus=all -p 7860:7860 whisper-webui:1
-```
-
-Leave out "--gpus=all" if you don't have access to a GPU with enough memory, and are fine with running it on the CPU only:
-```
-sudo docker run -d -p 7860:7860 whisper-webui:1
-```
-
-# GitLab Docker Registry
-
-This Docker container is also hosted on GitLab:
-
-```
-sudo docker run -d --gpus=all -p 7860:7860 registry.gitlab.com/aadnk/whisper-webui:latest
-```
-
-## Custom Arguments
-
-You can also pass custom arguments to `app.py` in the Docker container, for instance to be able to use all the GPUs in parallel (replace administrator with your user):
-```
-sudo docker run -d --gpus all -p 7860:7860 \
---mount type=bind,source=/home/administrator/.cache/whisper,target=/root/.cache/whisper \
---mount type=bind,source=/home/administrator/.cache/huggingface,target=/root/.cache/huggingface \
---restart=on-failure:15 registry.gitlab.com/aadnk/whisper-webui:latest \
-app.py --input_audio_max_duration -1 --server_name 0.0.0.0 --auto_parallel True \
---default_vad silero-vad --default_model_name large
-```
-
-You can also call `cli.py` the same way:
-```
-sudo docker run --gpus all \
---mount type=bind,source=/home/administrator/.cache/whisper,target=/root/.cache/whisper \
---mount type=bind,source=/home/administrator/.cache/huggingface,target=/root/.cache/huggingface \
---mount type=bind,source=${PWD},target=/app/data \
-registry.gitlab.com/aadnk/whisper-webui:latest \
-cli.py --model large --auto_parallel True --vad silero-vad \
---output_dir /app/data /app/data/YOUR-FILE-HERE.mp4
-```
-
-## Caching
-
-Note that the models themselves are currently not included in the Docker images, and will be downloaded on demand.
-To avoid this, bind the directory /root/.cache/whisper to some directory on the host (for instance /home/administrator/.cache/whisper), where you can (optionally)
-prepopulate the directory with the different Whisper models.
-```
-sudo docker run -d --gpus=all -p 7860:7860 \
---mount type=bind,source=/home/administrator/.cache/whisper,target=/root/.cache/whisper \
-registry.gitlab.com/aadnk/whisper-webui:latest
-```
\ No newline at end of file
diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/_distutils/command/upload.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/_distutils/command/upload.py
deleted file mode 100644
index 6af5394339437497134fa74a97463e3e5309ff29..0000000000000000000000000000000000000000
--- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/_distutils/command/upload.py
+++ /dev/null
@@ -1,205 +0,0 @@
-"""
-distutils.command.upload
-
-Implements the Distutils 'upload' subcommand (upload package to a package
-index).
-"""
-
-import os
-import io
-import hashlib
-from base64 import standard_b64encode
-from urllib.request import urlopen, Request, HTTPError
-from urllib.parse import urlparse
-from distutils.errors import DistutilsError, DistutilsOptionError
-from distutils.core import PyPIRCCommand
-from distutils.spawn import spawn
-from distutils import log
-
-
-# PyPI Warehouse supports MD5, SHA256, and Blake2 (blake2-256)
-# https://bugs.python.org/issue40698
-_FILE_CONTENT_DIGESTS = {
- "md5_digest": getattr(hashlib, "md5", None),
- "sha256_digest": getattr(hashlib, "sha256", None),
- "blake2_256_digest": getattr(hashlib, "blake2b", None),
-}
-
-
-class upload(PyPIRCCommand):
-
- description = "upload binary package to PyPI"
-
- user_options = PyPIRCCommand.user_options + [
- ('sign', 's', 'sign files to upload using gpg'),
- ('identity=', 'i', 'GPG identity used to sign files'),
- ]
-
- boolean_options = PyPIRCCommand.boolean_options + ['sign']
-
- def initialize_options(self):
- PyPIRCCommand.initialize_options(self)
- self.username = ''
- self.password = ''
- self.show_response = 0
- self.sign = False
- self.identity = None
-
- def finalize_options(self):
- PyPIRCCommand.finalize_options(self)
- if self.identity and not self.sign:
- raise DistutilsOptionError("Must use --sign for --identity to have meaning")
- config = self._read_pypirc()
- if config != {}:
- self.username = config['username']
- self.password = config['password']
- self.repository = config['repository']
- self.realm = config['realm']
-
- # getting the password from the distribution
- # if previously set by the register command
- if not self.password and self.distribution.password:
- self.password = self.distribution.password
-
- def run(self):
- if not self.distribution.dist_files:
- msg = (
- "Must create and upload files in one command "
- "(e.g. setup.py sdist upload)"
- )
- raise DistutilsOptionError(msg)
- for command, pyversion, filename in self.distribution.dist_files:
- self.upload_file(command, pyversion, filename)
-
- def upload_file(self, command, pyversion, filename): # noqa: C901
- # Makes sure the repository URL is compliant
- schema, netloc, url, params, query, fragments = urlparse(self.repository)
- if params or query or fragments:
- raise AssertionError("Incompatible url %s" % self.repository)
-
- if schema not in ('http', 'https'):
- raise AssertionError("unsupported schema " + schema)
-
- # Sign if requested
- if self.sign:
- gpg_args = ["gpg", "--detach-sign", "-a", filename]
- if self.identity:
- gpg_args[2:2] = ["--local-user", self.identity]
- spawn(gpg_args, dry_run=self.dry_run)
-
- # Fill in the data - send all the meta-data in case we need to
- # register a new release
- f = open(filename, 'rb')
- try:
- content = f.read()
- finally:
- f.close()
-
- meta = self.distribution.metadata
- data = {
- # action
- ':action': 'file_upload',
- 'protocol_version': '1',
- # identify release
- 'name': meta.get_name(),
- 'version': meta.get_version(),
- # file content
- 'content': (os.path.basename(filename), content),
- 'filetype': command,
- 'pyversion': pyversion,
- # additional meta-data
- 'metadata_version': '1.0',
- 'summary': meta.get_description(),
- 'home_page': meta.get_url(),
- 'author': meta.get_contact(),
- 'author_email': meta.get_contact_email(),
- 'license': meta.get_licence(),
- 'description': meta.get_long_description(),
- 'keywords': meta.get_keywords(),
- 'platform': meta.get_platforms(),
- 'classifiers': meta.get_classifiers(),
- 'download_url': meta.get_download_url(),
- # PEP 314
- 'provides': meta.get_provides(),
- 'requires': meta.get_requires(),
- 'obsoletes': meta.get_obsoletes(),
- }
-
- data['comment'] = ''
-
- # file content digests
- for digest_name, digest_cons in _FILE_CONTENT_DIGESTS.items():
- if digest_cons is None:
- continue
- try:
- data[digest_name] = digest_cons(content).hexdigest()
- except ValueError:
- # hash digest not available or blocked by security policy
- pass
-
- if self.sign:
- with open(filename + ".asc", "rb") as f:
- data['gpg_signature'] = (os.path.basename(filename) + ".asc", f.read())
-
- # set up the authentication
- user_pass = (self.username + ":" + self.password).encode('ascii')
- # The exact encoding of the authentication string is debated.
- # Anyway PyPI only accepts ascii for both username or password.
- auth = "Basic " + standard_b64encode(user_pass).decode('ascii')
-
- # Build up the MIME payload for the POST data
- boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'
- sep_boundary = b'\r\n--' + boundary.encode('ascii')
- end_boundary = sep_boundary + b'--\r\n'
- body = io.BytesIO()
- for key, value in data.items():
- title = '\r\nContent-Disposition: form-data; name="%s"' % key
- # handle multiple entries for the same name
- if not isinstance(value, list):
- value = [value]
- for value in value:
- if type(value) is tuple:
- title += '; filename="%s"' % value[0]
- value = value[1]
- else:
- value = str(value).encode('utf-8')
- body.write(sep_boundary)
- body.write(title.encode('utf-8'))
- body.write(b"\r\n\r\n")
- body.write(value)
- body.write(end_boundary)
- body = body.getvalue()
-
- msg = "Submitting {} to {}".format(filename, self.repository)
- self.announce(msg, log.INFO)
-
- # build the Request
- headers = {
- 'Content-type': 'multipart/form-data; boundary=%s' % boundary,
- 'Content-length': str(len(body)),
- 'Authorization': auth,
- }
-
- request = Request(self.repository, data=body, headers=headers)
- # send the data
- try:
- result = urlopen(request)
- status = result.getcode()
- reason = result.msg
- except HTTPError as e:
- status = e.code
- reason = e.msg
- except OSError as e:
- self.announce(str(e), log.ERROR)
- raise
-
- if status == 200:
- self.announce('Server response ({}): {}'.format(status, reason), log.INFO)
- if self.show_response:
- text = self._read_pypi_response(result)
- msg = '\n'.join(('-' * 75, text, '-' * 75))
- self.announce(msg, log.INFO)
- else:
- msg = 'Upload failed ({}): {}'.format(status, reason)
- self.announce(msg, log.ERROR)
- raise DistutilsError(msg)
diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/_distutils/util.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/_distutils/util.py
deleted file mode 100644
index 4763202b67cf3b7dc849fcca401be5df6adbf083..0000000000000000000000000000000000000000
--- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/_distutils/util.py
+++ /dev/null
@@ -1,513 +0,0 @@
-"""distutils.util
-
-Miscellaneous utility functions -- anything that doesn't fit into
-one of the other *util.py modules.
-"""
-
-import importlib.util
-import os
-import re
-import string
-import subprocess
-import sys
-import sysconfig
-import functools
-
-from distutils.errors import DistutilsPlatformError, DistutilsByteCompileError
-from distutils.dep_util import newer
-from distutils.spawn import spawn
-from distutils import log
-
-
-def get_host_platform():
- """
- Return a string that identifies the current platform. Use this
- function to distinguish platform-specific build directories and
- platform-specific built distributions.
- """
-
- # This function initially exposed platforms as defined in Python 3.9
- # even with older Python versions when distutils was split out.
- # Now it delegates to stdlib sysconfig, but maintains compatibility.
-
- if sys.version_info < (3, 8):
- if os.name == 'nt':
- if '(arm)' in sys.version.lower():
- return 'win-arm32'
- if '(arm64)' in sys.version.lower():
- return 'win-arm64'
-
- if sys.version_info < (3, 9):
- if os.name == "posix" and hasattr(os, 'uname'):
- osname, host, release, version, machine = os.uname()
- if osname[:3] == "aix":
- from .py38compat import aix_platform
-
- return aix_platform(osname, version, release)
-
- return sysconfig.get_platform()
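-
-# Typical return values (illustrative): 'linux-x86_64', 'win-amd64',
-# 'macosx-10.9-x86_64'.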
-
-
-def get_platform():
- if os.name == 'nt':
- TARGET_TO_PLAT = {
- 'x86': 'win32',
- 'x64': 'win-amd64',
- 'arm': 'win-arm32',
- 'arm64': 'win-arm64',
- }
- target = os.environ.get('VSCMD_ARG_TGT_ARCH')
- return TARGET_TO_PLAT.get(target) or get_host_platform()
- return get_host_platform()
-
-
-if sys.platform == 'darwin':
- _syscfg_macosx_ver = None # cache the version pulled from sysconfig
-MACOSX_VERSION_VAR = 'MACOSX_DEPLOYMENT_TARGET'
-
-
-def _clear_cached_macosx_ver():
- """For testing only. Do not call."""
- global _syscfg_macosx_ver
- _syscfg_macosx_ver = None
-
-
-def get_macosx_target_ver_from_syscfg():
- """Get the version of macOS latched in the Python interpreter configuration.
- Returns the version as a string, or None if one cannot be obtained. Cached."""
- global _syscfg_macosx_ver
- if _syscfg_macosx_ver is None:
- from distutils import sysconfig
-
- ver = sysconfig.get_config_var(MACOSX_VERSION_VAR) or ''
- if ver:
- _syscfg_macosx_ver = ver
- return _syscfg_macosx_ver
-
-
-def get_macosx_target_ver():
- """Return the version of macOS for which we are building.
-
- The target version defaults to the version in sysconfig latched at time
- the Python interpreter was built, unless overridden by an environment
- variable. If neither source has a value, then None is returned"""
-
- syscfg_ver = get_macosx_target_ver_from_syscfg()
- env_ver = os.environ.get(MACOSX_VERSION_VAR)
-
- if env_ver:
- # Validate overridden version against sysconfig version, if have both.
- # Ensure that the deployment target of the build process is not less
- # than 10.3 if the interpreter was built for 10.3 or later. This
- # ensures extension modules are built with correct compatibility
- # values, specifically LDSHARED which can use
- # '-undefined dynamic_lookup' which only works on >= 10.3.
- if (
- syscfg_ver
- and split_version(syscfg_ver) >= [10, 3]
- and split_version(env_ver) < [10, 3]
- ):
- my_msg = (
- '$' + MACOSX_VERSION_VAR + ' mismatch: '
- 'now "%s" but "%s" during configure; '
- 'must use 10.3 or later' % (env_ver, syscfg_ver)
- )
- raise DistutilsPlatformError(my_msg)
- return env_ver
- return syscfg_ver
-
-
-def split_version(s):
- """Convert a dot-separated string into a list of numbers for comparisons"""
- return [int(n) for n in s.split('.')]
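-
-# Example (illustrative): split_version('10.14.6') -> [10, 14, 6]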
-
-
-def convert_path(pathname):
- """Return 'pathname' as a name that will work on the native filesystem,
- i.e. split it on '/' and put it back together again using the current
- directory separator. Needed because filenames in the setup script are
- always supplied in Unix style, and have to be converted to the local
- convention before we can actually use them in the filesystem. Raises
- ValueError on non-Unix-ish systems if 'pathname' either starts or
- ends with a slash.
- """
- if os.sep == '/':
- return pathname
- if not pathname:
- return pathname
- if pathname[0] == '/':
- raise ValueError("path '%s' cannot be absolute" % pathname)
- if pathname[-1] == '/':
- raise ValueError("path '%s' cannot end with '/'" % pathname)
-
- paths = pathname.split('/')
- while '.' in paths:
- paths.remove('.')
- if not paths:
- return os.curdir
- return os.path.join(*paths)
-
-
-# convert_path ()
-
-
-def change_root(new_root, pathname):
- """Return 'pathname' with 'new_root' prepended. If 'pathname' is
- relative, this is equivalent to "os.path.join(new_root,pathname)".
- Otherwise, it requires making 'pathname' relative and then joining the
- two, which is tricky on DOS/Windows and Mac OS.
- """
- if os.name == 'posix':
- if not os.path.isabs(pathname):
- return os.path.join(new_root, pathname)
- else:
- return os.path.join(new_root, pathname[1:])
-
- elif os.name == 'nt':
- (drive, path) = os.path.splitdrive(pathname)
- if path[0] == '\\':
- path = path[1:]
- return os.path.join(new_root, path)
-
- raise DistutilsPlatformError(f"nothing known about platform '{os.name}'")
-
-
-@functools.lru_cache()
-def check_environ():
- """Ensure that 'os.environ' has all the environment variables we
- guarantee that users can use in config files, command-line options,
- etc. Currently this includes:
- HOME - user's home directory (Unix only)
- PLAT - description of the current platform, including hardware
- and OS (see 'get_platform()')
- """
- if os.name == 'posix' and 'HOME' not in os.environ:
- try:
- import pwd
-
- os.environ['HOME'] = pwd.getpwuid(os.getuid())[5]
- except (ImportError, KeyError):
- # bpo-10496: if the current user identifier doesn't exist in the
- # password database, do nothing
- pass
-
- if 'PLAT' not in os.environ:
- os.environ['PLAT'] = get_platform()
-
-
-def subst_vars(s, local_vars):
- """
- Perform variable substitution on 'string'.
- Variables are indicated by format-style braces ("{var}").
- Variable is substituted by the value found in the 'local_vars'
- dictionary or in 'os.environ' if it's not in 'local_vars'.
- 'os.environ' is first checked/augmented to guarantee that it contains
- certain values: see 'check_environ()'. Raise ValueError for any
- variables not found in either 'local_vars' or 'os.environ'.
- """
- check_environ()
- lookup = dict(os.environ)
- lookup.update((name, str(value)) for name, value in local_vars.items())
- try:
- return _subst_compat(s).format_map(lookup)
- except KeyError as var:
- raise ValueError(f"invalid variable {var}")
-
-
-def _subst_compat(s):
- """
- Replace shell/Perl-style variable substitution with
- format-style. For compatibility.
- """
-
- def _subst(match):
- return f'{{{match.group(1)}}}'
-
- repl = re.sub(r'\$([a-zA-Z_][a-zA-Z_0-9]*)', _subst, s)
- if repl != s:
- import warnings
-
- warnings.warn(
- "shell/Perl-style substitions are deprecated",
- DeprecationWarning,
- )
- return repl
-
-
-def grok_environment_error(exc, prefix="error: "):
- # Function kept for backward compatibility.
- # Used to try clever things with EnvironmentErrors,
- # but nowadays str(exception) produces good messages.
- return prefix + str(exc)
-
-
-# Needed by 'split_quoted()'
-_wordchars_re = _squote_re = _dquote_re = None
-
-
-def _init_regex():
- global _wordchars_re, _squote_re, _dquote_re
- _wordchars_re = re.compile(r'[^\\\'\"%s ]*' % string.whitespace)
- _squote_re = re.compile(r"'(?:[^'\\]|\\.)*'")
- _dquote_re = re.compile(r'"(?:[^"\\]|\\.)*"')
-
-
-def split_quoted(s):
- """Split a string up according to Unix shell-like rules for quotes and
- backslashes. In short: words are delimited by spaces, as long as those
- spaces are not escaped by a backslash, or inside a quoted string.
- Single and double quotes are equivalent, and the quote characters can
- be backslash-escaped. The backslash is stripped from any two-character
- escape sequence, leaving only the escaped character. The quote
- characters are stripped from any quoted string. Returns a list of
- words.
- """
-
- # This is a nice algorithm for splitting up a single string, since it
- # doesn't require character-by-character examination. It was a little
- # bit of a brain-bender to get it working right, though...
- if _wordchars_re is None:
- _init_regex()
-
- s = s.strip()
- words = []
- pos = 0
-
- while s:
- m = _wordchars_re.match(s, pos)
- end = m.end()
- if end == len(s):
- words.append(s[:end])
- break
-
- if s[end] in string.whitespace:
- # unescaped, unquoted whitespace: now
- # we definitely have a word delimiter
- words.append(s[:end])
- s = s[end:].lstrip()
- pos = 0
-
- elif s[end] == '\\':
- # preserve whatever is being escaped;
- # will become part of the current word
- s = s[:end] + s[end + 1 :]
- pos = end + 1
-
- else:
- if s[end] == "'": # slurp singly-quoted string
- m = _squote_re.match(s, end)
- elif s[end] == '"': # slurp doubly-quoted string
- m = _dquote_re.match(s, end)
- else:
- raise RuntimeError("this can't happen (bad char '%c')" % s[end])
-
- if m is None:
- raise ValueError("bad string (mismatched %s quotes?)" % s[end])
-
- (beg, end) = m.span()
- s = s[:beg] + s[beg + 1 : end - 1] + s[end:]
- pos = m.end() - 2
-
- if pos >= len(s):
- words.append(s)
- break
-
- return words
-
-
-# split_quoted ()
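-# Example (illustrative):
-#   split_quoted(r'this is "a test" of\ escaping') -> ['this', 'is', 'a test', 'of escaping']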
-
-
-def execute(func, args, msg=None, verbose=0, dry_run=0):
- """Perform some action that affects the outside world (eg. by
- writing to the filesystem). Such actions are special because they
- are disabled by the 'dry_run' flag. This method takes care of all
- that bureaucracy for you; all you have to do is supply the
- function to call and an argument tuple for it (to embody the
- "external action" being performed), and an optional message to
- print.
- """
- if msg is None:
- msg = "{}{!r}".format(func.__name__, args)
- if msg[-2:] == ',)': # correct for singleton tuple
- msg = msg[0:-2] + ')'
-
- log.info(msg)
- if not dry_run:
- func(*args)
-
-
-def strtobool(val):
- """Convert a string representation of truth to true (1) or false (0).
-
- True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values
- are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if
- 'val' is anything else.
- """
- val = val.lower()
- if val in ('y', 'yes', 't', 'true', 'on', '1'):
- return 1
- elif val in ('n', 'no', 'f', 'false', 'off', '0'):
- return 0
- else:
- raise ValueError("invalid truth value {!r}".format(val))
-
-
-def byte_compile( # noqa: C901
- py_files,
- optimize=0,
- force=0,
- prefix=None,
- base_dir=None,
- verbose=1,
- dry_run=0,
- direct=None,
-):
- """Byte-compile a collection of Python source files to .pyc
- files in a __pycache__ subdirectory. 'py_files' is a list
- of files to compile; any files that don't end in ".py" are silently
- skipped. 'optimize' must be one of the following:
- 0 - don't optimize
- 1 - normal optimization (like "python -O")
- 2 - extra optimization (like "python -OO")
- If 'force' is true, all files are recompiled regardless of
- timestamps.
-
- The source filename encoded in each bytecode file defaults to the
- filenames listed in 'py_files'; you can modify these with 'prefix' and
- 'basedir'. 'prefix' is a string that will be stripped off of each
- source filename, and 'base_dir' is a directory name that will be
- prepended (after 'prefix' is stripped). You can supply either or both
- (or neither) of 'prefix' and 'base_dir', as you wish.
-
- If 'dry_run' is true, doesn't actually do anything that would
- affect the filesystem.
-
- Byte-compilation is either done directly in this interpreter process
- with the standard py_compile module, or indirectly by writing a
- temporary script and executing it. Normally, you should let
- 'byte_compile()' figure out to use direct compilation or not (see
- the source for details). The 'direct' flag is used by the script
- generated in indirect mode; unless you know what you're doing, leave
- it set to None.
- """
-
- # nothing is done if sys.dont_write_bytecode is True
- if sys.dont_write_bytecode:
- raise DistutilsByteCompileError('byte-compiling is disabled.')
-
- # First, if the caller didn't force us into direct or indirect mode,
- # figure out which mode we should be in. We take a conservative
- # approach: choose direct mode *only* if the current interpreter is
- # in debug mode and optimize is 0. If we're not in debug mode (-O
- # or -OO), we don't know which level of optimization this
- # interpreter is running with, so we can't do direct
- # byte-compilation and be certain that it's the right thing. Thus,
- # always compile indirectly if the current interpreter is in either
- # optimize mode, or if either optimization level was requested by
- # the caller.
- if direct is None:
- direct = __debug__ and optimize == 0
-
- # "Indirect" byte-compilation: write a temporary script and then
- # run it with the appropriate flags.
- if not direct:
- try:
- from tempfile import mkstemp
-
- (script_fd, script_name) = mkstemp(".py")
- except ImportError:
- from tempfile import mktemp
-
- (script_fd, script_name) = None, mktemp(".py")
- log.info("writing byte-compilation script '%s'", script_name)
- if not dry_run:
- if script_fd is not None:
- script = os.fdopen(script_fd, "w")
- else:
- script = open(script_name, "w")
-
- with script:
- script.write(
- """\
-from distutils.util import byte_compile
-files = [
-"""
- )
-
- # XXX would be nice to write absolute filenames, just for
- # safety's sake (script should be more robust in the face of
- # chdir'ing before running it). But this requires abspath'ing
- # 'prefix' as well, and that breaks the hack in build_lib's
- # 'byte_compile()' method that carefully tacks on a trailing
- # slash (os.sep really) to make sure the prefix here is "just
- # right". This whole prefix business is rather delicate -- the
- # problem is that it's really a directory, but I'm treating it
- # as a dumb string, so trailing slashes and so forth matter.
-
- script.write(",\n".join(map(repr, py_files)) + "]\n")
- script.write(
- """
-byte_compile(files, optimize=%r, force=%r,
- prefix=%r, base_dir=%r,
- verbose=%r, dry_run=0,
- direct=1)
-"""
- % (optimize, force, prefix, base_dir, verbose)
- )
-
- cmd = [sys.executable]
- cmd.extend(subprocess._optim_args_from_interpreter_flags())
- cmd.append(script_name)
- spawn(cmd, dry_run=dry_run)
- execute(os.remove, (script_name,), "removing %s" % script_name, dry_run=dry_run)
-
- # "Direct" byte-compilation: use the py_compile module to compile
- # right here, right now. Note that the script generated in indirect
- # mode simply calls 'byte_compile()' in direct mode, a weird sort of
- # cross-process recursion. Hey, it works!
- else:
- from py_compile import compile
-
- for file in py_files:
- if file[-3:] != ".py":
- # This lets us be lazy and not filter filenames in
- # the "install_lib" command.
- continue
-
- # Terminology from the py_compile module:
- # cfile - byte-compiled file
- # dfile - purported source filename (same as 'file' by default)
- if optimize >= 0:
- opt = '' if optimize == 0 else optimize
- cfile = importlib.util.cache_from_source(file, optimization=opt)
- else:
- cfile = importlib.util.cache_from_source(file)
- dfile = file
- if prefix:
- if file[: len(prefix)] != prefix:
- raise ValueError(
- "invalid prefix: filename %r doesn't start with %r"
- % (file, prefix)
- )
- dfile = dfile[len(prefix) :]
- if base_dir:
- dfile = os.path.join(base_dir, dfile)
-
- cfile_base = os.path.basename(cfile)
- if direct:
- if force or newer(file, cfile):
- log.info("byte-compiling %s to %s", file, cfile_base)
- if not dry_run:
- compile(file, cfile, dfile)
- else:
- log.debug("skipping byte-compilation of %s to %s", file, cfile_base)
-
-
-def rfc822_escape(header):
- """Return a version of the string escaped for inclusion in an
- RFC-822 header, by ensuring there are 8 spaces after each newline.
- """
- lines = header.split('\n')
- sep = '\n' + 8 * ' '
- return sep.join(lines)
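-
-# Example (illustrative): rfc822_escape('Line one\nLine two')
-# -> 'Line one\n        Line two'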
diff --git a/spaces/Ritori/Yura_GPT/README.md b/spaces/Ritori/Yura_GPT/README.md
deleted file mode 100644
index 720c2dd4c94f252b4920091f7ac95f3dc769fc9f..0000000000000000000000000000000000000000
--- a/spaces/Ritori/Yura_GPT/README.md
+++ /dev/null
@@ -1,6 +0,0 @@
----
-title: Yura_GPT
-app_file: yura gpt gradio.py
-sdk: gradio
-sdk_version: 3.36.1
----
diff --git a/spaces/Ritvik19/SudokuNet/app.py b/spaces/Ritvik19/SudokuNet/app.py
deleted file mode 100644
index 70954da8e37cd4484953436732d5a0b31c2578bf..0000000000000000000000000000000000000000
--- a/spaces/Ritvik19/SudokuNet/app.py
+++ /dev/null
@@ -1,34 +0,0 @@
-import streamlit as st
-import numpy as np
-from sudokunet import SudokuSolver
-
-DEFAULT_PUZZLE = """
-0 0 4 3 0 0 2 0 9
-0 0 5 0 0 9 0 0 1
-0 7 0 0 6 0 0 4 3
-0 0 6 0 0 2 0 8 7
-1 9 0 0 0 7 4 0 0
-0 5 0 0 8 3 0 0 0
-6 0 0 0 0 0 1 0 5
-0 0 3 5 0 8 6 9 0
-0 4 2 9 1 0 3 0 0
-""".strip()
-
-
-st.header("Sudoku Net")
-st.write("This space is a demo for Sudoku Net v1 and v2 which are Neural Networks for solving Sudoku Puzzles")
-st.write("##### Try out Sudoku Net here:")
-col1, col2 = st.columns(2)
-solver_1 = SudokuSolver("Ritvik19/sudoku-net-v1")
-solver_2 = SudokuSolver('Ritvik19/sudoku-net-v2')
-with col1:
- puzzle = st.text_area("Enter puzzle here", value=DEFAULT_PUZZLE, height=250)
- puzzle = np.array([int(digit) for digit in puzzle.split()]).reshape(1, 9, 9)
- if st.button("Solve using v1"):
- with col2:
- solution = solver_1(puzzle)[0]
- st.write(solution)
- if st.button("Solve using v2"):
- with col2:
- solution = solver_2(puzzle)[0]
- st.write(solution)
\ No newline at end of file
diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmcv/runner/hooks/logger/tensorboard.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmcv/runner/hooks/logger/tensorboard.py
deleted file mode 100644
index 4dd5011dc08def6c09eef86d3ce5b124c9fc5372..0000000000000000000000000000000000000000
--- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmcv/runner/hooks/logger/tensorboard.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import os.path as osp
-
-from annotator.uniformer.mmcv.utils import TORCH_VERSION, digit_version
-from ...dist_utils import master_only
-from ..hook import HOOKS
-from .base import LoggerHook
-
-
-@HOOKS.register_module()
-class TensorboardLoggerHook(LoggerHook):
-
- def __init__(self,
- log_dir=None,
- interval=10,
- ignore_last=True,
- reset_flag=False,
- by_epoch=True):
- super(TensorboardLoggerHook, self).__init__(interval, ignore_last,
- reset_flag, by_epoch)
- self.log_dir = log_dir
-
- @master_only
- def before_run(self, runner):
- super(TensorboardLoggerHook, self).before_run(runner)
- if (TORCH_VERSION == 'parrots'
- or digit_version(TORCH_VERSION) < digit_version('1.1')):
- try:
- from tensorboardX import SummaryWriter
- except ImportError:
- raise ImportError('Please install tensorboardX to use '
- 'TensorboardLoggerHook.')
- else:
- try:
- from torch.utils.tensorboard import SummaryWriter
- except ImportError:
- raise ImportError(
- 'Please run "pip install future tensorboard" to install '
- 'the dependencies to use torch.utils.tensorboard '
- '(applicable to PyTorch 1.1 or higher)')
-
- if self.log_dir is None:
- self.log_dir = osp.join(runner.work_dir, 'tf_logs')
- self.writer = SummaryWriter(self.log_dir)
-
- @master_only
- def log(self, runner):
- tags = self.get_loggable_tags(runner, allow_text=True)
- for tag, val in tags.items():
- if isinstance(val, str):
- self.writer.add_text(tag, val, self.get_iter(runner))
- else:
- self.writer.add_scalar(tag, val, self.get_iter(runner))
-
- @master_only
- def after_run(self, runner):
- self.writer.close()
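-
-# Illustrative usage (assumed, not from this repo): enable the hook via a
-# runner config such as
-#   log_config = dict(interval=10, hooks=[dict(type='TensorboardLoggerHook')])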
diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmseg/models/decode_heads/apc_head.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmseg/models/decode_heads/apc_head.py
deleted file mode 100644
index c7038bdbe0edf2a1f184b6899486d2d190dda076..0000000000000000000000000000000000000000
--- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmseg/models/decode_heads/apc_head.py
+++ /dev/null
@@ -1,158 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from annotator.uniformer.mmcv.cnn import ConvModule
-
-from annotator.uniformer.mmseg.ops import resize
-from ..builder import HEADS
-from .decode_head import BaseDecodeHead
-
-
-class ACM(nn.Module):
- """Adaptive Context Module used in APCNet.
-
- Args:
- pool_scale (int): Pooling scale used in Adaptive Context
- Module to extract region features.
- fusion (bool): Add one conv to fuse residual feature.
- in_channels (int): Input channels.
- channels (int): Channels after modules, before conv_seg.
- conv_cfg (dict | None): Config of conv layers.
- norm_cfg (dict | None): Config of norm layers.
- act_cfg (dict): Config of activation layers.
- """
-
- def __init__(self, pool_scale, fusion, in_channels, channels, conv_cfg,
- norm_cfg, act_cfg):
- super(ACM, self).__init__()
- self.pool_scale = pool_scale
- self.fusion = fusion
- self.in_channels = in_channels
- self.channels = channels
- self.conv_cfg = conv_cfg
- self.norm_cfg = norm_cfg
- self.act_cfg = act_cfg
- self.pooled_redu_conv = ConvModule(
- self.in_channels,
- self.channels,
- 1,
- conv_cfg=self.conv_cfg,
- norm_cfg=self.norm_cfg,
- act_cfg=self.act_cfg)
-
- self.input_redu_conv = ConvModule(
- self.in_channels,
- self.channels,
- 1,
- conv_cfg=self.conv_cfg,
- norm_cfg=self.norm_cfg,
- act_cfg=self.act_cfg)
-
- self.global_info = ConvModule(
- self.channels,
- self.channels,
- 1,
- conv_cfg=self.conv_cfg,
- norm_cfg=self.norm_cfg,
- act_cfg=self.act_cfg)
-
- self.gla = nn.Conv2d(self.channels, self.pool_scale**2, 1, 1, 0)
-
- self.residual_conv = ConvModule(
- self.channels,
- self.channels,
- 1,
- conv_cfg=self.conv_cfg,
- norm_cfg=self.norm_cfg,
- act_cfg=self.act_cfg)
-
- if self.fusion:
- self.fusion_conv = ConvModule(
- self.channels,
- self.channels,
- 1,
- conv_cfg=self.conv_cfg,
- norm_cfg=self.norm_cfg,
- act_cfg=self.act_cfg)
-
- def forward(self, x):
- """Forward function."""
- pooled_x = F.adaptive_avg_pool2d(x, self.pool_scale)
- # [batch_size, channels, h, w]
- x = self.input_redu_conv(x)
- # [batch_size, channels, pool_scale, pool_scale]
- pooled_x = self.pooled_redu_conv(pooled_x)
- batch_size = x.size(0)
- # [batch_size, pool_scale * pool_scale, channels]
- pooled_x = pooled_x.view(batch_size, self.channels,
- -1).permute(0, 2, 1).contiguous()
- # [batch_size, h * w, pool_scale * pool_scale]
- affinity_matrix = self.gla(x + resize(
- self.global_info(F.adaptive_avg_pool2d(x, 1)), size=x.shape[2:])
- ).permute(0, 2, 3, 1).reshape(
- batch_size, -1, self.pool_scale**2)
- affinity_matrix = F.sigmoid(affinity_matrix)
- # [batch_size, h * w, channels]
- z_out = torch.matmul(affinity_matrix, pooled_x)
- # [batch_size, channels, h * w]
- z_out = z_out.permute(0, 2, 1).contiguous()
- # [batch_size, channels, h, w]
- z_out = z_out.view(batch_size, self.channels, x.size(2), x.size(3))
- z_out = self.residual_conv(z_out)
- z_out = F.relu(z_out + x)
- if self.fusion:
- z_out = self.fusion_conv(z_out)
-
- return z_out
-
-
-@HEADS.register_module()
-class APCHead(BaseDecodeHead):
- """Adaptive Pyramid Context Network for Semantic Segmentation.
-
- This head is the implementation of
- APCNet.
-
- Args:
- pool_scales (tuple[int]): Pooling scales used in Adaptive Context
- Module. Default: (1, 2, 3, 6).
- fusion (bool): Add one conv to fuse residual feature.
- """
-
- def __init__(self, pool_scales=(1, 2, 3, 6), fusion=True, **kwargs):
- super(APCHead, self).__init__(**kwargs)
- assert isinstance(pool_scales, (list, tuple))
- self.pool_scales = pool_scales
- self.fusion = fusion
- acm_modules = []
- for pool_scale in self.pool_scales:
- acm_modules.append(
- ACM(pool_scale,
- self.fusion,
- self.in_channels,
- self.channels,
- conv_cfg=self.conv_cfg,
- norm_cfg=self.norm_cfg,
- act_cfg=self.act_cfg))
- self.acm_modules = nn.ModuleList(acm_modules)
- self.bottleneck = ConvModule(
- self.in_channels + len(pool_scales) * self.channels,
- self.channels,
- 3,
- padding=1,
- conv_cfg=self.conv_cfg,
- norm_cfg=self.norm_cfg,
- act_cfg=self.act_cfg)
-
- def forward(self, inputs):
- """Forward function."""
- x = self._transform_inputs(inputs)
- acm_outs = [x]
- for acm_module in self.acm_modules:
- acm_outs.append(acm_module(x))
- acm_outs = torch.cat(acm_outs, dim=1)
- output = self.bottleneck(acm_outs)
- output = self.cls_seg(output)
- return output
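-
-# Illustrative mmseg config snippet using this head (assumed values):
-#   decode_head = dict(type='APCHead', in_channels=2048, channels=512,
-#                      pool_scales=(1, 2, 3, 6), fusion=True, num_classes=19)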
diff --git a/spaces/RugNlpFlashcards/Speech_Language_Processing_Jurafsky_Martin/README.md b/spaces/RugNlpFlashcards/Speech_Language_Processing_Jurafsky_Martin/README.md
deleted file mode 100644
index 264f5a73afcb7db76d86d58e2b7ddb96272506ab..0000000000000000000000000000000000000000
--- a/spaces/RugNlpFlashcards/Speech_Language_Processing_Jurafsky_Martin/README.md
+++ /dev/null
@@ -1,119 +0,0 @@
----
-title: Speech_Language_Processing_Jurafsky_Martin
-emoji: 📚
-colorFrom: yellow
-colorTo: blue
-sdk: gradio
-sdk_version: 2.9.0
-python_version: 3.10.4
-app_file: app.py
-pinned: true
----
-
-
-# NLP FlashCards
-
-
-## DEMO
-
-View the demo at huggingface spaces:
-
-[DEMO](https://huggingface.co/spaces/RugNlpFlashcards/Speech_Language_Processing_Jurafsky_Martin)
-
-
-## Dependencies
-
-Make sure you have the following tools installed:
-
-- [Python](https://www.python.org/downloads/) ^3.10,<3.11;
-- [Poetry](https://python-poetry.org/) for Python package management;
-- [Docker](https://www.docker.com/get-started/) for running ElasticSearch;
-- [Git LFS](https://git-lfs.github.com/) for downloading binary files that do not fit in git.
-
-Then, run the following commands to install dependencies and Elasticsearch:
-
-```sh
-poetry install
-docker pull docker.elastic.co/elasticsearch/elasticsearch:8.1.1
-docker network create elastic
-docker run --name es01 --net elastic -p 9200:9200 -p 9300:9300 -it docker.elastic.co/elasticsearch/elasticsearch:8.1.1
-```
-
-After the last command, a password for the `elastic` user should show up in the
-terminal output (you might have to scroll up a bit). Copy this password, and
-create a copy of the `.env.example` file and rename it to `.env`. Replace the
-password placeholder in that file with your copied password. The `.env` file can be
-used to change the configuration of the system; leave it as-is for a replication study.
-
-Next, run the following command **from the root of the repository**:
-
-```sh
-docker cp es01:/usr/share/elasticsearch/config/certs/http_ca.crt .
-```
-
-**NOTE 1:** If Docker is not available or feasible, it is possible to use a trial hosted version of Elasticsearch at: https://www.elastic.co/cloud/
-
-**NOTE 2:** Installing dependencies without Poetry is possible, but it is not our recommendation. To do so, execute `pip install -r requirements.txt`.
-
-## Running
-
-To make sure we're using the dependencies managed by Poetry, run `poetry shell`
-before executing any of the following commands. Alternatively, replace any call
-like `python file.py` with `poetry run python file.py` (but we suggest the shell
-option, since it is much more convenient).
-
-### Using the QA system
-
-⚠️ **Important** ⚠️ _If you want to run an ElasticSearch query, make sure the
-docker container is running! You can check this by running `docker container
-ls`. If your container shows up (it's named `es01` if you followed these
-instructions), it's running. If not, you can run `docker start es01` to start
-it, or start it from Docker Desktop._
-
-To query the QA system, run any query as follows:
-
-```sh
-python query.py "Why can dot product be used as a similarity metric?"
-```
-
-By default, the best answer along with its location in the book will be
-returned. If you want to generate more answers (say, a top-5), you can supply
-the `--top=5` option. The default retriever uses [FAISS](https://faiss.ai/), but
-you can also use [ElasticSearch](https://www.elastic.co/elastic-stack/) using
-the `--retriever=es` option. You can also pick a language model using the
-`--lm` option, which accepts either `dpr` (Dense Passage Retrieval) or
-`longformer`. The language model is used to generate embeddings for FAISS, and
-is used to generate the answer.
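-
-For example, to retrieve the top five answers with the ElasticSearch retriever
-(reusing the question from above):
-
-```sh
-python query.py "Why can dot product be used as a similarity metric?" --top=5 --retriever=es
-```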
-
-### CLI overview
-
-To get an overview of all available options, run `python query.py --help`. The
-options are also printed below.
-
-```sh
-usage: query.py [-h] [--top int] [--retriever {faiss,es}] [--lm {dpr,longformer}] str
-
-positional arguments:
- str The question to feed to the QA system
-
-options:
- -h, --help show this help message and exit
- --top int, -t int The number of answers to retrieve
- --retriever {faiss,es}, -r {faiss,es}
- The retrieval method to use
- --lm {dpr,longformer}, -l {dpr,longformer}
- The language model to use for the FAISS retriever
-```
-
-
-### Replicating the experiment
-
-To fully run experiments, you need to run the following command:
-
-```
-# in the root of the project and poetry environment activated
-python main.py
-```
-
-This command runs all questions through the system and stores the output in the `results/` directory.
-
-After performing the experiment, results can be analyzed and displayed by running `plot.py` and the `results/*_analysis.ipynb` files.
diff --git a/spaces/RustX/CSV-ChatBot/modules/history.py b/spaces/RustX/CSV-ChatBot/modules/history.py
deleted file mode 100644
index 33b606dc17f67cb8157e593ae9f9a699154a15a7..0000000000000000000000000000000000000000
--- a/spaces/RustX/CSV-ChatBot/modules/history.py
+++ /dev/null
@@ -1,57 +0,0 @@
-import os
-import streamlit as st
-from streamlit_chat import message
-
-
-class ChatHistory:
- def __init__(self, history_file="chat_history.txt"):
- # NOTE: load() and save() below reference self.history_file, which was
- # never defined; the default filename here is an assumed placeholder.
- self.history_file = history_file
- self.history = st.session_state.get("history", [])
- st.session_state["history"] = self.history
-
- def default_greeting(self):
- return "안녕 ! 👋"
-
- def default_prompt(self, topic):
- return f"안녕하세요 ! {topic}에 대해 무엇이든 물어보세요 🤗"
-
- def initialize_user_history(self):
- st.session_state["user"] = [self.default_greeting()]
-
- def initialize_assistant_history(self, uploaded_file):
- st.session_state["assistant"] = [self.default_prompt(uploaded_file.name)]
-
- def initialize(self, uploaded_file):
- if "assistant" not in st.session_state:
- self.initialize_assistant_history(uploaded_file)
- if "user" not in st.session_state:
- self.initialize_user_history()
-
- def reset(self, uploaded_file):
- st.session_state["history"] = []
- self.initialize_user_history()
- self.initialize_assistant_history(uploaded_file)
- st.session_state["reset_chat"] = False
-
- def append(self, mode, message):
- st.session_state[mode].append(message)
-
- def generate_messages(self, container):
- if st.session_state["assistant"]:
- with container:
- for i in range(len(st.session_state["assistant"])):
- message(
- st.session_state["user"][i],
- is_user=True,
- key=f"{i}_user",
- avatar_style="big-smile",
- )
- message(st.session_state["assistant"][i], key=str(i), avatar_style="thumbs")
-
- def load(self):
- if os.path.exists(self.history_file):
- with open(self.history_file, "r") as f:
- self.history = f.read().splitlines()
-
- def save(self):
- with open(self.history_file, "w") as f:
- f.write("\n".join(self.history))
\ No newline at end of file
diff --git a/spaces/SIGGRAPH2022/DCT-Net/source/mtcnn_pytorch/src/__init__.py b/spaces/SIGGRAPH2022/DCT-Net/source/mtcnn_pytorch/src/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/Sa-m/YOLO-V7-Custom-Model-Pot-Hole-Detection/utils/plots.py b/spaces/Sa-m/YOLO-V7-Custom-Model-Pot-Hole-Detection/utils/plots.py
deleted file mode 100644
index fdd8d0e853deb228badeeed52fbbe5fb8eb10632..0000000000000000000000000000000000000000
--- a/spaces/Sa-m/YOLO-V7-Custom-Model-Pot-Hole-Detection/utils/plots.py
+++ /dev/null
@@ -1,489 +0,0 @@
-# Plotting utils
-
-import glob
-import math
-import os
-import random
-from copy import copy
-from pathlib import Path
-
-import cv2
-import matplotlib
-import matplotlib.pyplot as plt
-import numpy as np
-import pandas as pd
-import seaborn as sns
-import torch
-import yaml
-from PIL import Image, ImageDraw, ImageFont
-from scipy.signal import butter, filtfilt
-
-from utils.general import xywh2xyxy, xyxy2xywh
-from utils.metrics import fitness
-
-# Settings
-matplotlib.rc('font', **{'size': 11})
-matplotlib.use('Agg') # for writing to files only
-
-
-def color_list():
- # Return first 10 plt colors as (r,g,b) https://stackoverflow.com/questions/51350872/python-from-color-name-to-rgb
- def hex2rgb(h):
- return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4))
-
- return [hex2rgb(h) for h in matplotlib.colors.TABLEAU_COLORS.values()] # or BASE_ (8), CSS4_ (148), XKCD_ (949)
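-
-# Example (illustrative): color_list()[0] -> (31, 119, 180), i.e. tab:blue.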
-
-
-def hist2d(x, y, n=100):
- # 2d histogram used in labels.png and evolve.png
- xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n)
- hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges))
- xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1)
- yidx = np.clip(np.digitize(y, yedges) - 1, 0, hist.shape[1] - 1)
- return np.log(hist[xidx, yidx])
-
-
-def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5):
- # https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy
- def butter_lowpass(cutoff, fs, order):
- nyq = 0.5 * fs
- normal_cutoff = cutoff / nyq
- return butter(order, normal_cutoff, btype='low', analog=False)
-
- b, a = butter_lowpass(cutoff, fs, order=order)
- return filtfilt(b, a, data) # forward-backward filter
-
-
-def plot_one_box(x, img, color=None, label=None, line_thickness=3):
- # Plots one bounding box on image img
- tl = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1 # line/font thickness
- color = color or [random.randint(0, 255) for _ in range(3)]
- c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
- cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)
- if label:
- tf = max(tl - 1, 1) # font thickness
- t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
- c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
- cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA) # filled
- cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
-
-
-def plot_one_box_PIL(box, img, color=None, label=None, line_thickness=None):
- img = Image.fromarray(img)
- draw = ImageDraw.Draw(img)
- line_thickness = line_thickness or max(int(min(img.size) / 200), 2)
- draw.rectangle(box, width=line_thickness, outline=tuple(color)) # plot
- if label:
- fontsize = max(round(max(img.size) / 40), 12)
- font = ImageFont.truetype("Arial.ttf", fontsize)
- txt_width, txt_height = font.getsize(label)
- draw.rectangle([box[0], box[1] - txt_height + 4, box[0] + txt_width, box[1]], fill=tuple(color))
- draw.text((box[0], box[1] - txt_height + 1), label, fill=(255, 255, 255), font=font)
- return np.asarray(img)
-
-
-def plot_wh_methods(): # from utils.plots import *; plot_wh_methods()
- # Compares the two methods for width-height anchor multiplication
- # https://github.com/ultralytics/yolov3/issues/168
- x = np.arange(-4.0, 4.0, .1)
- ya = np.exp(x)
- yb = torch.sigmoid(torch.from_numpy(x)).numpy() * 2
-
- fig = plt.figure(figsize=(6, 3), tight_layout=True)
- plt.plot(x, ya, '.-', label='YOLOv3')
- plt.plot(x, yb ** 2, '.-', label='YOLOR ^2')
- plt.plot(x, yb ** 1.6, '.-', label='YOLOR ^1.6')
- plt.xlim(left=-4, right=4)
- plt.ylim(bottom=0, top=6)
- plt.xlabel('input')
- plt.ylabel('output')
- plt.grid()
- plt.legend()
- fig.savefig('comparison.png', dpi=200)
-
-
-def output_to_target(output):
- # Convert model output to target format [batch_id, class_id, x, y, w, h, conf]
- targets = []
- for i, o in enumerate(output):
- for *box, conf, cls in o.cpu().numpy():
- targets.append([i, cls, *list(*xyxy2xywh(np.array(box)[None])), conf])
- return np.array(targets)
-
-
-def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=640, max_subplots=16):
- # Plot image grid with labels
-
- if isinstance(images, torch.Tensor):
- images = images.cpu().float().numpy()
- if isinstance(targets, torch.Tensor):
- targets = targets.cpu().numpy()
-
- # un-normalise
- if np.max(images[0]) <= 1:
- images *= 255
-
- tl = 3 # line thickness
- tf = max(tl - 1, 1) # font thickness
- bs, _, h, w = images.shape # batch size, _, height, width
- bs = min(bs, max_subplots) # limit plot images
- ns = np.ceil(bs ** 0.5) # number of subplots (square)
-
- # Check if we should resize
- scale_factor = max_size / max(h, w)
- if scale_factor < 1:
- h = math.ceil(scale_factor * h)
- w = math.ceil(scale_factor * w)
-
- colors = color_list() # list of colors
- mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init
- for i, img in enumerate(images):
- if i == max_subplots: # if last batch has fewer images than we expect
- break
-
- block_x = int(w * (i // ns))
- block_y = int(h * (i % ns))
-
- img = img.transpose(1, 2, 0)
- if scale_factor < 1:
- img = cv2.resize(img, (w, h))
-
- mosaic[block_y:block_y + h, block_x:block_x + w, :] = img
- if len(targets) > 0:
- image_targets = targets[targets[:, 0] == i]
- boxes = xywh2xyxy(image_targets[:, 2:6]).T
- classes = image_targets[:, 1].astype('int')
- labels = image_targets.shape[1] == 6 # labels if no conf column
- conf = None if labels else image_targets[:, 6] # check for confidence presence (label vs pred)
-
- if boxes.shape[1]:
- if boxes.max() <= 1.01: # if normalized with tolerance 0.01
- boxes[[0, 2]] *= w # scale to pixels
- boxes[[1, 3]] *= h
- elif scale_factor < 1: # absolute coords need scale if image scales
- boxes *= scale_factor
- boxes[[0, 2]] += block_x
- boxes[[1, 3]] += block_y
- for j, box in enumerate(boxes.T):
- cls = int(classes[j])
- color = colors[cls % len(colors)]
- cls = names[cls] if names else cls
- if labels or conf[j] > 0.25: # 0.25 conf thresh
- label = '%s' % cls if labels else '%s %.1f' % (cls, conf[j])
- plot_one_box(box, mosaic, label=label, color=color, line_thickness=tl)
-
- # Draw image filename labels
- if paths:
- label = Path(paths[i]).name[:40] # trim to 40 char
- t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
- cv2.putText(mosaic, label, (block_x + 5, block_y + t_size[1] + 5), 0, tl / 3, [220, 220, 220], thickness=tf,
- lineType=cv2.LINE_AA)
-
- # Image border
- cv2.rectangle(mosaic, (block_x, block_y), (block_x + w, block_y + h), (255, 255, 255), thickness=3)
-
- if fname:
- r = min(1280. / max(h, w) / ns, 1.0) # ratio to limit image size
- mosaic = cv2.resize(mosaic, (int(ns * w * r), int(ns * h * r)), interpolation=cv2.INTER_AREA)
- # cv2.imwrite(fname, cv2.cvtColor(mosaic, cv2.COLOR_BGR2RGB)) # cv2 save
- Image.fromarray(mosaic).save(fname) # PIL save
- return mosaic
-
-
-def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''):
- # Plot LR simulating training for full epochs
- optimizer, scheduler = copy(optimizer), copy(scheduler) # do not modify originals
- y = []
- for _ in range(epochs):
- scheduler.step()
- y.append(optimizer.param_groups[0]['lr'])
- plt.plot(y, '.-', label='LR')
- plt.xlabel('epoch')
- plt.ylabel('LR')
- plt.grid()
- plt.xlim(0, epochs)
- plt.ylim(0)
- plt.savefig(Path(save_dir) / 'LR.png', dpi=200)
- plt.close()
-
-
-def plot_test_txt(): # from utils.plots import *; plot_test()
- # Plot test.txt histograms
- x = np.loadtxt('test.txt', dtype=np.float32)
- box = xyxy2xywh(x[:, :4])
- cx, cy = box[:, 0], box[:, 1]
-
- fig, ax = plt.subplots(1, 1, figsize=(6, 6), tight_layout=True)
- ax.hist2d(cx, cy, bins=600, cmax=10, cmin=0)
- ax.set_aspect('equal')
- plt.savefig('hist2d.png', dpi=300)
-
- fig, ax = plt.subplots(1, 2, figsize=(12, 6), tight_layout=True)
- ax[0].hist(cx, bins=600)
- ax[1].hist(cy, bins=600)
- plt.savefig('hist1d.png', dpi=200)
-
-
-def plot_targets_txt(): # from utils.plots import *; plot_targets_txt()
- # Plot targets.txt histograms
- x = np.loadtxt('targets.txt', dtype=np.float32).T
- s = ['x targets', 'y targets', 'width targets', 'height targets']
- fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)
- ax = ax.ravel()
- for i in range(4):
- ax[i].hist(x[i], bins=100, label='%.3g +/- %.3g' % (x[i].mean(), x[i].std()))
- ax[i].legend()
- ax[i].set_title(s[i])
- plt.savefig('targets.jpg', dpi=200)
-
-
-def plot_study_txt(path='', x=None): # from utils.plots import *; plot_study_txt()
- # Plot study.txt generated by test.py
- fig, ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)
- # ax = ax.ravel()
-
- fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True)
- # for f in [Path(path) / f'study_coco_{x}.txt' for x in ['yolor-p6', 'yolor-w6', 'yolor-e6', 'yolor-d6']]:
- for f in sorted(Path(path).glob('study*.txt')):
- y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T
- x = np.arange(y.shape[1]) if x is None else np.array(x)
- s = ['P', 'R', 'mAP@.5', 'mAP@.5:.95', 't_inference (ms/img)', 't_NMS (ms/img)', 't_total (ms/img)']
- # for i in range(7):
- # ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8)
- # ax[i].set_title(s[i])
-
- j = y[3].argmax() + 1
- ax2.plot(y[6, 1:j], y[3, 1:j] * 1E2, '.-', linewidth=2, markersize=8,
- label=f.stem.replace('study_coco_', '').replace('yolo', 'YOLO'))
-
- ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [34.6, 40.5, 43.0, 47.5, 49.7, 51.5],
- 'k.-', linewidth=2, markersize=8, alpha=.25, label='EfficientDet')
-
- ax2.grid(alpha=0.2)
- ax2.set_yticks(np.arange(20, 60, 5))
- ax2.set_xlim(0, 57)
- ax2.set_ylim(30, 55)
- ax2.set_xlabel('GPU Speed (ms/img)')
- ax2.set_ylabel('COCO AP val')
- ax2.legend(loc='lower right')
- plt.savefig(str(Path(path).name) + '.png', dpi=300)
-
-
-def plot_labels(labels, names=(), save_dir=Path(''), loggers=None):
- # plot dataset labels
- print('Plotting labels... ')
- c, b = labels[:, 0], labels[:, 1:].transpose() # classes, boxes
- nc = int(c.max() + 1) # number of classes
- colors = color_list()
- x = pd.DataFrame(b.transpose(), columns=['x', 'y', 'width', 'height'])
-
- # seaborn correlogram
- sns.pairplot(x, corner=True, diag_kind='auto', kind='hist', diag_kws=dict(bins=50), plot_kws=dict(pmax=0.9))
- plt.savefig(save_dir / 'labels_correlogram.jpg', dpi=200)
- plt.close()
-
- # matplotlib labels
- matplotlib.use('svg') # faster
- ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)[1].ravel()
- ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8)
- ax[0].set_ylabel('instances')
- if 0 < len(names) < 30:
- ax[0].set_xticks(range(len(names)))
- ax[0].set_xticklabels(names, rotation=90, fontsize=10)
- else:
- ax[0].set_xlabel('classes')
- sns.histplot(x, x='x', y='y', ax=ax[2], bins=50, pmax=0.9)
- sns.histplot(x, x='width', y='height', ax=ax[3], bins=50, pmax=0.9)
-
- # rectangles
- labels[:, 1:3] = 0.5 # center
- labels[:, 1:] = xywh2xyxy(labels[:, 1:]) * 2000
- img = Image.fromarray(np.ones((2000, 2000, 3), dtype=np.uint8) * 255)
- for cls, *box in labels[:1000]:
- ImageDraw.Draw(img).rectangle(box, width=1, outline=colors[int(cls) % 10]) # plot
- ax[1].imshow(img)
- ax[1].axis('off')
-
- for a in [0, 1, 2, 3]:
- for s in ['top', 'right', 'left', 'bottom']:
- ax[a].spines[s].set_visible(False)
-
- plt.savefig(save_dir / 'labels.jpg', dpi=200)
- matplotlib.use('Agg')
- plt.close()
-
- # loggers
- for k, v in loggers.items() or {}:
- if k == 'wandb' and v:
- v.log({"Labels": [v.Image(str(x), caption=x.name) for x in save_dir.glob('*labels*.jpg')]}, commit=False)
-
-
-def plot_evolution(yaml_file='data/hyp.finetune.yaml'): # from utils.plots import *; plot_evolution()
- # Plot hyperparameter evolution results in evolve.txt
- with open(yaml_file) as f:
- hyp = yaml.load(f, Loader=yaml.SafeLoader)
- x = np.loadtxt('evolve.txt', ndmin=2)
- f = fitness(x)
- # weights = (f - f.min()) ** 2 # for weighted results
- plt.figure(figsize=(10, 12), tight_layout=True)
- matplotlib.rc('font', **{'size': 8})
- for i, (k, v) in enumerate(hyp.items()):
- y = x[:, i + 7]
- # mu = (y * weights).sum() / weights.sum() # best weighted result
- mu = y[f.argmax()] # best single result
- plt.subplot(6, 5, i + 1)
- plt.scatter(y, f, c=hist2d(y, f, 20), cmap='viridis', alpha=.8, edgecolors='none')
- plt.plot(mu, f.max(), 'k+', markersize=15)
- plt.title('%s = %.3g' % (k, mu), fontdict={'size': 9}) # limit to 40 characters
- if i % 5 != 0:
- plt.yticks([])
- print('%15s: %.3g' % (k, mu))
- plt.savefig('evolve.png', dpi=200)
- print('\nPlot saved as evolve.png')
-
-
-def profile_idetection(start=0, stop=0, labels=(), save_dir=''):
- # Plot iDetection '*.txt' per-image logs. from utils.plots import *; profile_idetection()
- ax = plt.subplots(2, 4, figsize=(12, 6), tight_layout=True)[1].ravel()
- s = ['Images', 'Free Storage (GB)', 'RAM Usage (GB)', 'Battery', 'dt_raw (ms)', 'dt_smooth (ms)', 'real-world FPS']
- files = list(Path(save_dir).glob('frames*.txt'))
- for fi, f in enumerate(files):
- try:
- results = np.loadtxt(f, ndmin=2).T[:, 90:-30] # clip first and last rows
- n = results.shape[1] # number of rows
- x = np.arange(start, min(stop, n) if stop else n)
- results = results[:, x]
- t = (results[0] - results[0].min()) # set t0=0s
- results[0] = x
- for i, a in enumerate(ax):
- if i < len(results):
- label = labels[fi] if len(labels) else f.stem.replace('frames_', '')
- a.plot(t, results[i], marker='.', label=label, linewidth=1, markersize=5)
- a.set_title(s[i])
- a.set_xlabel('time (s)')
- # if fi == len(files) - 1:
- # a.set_ylim(bottom=0)
- for side in ['top', 'right']:
- a.spines[side].set_visible(False)
- else:
- a.remove()
- except Exception as e:
- print('Warning: Plotting error for %s; %s' % (f, e))
-
- ax[1].legend()
- plt.savefig(Path(save_dir) / 'idetection_profile.png', dpi=200)
-
-
-def plot_results_overlay(start=0, stop=0): # from utils.plots import *; plot_results_overlay()
- # Plot training 'results*.txt', overlaying train and val losses
- s = ['train', 'train', 'train', 'Precision', 'mAP@0.5', 'val', 'val', 'val', 'Recall', 'mAP@0.5:0.95'] # legends
- t = ['Box', 'Objectness', 'Classification', 'P-R', 'mAP-F1'] # titles
- for f in sorted(glob.glob('results*.txt') + glob.glob('../../Downloads/results*.txt')):
- results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T
- n = results.shape[1] # number of rows
- x = range(start, min(stop, n) if stop else n)
- fig, ax = plt.subplots(1, 5, figsize=(14, 3.5), tight_layout=True)
- ax = ax.ravel()
- for i in range(5):
- for j in [i, i + 5]:
- y = results[j, x]
- ax[i].plot(x, y, marker='.', label=s[j])
- # y_smooth = butter_lowpass_filtfilt(y)
- # ax[i].plot(x, np.gradient(y_smooth), marker='.', label=s[j])
-
- ax[i].set_title(t[i])
- ax[i].legend()
- ax[i].set_ylabel(f) if i == 0 else None # add filename
- fig.savefig(f.replace('.txt', '.png'), dpi=200)
-
-
-def plot_results(start=0, stop=0, bucket='', id=(), labels=(), save_dir=''):
- # Plot training 'results*.txt'. from utils.plots import *; plot_results(save_dir='runs/train/exp')
- fig, ax = plt.subplots(2, 5, figsize=(12, 6), tight_layout=True)
- ax = ax.ravel()
- s = ['Box', 'Objectness', 'Classification', 'Precision', 'Recall',
- 'val Box', 'val Objectness', 'val Classification', 'mAP@0.5', 'mAP@0.5:0.95']
- if bucket:
- # files = ['https://storage.googleapis.com/%s/results%g.txt' % (bucket, x) for x in id]
- files = ['results%g.txt' % x for x in id]
- c = ('gsutil cp ' + '%s ' * len(files) + '.') % tuple('gs://%s/results%g.txt' % (bucket, x) for x in id)
- os.system(c)
- else:
- files = list(Path(save_dir).glob('results*.txt'))
- assert len(files), 'No results.txt files found in %s, nothing to plot.' % os.path.abspath(save_dir)
- for fi, f in enumerate(files):
- try:
- results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T
- n = results.shape[1] # number of rows
- x = range(start, min(stop, n) if stop else n)
- for i in range(10):
- y = results[i, x]
- if i in [0, 1, 2, 5, 6, 7]:
- y[y == 0] = np.nan # don't show zero loss values
- # y /= y[0] # normalize
- label = labels[fi] if len(labels) else f.stem
- ax[i].plot(x, y, marker='.', label=label, linewidth=2, markersize=8)
- ax[i].set_title(s[i])
- # if i in [5, 6, 7]: # share train and val loss y axes
- # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5])
- except Exception as e:
- print('Warning: Plotting error for %s; %s' % (f, e))
-
- ax[1].legend()
- fig.savefig(Path(save_dir) / 'results.png', dpi=200)
-
-
-def output_to_keypoint(output):
- # Convert model output to target format [batch_id, class_id, x, y, w, h, conf]
- targets = []
- for i, o in enumerate(output):
- kpts = o[:,6:]
- o = o[:,:6]
- for index, (*box, conf, cls) in enumerate(o.detach().cpu().numpy()):
- targets.append([i, cls, *list(*xyxy2xywh(np.array(box)[None])), conf, *list(kpts.detach().cpu().numpy()[index])])
- return np.array(targets)
-
-
-def plot_skeleton_kpts(im, kpts, steps, orig_shape=None):
- # Plot the skeleton and keypoints for the COCO dataset
- palette = np.array([[255, 128, 0], [255, 153, 51], [255, 178, 102],
- [230, 230, 0], [255, 153, 255], [153, 204, 255],
- [255, 102, 255], [255, 51, 255], [102, 178, 255],
- [51, 153, 255], [255, 153, 153], [255, 102, 102],
- [255, 51, 51], [153, 255, 153], [102, 255, 102],
- [51, 255, 51], [0, 255, 0], [0, 0, 255], [255, 0, 0],
- [255, 255, 255]])
-
- skeleton = [[16, 14], [14, 12], [17, 15], [15, 13], [12, 13], [6, 12],
- [7, 13], [6, 7], [6, 8], [7, 9], [8, 10], [9, 11], [2, 3],
- [1, 2], [1, 3], [2, 4], [3, 5], [4, 6], [5, 7]]
-
- pose_limb_color = palette[[9, 9, 9, 9, 7, 7, 7, 0, 0, 0, 0, 0, 16, 16, 16, 16, 16, 16, 16]]
- pose_kpt_color = palette[[16, 16, 16, 16, 16, 0, 0, 0, 0, 0, 0, 9, 9, 9, 9, 9, 9]]
- radius = 5
- num_kpts = len(kpts) // steps
-
- for kid in range(num_kpts):
- r, g, b = pose_kpt_color[kid]
- x_coord, y_coord = kpts[steps * kid], kpts[steps * kid + 1]
- if not (x_coord % 640 == 0 or y_coord % 640 == 0):
- if steps == 3:
- conf = kpts[steps * kid + 2]
- if conf < 0.5:
- continue
- cv2.circle(im, (int(x_coord), int(y_coord)), radius, (int(r), int(g), int(b)), -1)
-
- for sk_id, sk in enumerate(skeleton):
- r, g, b = pose_limb_color[sk_id]
- pos1 = (int(kpts[(sk[0]-1)*steps]), int(kpts[(sk[0]-1)*steps+1]))
- pos2 = (int(kpts[(sk[1]-1)*steps]), int(kpts[(sk[1]-1)*steps+1]))
- if steps == 3:
- conf1 = kpts[(sk[0]-1)*steps+2]
- conf2 = kpts[(sk[1]-1)*steps+2]
- if conf1<0.5 or conf2<0.5:
- continue
- if pos1[0]%640 == 0 or pos1[1]%640==0 or pos1[0]<0 or pos1[1]<0:
- continue
- if pos2[0] % 640 == 0 or pos2[1] % 640 == 0 or pos2[0]<0 or pos2[1]<0:
- continue
- cv2.line(im, pos1, pos2, (int(r), int(g), int(b)), thickness=2)
diff --git a/spaces/Samarth991/Youtube-Video-ChatBot/app.py b/spaces/Samarth991/Youtube-Video-ChatBot/app.py
deleted file mode 100644
index d688bfd2ad47cf9baaf9ec5c066fde0f87ef4fcc..0000000000000000000000000000000000000000
--- a/spaces/Samarth991/Youtube-Video-ChatBot/app.py
+++ /dev/null
@@ -1,190 +0,0 @@
-import time
-import gradio as gr
-import logging
-from langchain.document_loaders import PDFMinerLoader,CSVLoader ,UnstructuredWordDocumentLoader,TextLoader,OnlinePDFLoader
-from langchain.text_splitter import CharacterTextSplitter
-from langchain.embeddings import SentenceTransformerEmbeddings
-from langchain.vectorstores import FAISS
-from langchain.chains import RetrievalQA
-from langchain.prompts import PromptTemplate
-from langchain.docstore.document import Document
-from youtube_transcript_api import YouTubeTranscriptApi
-import chatops
-
-logger = logging.getLogger(__name__)
-
-DEVICE = 'cpu'
-MAX_NEW_TOKENS = 4096
-DEFAULT_TEMPERATURE = 0.1
-DEFAULT_MAX_NEW_TOKENS = 2048
-MAX_INPUT_TOKEN_LENGTH = 4000
-DEFAULT_CHAR_LENGTH = 1000
-
-EXAMPLES = ["https://www.youtube.com/watch?v=aircAruvnKk&ab_channel=3Blue1Brown",
- "https://www.youtube.com/watch?v=Ilg3gGewQ5U",
- "https://www.youtube.com/watch?v=WUvTyaaNkzM"
- ]
-
-
-
-def clear_chat():
- return []
-
-def get_text_from_youtube_link(video_link, max_video_length=800):
- video_text = ""
- video_id = video_link.split("watch?v=")[1].split("&")[0]
- srt = YouTubeTranscriptApi.get_transcript(video_id)
- for text_data in srt:
- video_text = video_text + " " + text_data.get("text")
- print(video_text)
- # truncate overly long transcripts to at most max_video_length characters
- return video_text[0:max_video_length]
-
-def process_documents(documents,data_chunk=1500,chunk_overlap=100):
- text_splitter = CharacterTextSplitter(chunk_size=data_chunk, chunk_overlap=chunk_overlap,separator='\n')
- texts = text_splitter.split_documents(documents)
- return texts
-
-def process_youtube_link(link, document_name="youtube-content", char_length=1000):
- try:
- metadata = {"source": f"{document_name}.txt"}
- return [Document(page_content=get_text_from_youtube_link(video_link=link, max_video_length=char_length), metadata=metadata)]
- except Exception as err:
- logger.error(f'Error in reading document. {err}')
- # return an empty list instead of the implicit None so that downstream
- # split_documents() does not crash on a bad link
- return []
-
-
-def create_prompt():
- prompt_template = """As a chatbot asnwer the questions regarding the content in the video.
- Use the following context to answer.
- If you don't know the answer, just say I don't know.
-
- {context}
-
- Question: {question}
- Answer :"""
- prompt = PromptTemplate(
- template=prompt_template, input_variables=["context", "question"]
- )
- return prompt
-
-def youtube_chat(youtube_link,API_key,llm='HuggingFace',temperature=0.1,max_tokens=1096,char_length=1500):
-
- document = process_youtube_link(link=youtube_link,char_length=char_length)
- print("docuemt:",document)
- embedding_model = SentenceTransformerEmbeddings(model_name='thenlper/gte-base',model_kwargs={"device": DEVICE})
- texts = process_documents(documents=document)
- global vector_db
- vector_db = FAISS.from_documents(documents=texts, embedding= embedding_model)
- global qa
-
- if llm == 'HuggingFace':
- chat = chatops.get_hugging_face_model(
- model_id="tiiuae/falcon-7b-instruct",
- API_key=API_key,
- temperature=temperature,
- max_tokens=max_tokens
- )
- else:
- chat = chatops.get_openai_chat_model(API_key=API_key)
- chain_type_kwargs = {"prompt": create_prompt()}
-
- qa = RetrievalQA.from_chain_type(llm=chat,
- chain_type='stuff',
- retriever=vector_db.as_retriever(),
- chain_type_kwargs=chain_type_kwargs,
- return_source_documents=True
- )
- return "Youtube link Processing completed ..."
-
-def infer(question, history):
- # NOTE: chat history is currently unused; the chain is queried with the
- # question alone
- result = qa({"query": question})
- return result["result"]
-
-def bot(history):
- response = infer(history[-1][0], history)
- history[-1][1] = ""
-
- for character in response:
- history[-1][1] += character
- time.sleep(0.05)
- yield history
-
-def add_text(history, text):
- history = history + [(text, None)]
- return history, ""
-
-
-css="""
-#col-container {max-width: 2048px; margin-left: auto; margin-right: auto;}
-"""
-
-title = """
-
-
Chat with Youtube Videos
-
Upload a youtube link of any video-lecture/song/Research/Conference & ask Questions to chatbot with the tool.
- Tools uses State of the Art Models from HuggingFace/OpenAI so, make sure to add your key.
-
-
-"""
-
-with gr.Blocks(css=css) as demo:
- with gr.Row():
- with gr.Column(elem_id="col-container"):
- gr.HTML(title)
-
- with gr.Column():
- with gr.Row():
- LLM_option = gr.Dropdown(['HuggingFace','OpenAI'],label='Select HuggingFace/OpenAI')
- API_key = gr.Textbox(label="Add API key", type="password",autofocus=True)
-
- with gr.Group():
- chatbot = gr.Chatbot(height=270)
-
- with gr.Row():
- question = gr.Textbox(label="Type your question !",lines=1).style(full_width=True)
- with gr.Row():
- submit_btn = gr.Button(value="Send message", variant="primary", scale = 1)
- clean_chat_btn = gr.Button("Delete Chat")
-
- with gr.Column():
- with gr.Box():
- youtube_link = gr.Textbox(label="Add your you tube Link",text_align='left',autofocus=True)
- with gr.Row():
- load_youtube_bt = gr.Button("Process Youtube Link",).style(full_width = False)
- langchain_status = gr.Textbox(label="Status", placeholder="", interactive = False)
-
- with gr.Column():
- with gr.Accordion(label='Advanced options', open=False):
- max_new_tokens = gr.Slider(
- label='Max new tokens',
- minimum=2048,
- maximum=MAX_NEW_TOKENS,
- step=1,
- value=DEFAULT_MAX_NEW_TOKENS,
- )
- temperature = gr.Slider(label='Temperature',minimum=0.1,maximum=4.0,step=0.1,value=DEFAULT_TEMPERATURE,)
- char_length = gr.Slider(label='Max Character',
- minimum= DEFAULT_CHAR_LENGTH,
- maximum = 5*DEFAULT_CHAR_LENGTH,
- step = 500,value= 1500
- )
-
- load_youtube_bt.click(youtube_chat,inputs= [youtube_link,API_key,LLM_option,temperature,max_new_tokens,char_length],outputs=[langchain_status], queue=False)
-
- clean_chat_btn.click(clear_chat, [], chatbot)
-
- question.submit(add_text, inputs=[chatbot, question], outputs=[chatbot, question]).then(bot, chatbot, chatbot)
- submit_btn.click(add_text, inputs=[chatbot, question], outputs=[chatbot, question]).then(bot, chatbot, chatbot)
-
-demo.launch()
\ No newline at end of file
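For orientation, here is a minimal, self-contained sketch of the prompt-plus-RetrievalQA wiring the deleted app above relies on. It assumes a langchain version contemporary with this code; the two sample texts and the commented-out `llm` placeholder are hypothetical stand-ins, not part of the original app.

from langchain.prompts import PromptTemplate
from langchain.chains import RetrievalQA
from langchain.vectorstores import FAISS
from langchain.embeddings import SentenceTransformerEmbeddings

# Prompt with the two variables the "stuff" chain expects.
prompt = PromptTemplate(
    template="Use the context to answer.\n{context}\nQuestion: {question}\nAnswer:",
    input_variables=["context", "question"],
)

# Tiny FAISS index; the real app indexes YouTube transcript chunks instead.
embeddings = SentenceTransformerEmbeddings(model_name="thenlper/gte-base")
db = FAISS.from_texts(["sample chunk one", "sample chunk two"], embedding=embeddings)

# `llm` would be any langchain-compatible model (HuggingFaceHub, ChatOpenAI, ...):
# qa = RetrievalQA.from_chain_type(
#     llm=llm,
#     chain_type="stuff",
#     retriever=db.as_retriever(),
#     chain_type_kwargs={"prompt": prompt},
#     return_source_documents=True,
# )
# print(qa({"query": "What is discussed?"})["result"])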
diff --git a/spaces/Sandiago21/text-to-speech-greek/README.md b/spaces/Sandiago21/text-to-speech-greek/README.md
deleted file mode 100644
index f6d5d7ebbe464ec80c1461fa3ca418e2732e7af3..0000000000000000000000000000000000000000
--- a/spaces/Sandiago21/text-to-speech-greek/README.md
+++ /dev/null
@@ -1,6 +0,0 @@
----
-title: text-to-speech-greek
-app_file: app.py
-sdk: gradio
-sdk_version: 3.36.0
----
diff --git a/spaces/ShibaDeveloper/Text-To-Image/README.md b/spaces/ShibaDeveloper/Text-To-Image/README.md
deleted file mode 100644
index 14781ed788c23d0caadab48de464aa6caeb8a31a..0000000000000000000000000000000000000000
--- a/spaces/ShibaDeveloper/Text-To-Image/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Text To Image
-emoji: 🐶
-colorFrom: indigo
-colorTo: gray
-sdk: gradio
-sdk_version: 3.12.0
-app_file: app.py
-pinned: false
-license: unknown
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/ShkShahid/Auto-encoder_For_Image_Reconstruction/README.md b/spaces/ShkShahid/Auto-encoder_For_Image_Reconstruction/README.md
deleted file mode 100644
index 8e8bb4cc97eb342f1cf0a00491302be8d7e11654..0000000000000000000000000000000000000000
--- a/spaces/ShkShahid/Auto-encoder_For_Image_Reconstruction/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Auto-encoder For Image Reconstruction
-emoji: 🐨
-colorFrom: yellow
-colorTo: indigo
-sdk: gradio
-sdk_version: 3.4.1
-app_file: app.py
-pinned: false
-license: apache-2.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/display.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/display.py
deleted file mode 100644
index b7f64f25c973b920da9def59ff0a884024aaa86e..0000000000000000000000000000000000000000
--- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/display.py
+++ /dev/null
@@ -1,44 +0,0 @@
-"""Public API for display tools in IPython.
-"""
-
-# -----------------------------------------------------------------------------
-# Copyright (C) 2012 The IPython Development Team
-#
-# Distributed under the terms of the BSD License. The full license is in
-# the file COPYING, distributed as part of this software.
-# -----------------------------------------------------------------------------
-
-# -----------------------------------------------------------------------------
-# Imports
-# -----------------------------------------------------------------------------
-
-from IPython.core.display_functions import *
-from IPython.core.display import (
- display_pretty,
- display_html,
- display_markdown,
- display_svg,
- display_png,
- display_jpeg,
- display_latex,
- display_json,
- display_javascript,
- display_pdf,
- DisplayObject,
- TextDisplayObject,
- Pretty,
- HTML,
- Markdown,
- Math,
- Latex,
- SVG,
- ProgressBar,
- JSON,
- GeoJSON,
- Javascript,
- Image,
- set_matplotlib_formats,
- set_matplotlib_close,
- Video,
-)
-from IPython.lib.display import *
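For orientation, a small usage sketch of the API this shim re-exports, assuming IPython is installed; outside a notebook frontend the objects simply fall back to their text representations.

from IPython.display import display, HTML, Markdown, JSON

display(HTML("<b>bold text</b>"))      # render raw HTML
display(Markdown("# A heading"))       # render Markdown
display(JSON({"key": "value"}))        # JSON viewer in notebook frontends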
diff --git a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmseg/models/necks/fpn.py b/spaces/Superlang/ImageProcessor/annotator/uniformer/mmseg/models/necks/fpn.py
deleted file mode 100644
index a53b2a69500f8c2edb835abc3ff0ccc2173d1fb1..0000000000000000000000000000000000000000
--- a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmseg/models/necks/fpn.py
+++ /dev/null
@@ -1,212 +0,0 @@
-import torch.nn as nn
-import torch.nn.functional as F
-from annotator.uniformer.mmcv.cnn import ConvModule, xavier_init
-
-from ..builder import NECKS
-
-
-@NECKS.register_module()
-class FPN(nn.Module):
- """Feature Pyramid Network.
-
- This is an implementation of `Feature Pyramid Networks for Object
- Detection <https://arxiv.org/abs/1612.03144>`_.
-
- Args:
- in_channels (List[int]): Number of input channels per scale.
- out_channels (int): Number of output channels (used at each scale)
- num_outs (int): Number of output scales.
- start_level (int): Index of the start input backbone level used to
- build the feature pyramid. Default: 0.
- end_level (int): Index of the end input backbone level (exclusive) to
- build the feature pyramid. Default: -1, which means the last level.
- add_extra_convs (bool | str): If bool, it decides whether to add conv
- layers on top of the original feature maps. Default to False.
- If True, its actual mode is specified by `extra_convs_on_inputs`.
- If str, it specifies the source feature map of the extra convs.
- Only the following options are allowed
-
- - 'on_input': Last feat map of neck inputs (i.e. backbone feature).
- - 'on_lateral': Last feature map after lateral convs.
- - 'on_output': The last output feature map after fpn convs.
- extra_convs_on_inputs (bool, deprecated): Whether to apply extra convs
- on the original feature from the backbone. If True,
- it is equivalent to `add_extra_convs='on_input'`. If False, it is
- equivalent to set `add_extra_convs='on_output'`. Default to True.
- relu_before_extra_convs (bool): Whether to apply relu before the extra
- conv. Default: False.
- no_norm_on_lateral (bool): Whether to apply norm on lateral.
- Default: False.
- conv_cfg (dict): Config dict for convolution layer. Default: None.
- norm_cfg (dict): Config dict for normalization layer. Default: None.
- act_cfg (str): Config dict for activation layer in ConvModule.
- Default: None.
- upsample_cfg (dict): Config dict for interpolate layer.
- Default: `dict(mode='nearest')`
-
- Example:
- >>> import torch
- >>> in_channels = [2, 3, 5, 7]
- >>> scales = [340, 170, 84, 43]
- >>> inputs = [torch.rand(1, c, s, s)
- ... for c, s in zip(in_channels, scales)]
- >>> self = FPN(in_channels, 11, len(in_channels)).eval()
- >>> outputs = self.forward(inputs)
- >>> for i in range(len(outputs)):
- ... print(f'outputs[{i}].shape = {outputs[i].shape}')
- outputs[0].shape = torch.Size([1, 11, 340, 340])
- outputs[1].shape = torch.Size([1, 11, 170, 170])
- outputs[2].shape = torch.Size([1, 11, 84, 84])
- outputs[3].shape = torch.Size([1, 11, 43, 43])
- """
-
- def __init__(self,
- in_channels,
- out_channels,
- num_outs,
- start_level=0,
- end_level=-1,
- add_extra_convs=False,
- extra_convs_on_inputs=False,
- relu_before_extra_convs=False,
- no_norm_on_lateral=False,
- conv_cfg=None,
- norm_cfg=None,
- act_cfg=None,
- upsample_cfg=dict(mode='nearest')):
- super(FPN, self).__init__()
- assert isinstance(in_channels, list)
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.num_ins = len(in_channels)
- self.num_outs = num_outs
- self.relu_before_extra_convs = relu_before_extra_convs
- self.no_norm_on_lateral = no_norm_on_lateral
- self.fp16_enabled = False
- self.upsample_cfg = upsample_cfg.copy()
-
- if end_level == -1:
- self.backbone_end_level = self.num_ins
- assert num_outs >= self.num_ins - start_level
- else:
- # if end_level < inputs, no extra level is allowed
- self.backbone_end_level = end_level
- assert end_level <= len(in_channels)
- assert num_outs == end_level - start_level
- self.start_level = start_level
- self.end_level = end_level
- self.add_extra_convs = add_extra_convs
- assert isinstance(add_extra_convs, (str, bool))
- if isinstance(add_extra_convs, str):
- # Extra_convs_source choices: 'on_input', 'on_lateral', 'on_output'
- assert add_extra_convs in ('on_input', 'on_lateral', 'on_output')
- elif add_extra_convs: # True
- if extra_convs_on_inputs:
- # For compatibility with previous release
- # TODO: deprecate `extra_convs_on_inputs`
- self.add_extra_convs = 'on_input'
- else:
- self.add_extra_convs = 'on_output'
-
- self.lateral_convs = nn.ModuleList()
- self.fpn_convs = nn.ModuleList()
-
- for i in range(self.start_level, self.backbone_end_level):
- l_conv = ConvModule(
- in_channels[i],
- out_channels,
- 1,
- conv_cfg=conv_cfg,
- norm_cfg=norm_cfg if not self.no_norm_on_lateral else None,
- act_cfg=act_cfg,
- inplace=False)
- fpn_conv = ConvModule(
- out_channels,
- out_channels,
- 3,
- padding=1,
- conv_cfg=conv_cfg,
- norm_cfg=norm_cfg,
- act_cfg=act_cfg,
- inplace=False)
-
- self.lateral_convs.append(l_conv)
- self.fpn_convs.append(fpn_conv)
-
- # add extra conv layers (e.g., RetinaNet)
- extra_levels = num_outs - self.backbone_end_level + self.start_level
- if self.add_extra_convs and extra_levels >= 1:
- for i in range(extra_levels):
- if i == 0 and self.add_extra_convs == 'on_input':
- in_channels = self.in_channels[self.backbone_end_level - 1]
- else:
- in_channels = out_channels
- extra_fpn_conv = ConvModule(
- in_channels,
- out_channels,
- 3,
- stride=2,
- padding=1,
- conv_cfg=conv_cfg,
- norm_cfg=norm_cfg,
- act_cfg=act_cfg,
- inplace=False)
- self.fpn_convs.append(extra_fpn_conv)
-
- # default init_weights for conv(msra) and norm in ConvModule
- def init_weights(self):
- for m in self.modules():
- if isinstance(m, nn.Conv2d):
- xavier_init(m, distribution='uniform')
-
- def forward(self, inputs):
- assert len(inputs) == len(self.in_channels)
-
- # build laterals
- laterals = [
- lateral_conv(inputs[i + self.start_level])
- for i, lateral_conv in enumerate(self.lateral_convs)
- ]
-
- # build top-down path
- used_backbone_levels = len(laterals)
- for i in range(used_backbone_levels - 1, 0, -1):
- # In some cases, fixing `scale factor` (e.g. 2) is preferred, but
- # it cannot co-exist with `size` in `F.interpolate`.
- if 'scale_factor' in self.upsample_cfg:
- laterals[i - 1] += F.interpolate(laterals[i],
- **self.upsample_cfg)
- else:
- prev_shape = laterals[i - 1].shape[2:]
- laterals[i - 1] += F.interpolate(
- laterals[i], size=prev_shape, **self.upsample_cfg)
-
- # build outputs
- # part 1: from original levels
- outs = [
- self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels)
- ]
- # part 2: add extra levels
- if self.num_outs > len(outs):
- # use max pool to get more levels on top of outputs
- # (e.g., Faster R-CNN, Mask R-CNN)
- if not self.add_extra_convs:
- for i in range(self.num_outs - used_backbone_levels):
- outs.append(F.max_pool2d(outs[-1], 1, stride=2))
- # add conv layers on top of original feature maps (RetinaNet)
- else:
- if self.add_extra_convs == 'on_input':
- extra_source = inputs[self.backbone_end_level - 1]
- elif self.add_extra_convs == 'on_lateral':
- extra_source = laterals[-1]
- elif self.add_extra_convs == 'on_output':
- extra_source = outs[-1]
- else:
- raise NotImplementedError
- outs.append(self.fpn_convs[used_backbone_levels](extra_source))
- for i in range(used_backbone_levels + 1, self.num_outs):
- if self.relu_before_extra_convs:
- outs.append(self.fpn_convs[i](F.relu(outs[-1])))
- else:
- outs.append(self.fpn_convs[i](outs[-1]))
- return tuple(outs)
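A quick smoke test of the FPN above, mirroring its docstring example but also exercising the extra-conv path; the channel counts and spatial sizes are illustrative, and it assumes the module's annotator.uniformer.mmcv dependency is importable.

import torch

in_channels = [2, 3, 5, 7]
scales = [64, 32, 16, 8]
inputs = [torch.rand(1, c, s, s) for c, s in zip(in_channels, scales)]

# num_outs=5 exceeds len(in_channels)=4, so one extra stride-2 level is built.
fpn = FPN(in_channels, out_channels=11, num_outs=5,
          add_extra_convs='on_output').eval()
with torch.no_grad():
    outs = fpn(inputs)
for i, o in enumerate(outs):
    print(f'outputs[{i}].shape = {o.shape}')  # sizes 64, 32, 16, 8, then 4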
diff --git a/spaces/Supsies/CodingandMore/app.py b/spaces/Supsies/CodingandMore/app.py
deleted file mode 100644
index b8e324b9c29780cc194b84219d4782bd519931d7..0000000000000000000000000000000000000000
--- a/spaces/Supsies/CodingandMore/app.py
+++ /dev/null
@@ -1,172 +0,0 @@
-### ----------------------------- ###
-### libraries ###
-### ----------------------------- ###
-
-import gradio as gr
-import pandas as pd
-import numpy as np
-from sklearn.model_selection import train_test_split
-from sklearn.linear_model import LogisticRegression
-from sklearn import metrics
-
-
-### ------------------------------ ###
-### data transformation ###
-### ------------------------------ ###
-
-# load dataset
-uncleaned_data = pd.read_csv('data.csv')
-
-# remove timestamp from dataset (always first column)
-uncleaned_data = uncleaned_data.iloc[: , 1:]
-data = pd.DataFrame()
-
-# keep track of which columns are categorical and what
-# those columns' value mappings are
-# structure: {colname1: {...}, colname2: {...} }
-cat_value_dicts = {}
-final_colname = uncleaned_data.columns[len(uncleaned_data.columns) - 1]
-
-# for each column...
-for (colname, colval) in uncleaned_data.items():  # .iteritems() was removed in pandas 2.0
-
- # check if col is already a number; if so, add col directly
- # to new dataframe and skip to next column
- if isinstance(colval.values[0], (np.integer, float)):
- data[colname] = uncleaned_data[colname].copy()
- continue
-
- # structure: {0: "lilac", 1: "blue", ...}
- new_dict = {}
- val = 0 # first index per column
- transformed_col_vals = [] # new numeric datapoints
-
- # if not, for each item in that column...
- for (row, item) in enumerate(colval.values):
-
- # if item is not in this col's dict...
- if item not in new_dict:
- new_dict[item] = val
- val += 1
-
- # then add numerical value to transformed dataframe
- transformed_col_vals.append(new_dict[item])
-
- # reverse dictionary only for final col (0, 1) => (vals)
- if colname == final_colname:
- new_dict = {value : key for (key, value) in new_dict.items()}
-
- cat_value_dicts[colname] = new_dict
- data[colname] = transformed_col_vals
-
-
-### -------------------------------- ###
-### model training ###
-### -------------------------------- ###
-
-# select features and prediction; automatically selects last column as prediction
-cols = len(data.columns)
-num_features = cols - 1
-x = data.iloc[: , :num_features]
-y = data.iloc[: , num_features:]
-
-# split data into training and testing sets
-x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25)
-
-# instantiate the model (using default parameters)
-model = LogisticRegression()
-model.fit(x_train, y_train.values.ravel())
-y_pred = model.predict(x_test)
-
-
-### -------------------------------- ###
-### article generation ###
-### -------------------------------- ###
-# borrow file reading function from reader.py
-
-def get_feat():
- feats = [abs(x) for x in model.coef_[0]]
- max_val = max(feats)
- idx = feats.index(max_val)
- return data.columns[idx]
-
-acc = str(round(metrics.accuracy_score(y_test, y_pred) * 100, 1)) + "%"
-most_imp_feat = get_feat()
-# info = get_article(acc, most_imp_feat)
-
-
-
-### ------------------------------- ###
-### interface creation ###
-### ------------------------------- ###
-
-
-# predictor for generic number of features
-def general_predictor(*args):
- features = []
-
- # transform categorical input
- for colname, arg in zip(data.columns, args):
- if (colname in cat_value_dicts):
- features.append(cat_value_dicts[colname][arg])
- else:
- features.append(arg)
-
- # predict single datapoint
- new_input = [features]
- result = model.predict(new_input)
- return cat_value_dicts[final_colname][result[0]]
-
-# add data labels to replace those lost via star-args
-
-
-block = gr.Blocks()
-
-with open('info.md') as f:
- with block:
- gr.Markdown(f.readline())
- gr.Markdown('Take the quiz to get a personalized recommendation using AI.')
-
- with gr.Row():
- with gr.Box():
- inputls = []
- for colname in data.columns:
- # skip last column
- if colname == final_colname:
- continue
-
- # access categories dict if data is categorical
- # otherwise, just use a number input
- if colname in cat_value_dicts:
- radio_options = list(cat_value_dicts[colname].keys())
- inputls.append(gr.inputs.Dropdown(choices=radio_options, type="value", label=colname))
- else:
- # add numerical input
- inputls.append(gr.inputs.Number(label=colname))
- gr.Markdown(" ")
-
- submit = gr.Button("Click to see your personalized result!", variant="primary")
- gr.Markdown(" ")
- output = gr.Textbox(label="Your recommendation:", placeholder="your recommendation will appear here")
-
- submit.click(fn=general_predictor, inputs=inputls, outputs=output)
- gr.Markdown(" ")
-
- with gr.Row():
- with gr.Box():
- gr.Markdown(f"
-
- aaccfb2cb3
-
-
-
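The label-encoding loop above can be reproduced more compactly with pandas.factorize. A standalone sketch follows; the toy DataFrame is a stand-in for data.csv, and unlike the loop above it builds the reverse code-to-label map for every column, not just the target.

import pandas as pd

df = pd.DataFrame({"color": ["lilac", "blue", "lilac"],
                   "outcome": ["yes", "no", "yes"]})
cat_value_dicts = {}
for col in df.columns:
    codes, uniques = pd.factorize(df[col])  # integer codes in first-seen order
    df[col] = codes
    cat_value_dicts[col] = dict(enumerate(uniques))

print(df)                              # all-integer frame ready for sklearn
print(cat_value_dicts["outcome"][0])   # -> "yes": reverse lookup for predictions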
diff --git a/spaces/cihyFjudo/fairness-paper-search/Cops And Robbers Movie In Italian Dubbed Download.md b/spaces/cihyFjudo/fairness-paper-search/Cops And Robbers Movie In Italian Dubbed Download.md
deleted file mode 100644
index 023ac620f370a13b3ddb77135b98c0d0ba90e310..0000000000000000000000000000000000000000
--- a/spaces/cihyFjudo/fairness-paper-search/Cops And Robbers Movie In Italian Dubbed Download.md
+++ /dev/null
@@ -1,7 +0,0 @@
-
-
Bandits stars Billy Bob Thornton and Bruce Willis as two unlikely partners in crime who simplify the process of robbing banks by confronting the bank managers in their homes, spend the night at their homes, and arrive at the bank the following morning with their targets. The film is inspired by the real-life crimes of bank robbers Terry Lee Connor and Joseph Daugherty. Bandits is one of the best bank robbery movies because of the amazing chemistry and comedic timing of the three lead characters.
A crew of childhood friends makes their living by robbing banks in The Town. During a job, the masked bank robbers take assistant manager Claire Keesey (Rebecca Hall) hostage, but release her unharmed. Things get complicated when one of the masked thieves Doug (Ben Affleck) begins courting her secretly. On top of that, they are being pursued by hard-nosed FBI Agent Adam Frawley (Jon Hamm). In typical heist movie fashion, the expert crew plans one last job that is going to set them up for life, in this case, robbing Fenway Park.
-
Career criminal Neil McCauley (Robert De Niro) plans his last big score while simultaneously trying to straighten out one of his crew who has stepped out of line. Meanwhile, he is being hunted by LAPD detective Vincent Hanna (Al Pacino) who is dealing with personal problems of his own. Despite being on opposite sides of the law, the two men share a mutual respect. What makes Heat great is the level of authenticity the film has. Director Michael Mann filmed the movie on location wherever possible rather than on studio sets, and the actors were also given extensive weapons training which pays off in the iconic shoot-out scene. Moreover, it was dubbed the best bank robbery movie ever by real-life bank robber Shon Hopwood. Who can argue with that?
\ No newline at end of file
diff --git a/spaces/cihyFjudo/fairness-paper-search/Fifa 97 Pc Dosbox CODEX Relive Your Childhood Memories with This Easy and Fun Tutorial.md b/spaces/cihyFjudo/fairness-paper-search/Fifa 97 Pc Dosbox CODEX Relive Your Childhood Memories with This Easy and Fun Tutorial.md
deleted file mode 100644
index dda515cc83936e273d3fa69e0763802eb44cd2a0..0000000000000000000000000000000000000000
--- a/spaces/cihyFjudo/fairness-paper-search/Fifa 97 Pc Dosbox CODEX Relive Your Childhood Memories with This Easy and Fun Tutorial.md
+++ /dev/null
@@ -1,12 +0,0 @@
-
-
Begin downloading the game by clicking on Install Now. Genshin Impact's official game size is around 11.97 GB. It's a small download. However, the game servers are very slow, and the download could take seven to eight hours to complete.
-
Some of the fixes detailed within this update from the developer include a variety of quest, creature, and ability names. Therefore, if you are keen to avoid as many potential spoilers as possible, I would advise not reading the full notes. However, Sony Santa Monica "strongly" encourages everyone to download this patch before playing the game.
The previous biggest 'Souls' game launch (at boxed retail) was 2016's Dark Souls 3, but Elden Ring has beaten that title's boxed launch by 26%. This is before we consider digital sales, and the download games market has accelerated significantly since 2016. Digital sales data arrives later in the week.
-
Under the section of "Library and downloads", the "Offline mixtape" option is replaced by "Number of songs" where users can choose the number of songs from 100 to 500 based on how many songs they want to be saved in the offline playlist, with the help of a slider. YouTube Music will download user's liked songs, favourite playlists, and other frequented albums.
-
With this new feature of "smart download", Youtube is giving more control to the users as they can decide how many songs will stay on their offline playlist. But the question still remains who downloads more than 100 songs on their device these days where everyone has unlimited data access and limited data storage, but as always exceptions are always there and this feature is clearly going to help them.
-
List of free-to-play RPG games, including MMORPG, CORPG, etc. for PC, Mac, Linux and console, be them f2p offline or online single-player, multiplayer or massively multiplayer online RPG titles. The following list also includes Mobile RPGs. Please feel free to suggest any missing game that you believe should be listed.
-
I am not entirely sure if expecting so much clarity from an emulator that you are basically using FREE OF CHARGE is the best course of action. You could simply download this or any other game that is built for PC, just like we used to play NDS on Laptops and PCs back in the day. These emulators help you project the phone onto a big computer screen and you could use it for diverse purposes but gaming is a different thing that requires hardcore graphics, you should know that before expecting too much from it.
\ No newline at end of file
diff --git a/spaces/cihyFjudo/fairness-paper-search/Lingaa malayalam movie songs mp3 free download Discover the amazing songs of Mano Neeti Mohan Tanvi Shah and more.md b/spaces/cihyFjudo/fairness-paper-search/Lingaa malayalam movie songs mp3 free download Discover the amazing songs of Mano Neeti Mohan Tanvi Shah and more.md
deleted file mode 100644
index 961f69e95d083c8a5dca554c10be2a36b28d9a12..0000000000000000000000000000000000000000
--- a/spaces/cihyFjudo/fairness-paper-search/Lingaa malayalam movie songs mp3 free download Discover the amazing songs of Mano Neeti Mohan Tanvi Shah and more.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
Chapters: Interactive Stories APK - How to Download and Play
-
If you love reading interactive stories that let you choose your own path and outcome, then you might want to try Chapters: Interactive Stories APK. This is a popular game that offers a collection of romance, drama, comedy, sci-fi, fantasy, and other genres of stories that you can play on your Android device. In this article, we will tell you what Chapters: Interactive Stories is, why you should download its APK version, and how to do it easily and safely.
Chapters: Interactive Stories is a game developed by Crazy Maple Studio Dev that allows you to read through pages and pages of fun interactive stories that fit your mood. You can choose your story from their top collection of romance, contract marriage, second chance, dragon king, pirates, alpha wolf, isekai, reality TV dating, reverse harem, sci-fi, comedy, and drama series.
-
A game that lets you choose your story
-
The best thing about Chapters: Interactive Stories is that you get to make the choices in each story. You can decide on hard life choices such as falling in love, discovering secrets, or unraveling deep mysteries. You can also customize your character's name, appearance, style, and personality. Your choices will affect how the story unfolds and how it ends.
-
A game that offers a variety of genres and stories
-
Another great thing about Chapters: Interactive Stories is that it offers a wide range of genres and stories for you to enjoy. Whether you're into romance, comedy, drama, fantasy, sci-fi, or anything in between, you'll find something that suits your taste. You can also explore different themes and scenarios such as contract marriage, second chance romance, dragon king romance, pirates adventure, alpha wolf romance, isekai adventure, reality TV dating show, reverse harem romance, sci-fi thriller, comedy show, and drama series. You can also discover new stories and authors every week as the game updates regularly with new content.
-
A game that features stunning graphics and sound effects
-
One more thing that makes Chapters: Interactive Stories stand out is its high-quality graphics and sound effects. The game uses realistic and beautiful images to illustrate the scenes and characters in each story. You can also enjoy the immersive sound effects and music that accompany the story and enhance the mood. You'll feel like you're watching a movie or a TV show as you play the game.
-
-
Why should you download Chapters: Interactive Stories APK?
-
Now that you know what Chapters: Interactive Stories is, you might be wondering why you should download its APK version instead of the official version from the Google Play Store. Well, there are several reasons why downloading the APK version is a better option for you.
-
To enjoy unlimited access to all stories and features
-
The first reason is that by downloading the APK version, you can enjoy unlimited access to all the stories and features in the game. You don't have to worry about running out of diamonds or tickets, which are the in-game currencies that you need to unlock premium choices, outfits, and chapters. You can also access all the stories without waiting for them to be released or unlocked. You can play any story you want, anytime you want, and make any choice you want.
-
To play offline without internet connection
-
The second reason is that by downloading the APK version, you can play the game offline without internet connection. This means that you don't have to worry about your data usage or your wifi signal. You can play the game anywhere and anytime, even when you're traveling, commuting, or in a remote area. You can also save your progress and resume your game later without losing anything.
-
To get regular updates and new content
-
The third reason is that by downloading the APK version, you can get regular updates and new content from the game developers. You don't have to wait for the official version to be updated or patched. You can get the latest version of the game as soon as it's available, with all the new stories, features, bug fixes, and improvements. You can also enjoy exclusive content that might not be available in the official version.
-
How to download Chapters: Interactive Stories APK?
-
Now that you know why you should download Chapters: Interactive Stories APK, you might be wondering how to do it easily and safely. Well, don't worry, because we have prepared a simple guide for you to follow. Here are the steps you need to take to download and install Chapters: Interactive Stories APK on your Android device.
-
Step 1: Find a reliable source
-
The first step is to find a reliable source where you can download the APK file of Chapters: Interactive Stories. There are many websites that offer APK files of various games and apps, but not all of them are trustworthy or secure. Some of them might contain malware, viruses, or other harmful software that can damage your device or steal your personal information. Therefore, you need to be careful and choose a reputable source that has positive reviews and ratings from other users.
The second step is to download the APK file of Chapters: Interactive Stories from the source that you have chosen. To do this, you need to follow these instructions:
-
-
Open your web browser on your Android device and go to the link that we provided above.
-
Click on the green "Download APK" button on the page.
-
Wait for the download to start and finish.
-
You might see a warning message on your screen saying that this type of file can harm your device. Ignore this message and click on "OK" or "Download anyway".
-
You might also see a pop-up message asking you to allow downloads from unknown sources. If you see this message, go to your device settings and enable the option to allow downloads from unknown sources.
-
-
Step 3: Install the APK file
-
The third step is to install the APK file of Chapters: Interactive Stories on your Android device. To do this, you need to follow these instructions:
-
-
Go to your device's file manager and locate the downloaded APK file. It should be in your download folder or in the folder that you have specified for downloads.
-
Tap on the APK file and click on "Install".
-
Wait for the installation to complete.
-
You might see a pop-up message asking you to grant permissions to the app. If you see this message, click on "Allow" or "Accept".
-
-
Step 4: Launch the game and start playing
-
The fourth and final step is to launch the game and start playing. To do this, you need to follow these instructions:
-
-
Go to your device's app drawer and look for the Chapters: Interactive Stories icon. It should be a pink book with a white C on it.
-
Tap on the icon and wait for the game to load.
-
Choose your preferred language and accept the terms of service and privacy policy.
-
Create your profile and customize your character.
-
Select a story that you want to play and enjoy!
-
-
Conclusion
-
Chapters: Interactive Stories is a fun and exciting game that lets you read and play interactive stories that fit your mood. You can choose from a variety of genres and themes, make choices that affect the story, customize your character, and enjoy stunning graphics and sound effects. You can also download its APK version to get unlimited access to all stories and features, play offline without internet connection, and get regular updates and new content. To download Chapters: Interactive Stories APK, you just need to follow four simple steps: find a reliable source, download the APK file, install the APK file, and launch the game. We hope this article has helped you learn how to download and play Chapters: Interactive Stories APK. Have fun!
-
FAQs
-
Here are some frequently asked questions about Chapters: Interactive Stories APK:
-
-
Q: Is Chapters: Interactive Stories APK safe?
A: Yes, Chapters: Interactive Stories APK is safe as long as you download it from a reliable source like APKPure.com. However, you should always scan any downloaded file with an antivirus software before installing it on your device.
-
Q: Is Chapters: Interactive Stories APK free?
A: Yes, Chapters: Interactive Stories APK is free to download and play. However, some stories and features might require in-app purchases or watching ads to unlock them.
-
Q: How can I update Chapters: Interactive Stories APK?
A: You can update Chapters: Interactive Stories APK by downloading the latest version of the APK file from the same source that you downloaded it from. You can also check for updates within the game by going to the settings menu and tapping on "Check for updates".
-
Q: How can I contact the developers of Chapters: Interactive Stories?
A: You can contact the developers of Chapters: Interactive Stories by sending them an email at support@chapters-interactive-stories.com or by visiting their website at https://www.chapters-interactive-stories.com/.
-
Q: How can I share my feedback or suggestions for Chapters: Interactive Stories?
A: You can share your feedback or suggestions for Chapters: Interactive Stories by leaving a review on the Google Play Store or by joining their community on Facebook at https://www.facebook.com/ChaptersInteractiveStories/.
-
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/Download Microsoft Solitaire Collection for Windows 7 and Play Five Amazing Card Games.md b/spaces/congsaPfin/Manga-OCR/logs/Download Microsoft Solitaire Collection for Windows 7 and Play Five Amazing Card Games.md
deleted file mode 100644
index 5126754655e6f1fc4d0db4dd625fd350eb3dddfe..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/Download Microsoft Solitaire Collection for Windows 7 and Play Five Amazing Card Games.md
+++ /dev/null
@@ -1,103 +0,0 @@
-
-
Free Microsoft Card Games Download for Windows 7
-
Do you love playing card games on your computer? Do you miss the classic solitaire games that came with earlier versions of Windows? If you answered yes to both questions, then this article is for you. In this article, we will show you how to download free Microsoft card games for Windows 7, such as Solitaire, Spider Solitaire, FreeCell, and more. We will also introduce you to some other free card games that you can enjoy on your Windows 7 PC. Whether you want to relax, challenge yourself, or have some fun, there is a card game for everyone.
Microsoft card games are digital versions of popular card games that you can play on your computer. They are developed by Microsoft or its partners and are available on various platforms, such as Windows, Xbox, Android, iOS, and online. Some of the most well-known Microsoft card games are Solitaire, Spider Solitaire, FreeCell, TriPeaks Solitaire, and Pyramid Solitaire. These games are part of the Microsoft Solitaire Collection, which is one of the most played video games of all time.
-
Why download free Microsoft card games for Windows 7?
-
There are many reasons why you might want to download free Microsoft card games for Windows 7. Here are some of them:
-
-
You can enjoy the classic solitaire games that you loved from earlier versions of Windows.
-
You can play offline or online, with or without an account.
-
You can choose from different game modes, difficulty levels, themes, and card backs.
-
You can earn achievements, badges, rewards, and compete with other players.
-
You can keep your mind sharp, improve your concentration, and have fun at the same time.
-
-
How to download free Microsoft card games for Windows 7
-
Option 1: Get the classic free Solitaire games from Microsoft Store
-
If you want to play the classic solitaire games that came with earlier versions of Windows, such as Klondike Solitaire, Spider Solitaire, FreeCell Solitaire, TriPeaks Solitaire, and Pyramid Solitaire, then this option is for you. You can get them all in one convenient app, the Microsoft Solitaire Collection. Here's how:
Open your web browser and go to the Microsoft Solitaire Collection page in Microsoft Store. You will see a description of the app and some screenshots.
-
Step 2: Select Install and launch the game
-
On the Microsoft Solitaire Collection page in Microsoft Store, select Install. The game will download and install automatically. To launch the game, select Play.
Step 3: Pin the game to your taskbar or Start menu for easy access
-
If you want to play the game again later, you can pin it to your taskbar or Start menu for easy access. To do this, right-click on the game icon and select Pin to taskbar or Pin to Start. You can also create a shortcut on your desktop by dragging and dropping the game icon.
-
Option 2: Get other free card games from Microsoft Apps
-
If you want to try some other free card games from Microsoft, such as Hearts, Spades, Gin Rummy, and more, then this option is for you. You can get them from the Microsoft Apps website, which is a collection of apps and games that work on Windows 10 devices. Here's how:
-
free solitaire games for windows 7 from microsoft
-microsoft spider solitaire download free for windows 7
-free microsoft freecell card game download for windows 7
-microsoft tripeaks solitaire free download for windows 7
-free microsoft pyramid solitaire game download for windows 7
-free classic solitaire games from microsoft for windows 7
-microsoft store solitaire collection free download for windows 7
-free microsoft card games online for windows 7
-microsoft hearts card game free download for windows 7
-free microsoft spades card game download for windows 7
-microsoft minesweeper card game free download for windows 7
-free microsoft bridge card game download for windows 7
-microsoft mahjong solitaire free download for windows 7
-free microsoft gin rummy card game download for windows 7
-microsoft euchre card game free download for windows 7
-free microsoft cribbage card game download for windows 7
-microsoft canasta card game free download for windows 7
-free microsoft blackjack card game download for windows 7
-microsoft poker card game free download for windows 7
-free microsoft uno card game download for windows 7
-microsoft chess card game free download for windows 7
-free microsoft go fish card game download for windows 7
-microsoft bingo card game free download for windows 7
-free microsoft war card game download for windows 7
-microsoft dominoes card game free download for windows 7
-free microsoft memory card game download for windows 7
-microsoft checkers card game free download for windows 7
-free microsoft old maid card game download for windows 7
-microsoft backgammon card game free download for windows 7
-free microsoft snap card game download for windows 7
-microsoft yahtzee card game free download for windows 7
-free microsoft crazy eights card game download for windows 7
-microsoft sudoku card game free download for windows 7
-free microsoft pinochle card game download for windows 7
-microsoft scrabble card game free download for windows 7
-free microsoft skip-bo card game download for windows 7
-microsoft monopoly card game free download for windows 7
-free microsoft phase ten card game download for windows 7
-microsoft clue card game free download for windows 7
-free microsoft risk card game download for windows 7
-microsoft trivial pursuit card game free download for windows 7
-free microsoft uno attack card game download for windows 7
-microsoft battleship card game free download for windows 7
-free microsoft sorry! card game download for windows 7
-microsoft trouble card game free download for windows 7
Open your web browser and go to the Microsoft Apps website. You will see a list of categories, such as productivity, education, entertainment, and more. Click on the card and board category to see the available card games.
-
Step 2: Select the game you want to download and click Get in Store app
-
Once you find the game you want to download, click on it to see more details. You will see a description of the game, some screenshots, ratings, reviews, and system requirements. To download the game, click on Get in Store app. This will open the Microsoft Store app on your PC.
-
Step 3: Follow the instructions to install and launch the game
-
In the Microsoft Store app, you will see the same information as on the website. To install the game, click on Install. The game will download and install automatically. To launch the game, click on Play. You can also pin the game to your taskbar or Start menu for easy access.
-
Option 3: Get more free card games from Microsoft Store
-
If you want to explore more free card games from Microsoft Store, such as Wordament, 3D Chess, Heroes Expedition, and more, then this option is for you. You can get them from the Microsoft Store website, which is a digital marketplace for apps and games for Windows 10 devices. Here's how:
Open your web browser and go to the Microsoft Store website. You will see a search box at the top right corner of the page. Type in "top free card games" and hit Enter. You will see a list of results with various card games.
-
Step 2: Choose from a variety of card games, such as Wordament, 3D Chess, Heroes Expedition, and more
-
You can browse through the results and choose from a variety of card games that suit your preferences. Some of them are word-based games, such as Wordament and Word Connect; some are strategy-based games, such as 3D Chess and Heroes Expedition; some are casual games, such as Solitaire Grand Harvest and Mahjong Journey. You can also filter the results by rating, genre, device type, and more.
-
Step 3: Click Get or Play to download and launch the game
-
Once you find the game you want to download, click on it to see more details. You will see a description of the game, some screenshots, ratings, reviews, and system requirements. To download the game, click on Get or Play. This will open the Microsoft Store app on your PC. Follow the instructions to install and launch the game.
-
Conclusion
-
In this article, we have shown you how to download free Microsoft card games for Windows 7. We have given you three options: getting the classic free Solitaire games from Microsoft Store; getting other free card games from Microsoft Apps; and getting more free card games from Microsoft Store. You can choose any option that suits your needs and preferences. We hope you enjoy playing these card games on your Windows 7 PC.
-
If you have any questions or feedback about this article or these card games, please feel free to leave a comment below. We would love to hear from you.
-
Frequently Asked Questions
-
-
Q: Do I need a Microsoft account to play these card games?
-
A: No, you don't need a Microsoft account to play these card games. However, if you want to sync your progress, earn achievements, and compete with other players, you will need to sign in with a Microsoft account.
-
Q: Are these card games compatible with other versions of Windows?
-
A: Yes, these card games are compatible with other versions of Windows, such as Windows 8, Windows 8.1, and Windows 10. However, some of them may have different features or requirements depending on the version of Windows you are using.
-
Q: How can I update these card games?
-
A: You can update these card games by going to the Microsoft Store app on your PC and checking for updates. You can also enable automatic updates in the settings of the Microsoft Store app.
-
Q: How can I uninstall these card games?
-
A: You can uninstall these card games by going to the Control Panel on your PC and selecting Programs and Features. You can also right-click on the game icon and select Uninstall.
-
Q: How can I contact the developers of these card games?
-
A: You can contact the developers of these card games by going to their websites or social media pages. You can also leave a review or a rating on the Microsoft Store app or website.
-
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/How to Download and Install Navigation Bar Pro APK 2.2 8 on Your Android Device.md b/spaces/congsaPfin/Manga-OCR/logs/How to Download and Install Navigation Bar Pro APK 2.2 8 on Your Android Device.md
deleted file mode 100644
index 0c1617c2ddf12b30cd2d858caafde9e7c6d7fc8e..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/How to Download and Install Navigation Bar Pro APK 2.2 8 on Your Android Device.md
+++ /dev/null
@@ -1,140 +0,0 @@
-
-
Navigation Bar Pro APK 2.2 8: A Customizable and Convenient App for Android Users
-
If you are looking for a way to enhance your navigation experience on your Android device, you might want to check out Navigation Bar Pro APK 2.2 8. This app allows you to customize your navigation bar with various colors, icons, themes, and modes. You can also hide or show the navigation bar as you wish, and adjust its size and position. In this article, we will tell you everything you need to know about this app, including what it is, why you should use it, how to use it, and how it compares with other navigation apps.
-
What is Navigation Bar Pro APK?
-
A brief introduction to the app and its features
-
Navigation Bar Pro APK is an app that lets you modify your navigation bar on your Android device. The navigation bar is the strip at the bottom of your screen that contains the back, home, and recent apps buttons. With Navigation Bar Pro APK, you can change the appearance and functionality of these buttons, as well as add some extra features.
Some of the features that Navigation Bar Pro APK offers are:
-
-
You can choose from different colors, icons, shapes, and styles for your navigation bar.
-
You can hide or show the navigation bar with a simple swipe or tap.
-
You can adjust the height, width, and position of the navigation bar.
-
You can switch between different navigation modes, such as gesture mode, smart mode, immersive mode, and normal mode.
-
You can apply different themes to your navigation bar, such as dark theme, light theme, transparent theme, and custom theme.
-
You can add some useful shortcuts to your navigation bar, such as screen capture, power menu, notification panel, quick settings, and more.
-
-
How to download and install the app on your device
-
To download and install Navigation Bar Pro APK on your device, you need to follow these steps:
-
-
- Go to [this link] and download the latest version of the app.
-
Enable unknown sources on your device by going to Settings > Security > Unknown sources.
-
Locate the downloaded file on your device and tap on it to install it.
-
Grant the necessary permissions to the app when prompted.
-
Launch the app and enjoy!
-
-
Why use Navigation Bar Pro APK?
-
The benefits of using the app for your navigation needs
-
There are many reasons why you might want to use Navigation Bar Pro APK for your navigation needs. Some of them are:
-
-
You can personalize your navigation bar according to your taste and mood.
-
You can improve your productivity and efficiency by accessing various shortcuts from your navigation bar.
-
You can save some screen space by hiding or minimizing your navigation bar when not needed.
-
You can enhance your user experience by switching between different navigation modes and themes.
-
You can avoid accidental touches or gestures on your navigation bar by locking or hiding it.
-
-
The drawbacks and limitations of the app
-
However, there are also some drawbacks and limitations of using Navigation Bar Pro APK. Some of them are:
- You need to have a rooted device or a custom ROM to use some of the features of the app, such as immersive mode and power menu.
-
- You might encounter some compatibility issues or bugs with some devices or apps, especially if they have their own navigation bar.
-
- You might need to pay a small fee to unlock some of the premium features of the app, such as custom theme and gesture mode.
-
How to use Navigation Bar Pro APK?
-
How to customize the navigation bar according to your preferences
-
To customize the navigation bar according to your preferences, you need to follow these steps:
-
-
-
Open the app and tap on the Settings icon at the top right corner.
-
Tap on the Navigation Bar option and select the Customize option.
-
Here you can change the color, icon, shape, and style of your navigation bar. You can also adjust the height, width, and position of your navigation bar.
-
Tap on the Apply button to save your changes.
-
-
How to switch between different navigation modes and themes
-
To switch between different navigation modes and themes, you need to follow these steps:
-
-
Open the app and tap on the Settings icon at the top right corner.
-
Tap on the Navigation Bar option and select the Mode option.
-
Here you can choose from four different navigation modes: gesture mode, smart mode, immersive mode, and normal mode. Each mode has its own advantages and disadvantages. For example, gesture mode allows you to use swipe gestures instead of buttons, while immersive mode hides the navigation bar completely.
-
Tap on the Apply button to save your changes.
-
To change the theme of your navigation bar, tap on the Theme option and choose from dark theme, light theme, transparent theme, or custom theme. You can also adjust the transparency and brightness of your navigation bar.
-
Tap on the Apply button to save your changes.
-
-
Comparison with other navigation apps
-
How Navigation Bar Pro APK differs from other similar apps
-
Navigation Bar Pro APK is not the only app that allows you to customize your navigation bar on your Android device. There are other similar apps that offer similar or different features. However, Navigation Bar Pro APK stands out from them in some ways. Some of them are:
-
-
Navigation Bar Pro APK has more options and features than most of the other apps. You can change almost every aspect of your navigation bar with this app.
-
Navigation Bar Pro APK has a simple and user-friendly interface that makes it easy to use and navigate. You can access all the settings and options from one screen.
-
Navigation Bar Pro APK is compatible with most of the Android devices and versions. It works well with both rooted and non-rooted devices, as well as with custom ROMs.
-
-
A table showing the pros and cons of Navigation Bar Pro APK and other apps
-
-
App Name
Pros
Cons
-
Navigation Bar Pro APK
- Highly customizable - Easy to use - Compatible with most devices
- Requires root or custom ROM for some features - Might have some bugs or issues - Requires payment for some features
-
Navbar Apps
- Allows you to change the color of your navigation bar - Supports various themes and images - Has some fun features like battery level indicator
- Does not support gesture mode or immersive mode - Has limited customization options - Might not work well with some devices or apps
-
Gestures Navigation - Swipe Gesture Controls!
- Allows you to use swipe gestures instead of buttons - Supports various gestures and actions - Has a floating button for quick access
- Requires root access for full functionality - Might interfere with some apps or games - Has a complex interface and settings
-
Simple Control - Navigation bar
- Allows you to add various shortcuts to your navigation bar - Supports different icons and colors - Has a backup and restore feature
- Requires root access or PC connection for some features - Might cause some lag or battery drain - Has annoying ads and pop-ups
-
Custom Navigation Bar
- Allows you to customize your navigation bar without root access - Supports different layouts and buttons - Has a tasker plugin for automation
- Does not support gesture mode or immersive mode - Requires payment for some features
-
-
Conclusion
-
A summary of the main points and a call to action
-
Navigation Bar Pro APK 2.2 8 is a great app for Android users who want to customize their navigation bar with various colors, icons, themes, and modes. It also allows you to hide or show the navigation bar, adjust its size and position, and add some useful shortcuts. It has more options and features than most of the other navigation apps, and it is easy to use and compatible with most devices. However, it also has some drawbacks and limitations, such as requiring root or custom ROM for some features, having some bugs or issues, and requiring payment for some features.
-
If you are interested in trying out this app, you can download it from [this link] and install it on your device. You can also check out the other navigation apps that we have compared in this article and see which one suits your needs better. We hope that this article has helped you learn more about Navigation Bar Pro APK 2.2 8 and how to use it. Thank you for reading!
-
FAQs
-
Here are some frequently asked questions about Navigation Bar Pro APK 2.2 8:
-
-
Q: Is Navigation Bar Pro APK safe to use? A: Yes, Navigation Bar Pro APK is safe to use as long as you download it from a trusted source and grant the necessary permissions to the app. However, you should always be careful when modifying your system settings and backup your data before making any changes.
-
Q: Does Navigation Bar Pro APK work on all Android devices? A: Navigation Bar Pro APK works on most Android devices and versions, but it might not work well on some devices or in apps that draw their own navigation bar. It also requires root or a custom ROM for some features, such as immersive mode and the power menu.
-
Q: How can I uninstall Navigation Bar Pro APK? A: Open the app, tap the Settings icon at the top right corner, tap the Uninstall option, and confirm your choice. Then go to Settings > Apps > Navigation Bar Pro APK, tap Uninstall, and restart your device.
-
Q: How can I contact the developer of Navigation Bar Pro APK? A: You can contact the developer of Navigation Bar Pro APK by sending an email to [this address] or by visiting [this website].
-
Q: How can I support the development of Navigation Bar Pro APK? A: You can support the development of Navigation Bar Pro APK by rating and reviewing the app on [this page] or by purchasing some of the premium features of the app.
-
-
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/Play Onmyoji Arena on Mac with BlueStacks The Android Gaming Platform.md b/spaces/congsaPfin/Manga-OCR/logs/Play Onmyoji Arena on Mac with BlueStacks The Android Gaming Platform.md
deleted file mode 100644
index d6ad510a176618759637a21d77d71793cb716a20..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/Play Onmyoji Arena on Mac with BlueStacks The Android Gaming Platform.md
+++ /dev/null
@@ -1,169 +0,0 @@
-
-
Onmyoji Arena: A Unique and Exciting MOBA Experience
-
If you are a fan of MOBA games, you might have heard of Onmyoji Arena, a mobile game developed by NetEase Games. Onmyoji Arena is a spin-off of the popular turn-based RPG game Onmyoji, which features a rich and diverse cast of characters inspired by Japanese folklore and mythology. In this article, we will introduce you to Onmyoji Arena, its game features, and some tips and tricks to help you win your battles.
-
How to Download and Play Onmyoji Arena on PC and Mac
-
Onmyoji Arena is available for both Android and iOS devices, but if you want to enjoy the game on a bigger screen and with better controls, you can also play it on your PC or Mac using an emulator. An emulator is software that allows you to run mobile apps on your computer. One of the best emulators for playing Onmyoji Arena is BlueStacks, which offers a smooth, high-definition, and customizable gaming experience.
To download and play Onmyoji Arena on PC or Mac using BlueStacks, follow these simple steps:
-
-
Download and install BlueStacks on your PC or Mac from here.
-
Complete Google sign-in to access the Play Store or do it later.
-
Look for Onmyoji Arena in the search bar at the top right corner.
-
Click to install Onmyoji Arena from the search results.
-
Complete Google sign-in (if you skipped step 2) to install Onmyoji Arena.
-
Click the Onmyoji Arena icon on the home screen to start playing.
-
-
What are the Main Game Modes and Objectives in Onmyoji Arena?
-
Onmyoji Arena offers various game modes for different tastes and preferences. The most common mode is the 5v5 mode, where two teams of five players compete against each other in a map divided into three lanes: top, middle, and bottom. The main objective of this mode is to destroy the enemy's base while defending your own. Along the way, you can also kill enemy Shikigami (characters), minions, turrets, jungle monsters, and neutral objectives to gain gold, experience, buffs, and advantages.
-
Another mode that is unique to Onmyoji Arena is the 3v3v3 mode, also known as Battle Royale. In this mode, three teams of three players spawn in different areas of the map and fight in the middle. The first team to seal 30 Shikigami wins. This mode is fast-paced, chaotic, and fun.
-
Other modes that you can try out in Onmyoji Arena include Shikigami Defense, where you have to protect your base from waves of enemies; Mahjong Chess, where you have to collect Shikigami cards and form a winning hand; Heian Tales, where you can experience different stories based on the lore of Onmyoji; and Ranked Match, where you can test your skills against other players of your rank.
How to Choose and Customize Your Shikigami in Onmyoji Arena
-
One of the most important aspects of Onmyoji Arena is choosing the right Shikigami for your role and playstyle. There are over 100 Shikigami in the game, each with their own unique abilities, strengths, and weaknesses. You can browse the Shikigami list in the main menu and read their descriptions, skills, and stats to get an idea of what they can do. You can also try them out in the practice mode or watch their gameplay videos to see them in action.
-
However, choosing a Shikigami is not enough to ensure your success in the game. You also need to customize them according to your preferences and strategies. There are three main ways to customize your Shikigami in Onmyoji Arena: Onmyodo, Souls, and Skins.
-
Onmyodo
-
Onmyodo are equippable runes that grant passive bonuses to your Shikigami. You can access the Onmyodo menu from the main screen and create different sets of Onmyodo for different situations. There are four types of Onmyodo: Attack, Defense, Ability, and Utility. Each type has different colors and shapes, and you can only equip one of each shape per set. You can also choose a primary Onmyodo that gives a stronger bonus than the others.
-
-
The Onmyodo you choose should complement your Shikigami's role and skills. For example, if you are playing a Mage, you might want to equip Onmyodo that increase your ability power, cooldown reduction, or penetration. If you are playing a Tank, you might want to equip Onmyodo that increase your health, armor, or magic resistance. You can also mix and match different types of Onmyodo to create a balanced or hybrid set.
-
Souls
-
Souls are items that you can buy during the match to enhance your Shikigami's performance. You can access the Soul shop by tapping the Soul icon on the bottom right corner of the screen. There are six types of Souls: Attack, Defense, Ability, Speed, Crit, and Life. Each type has different subtypes that offer different bonuses and effects. You can equip up to six Souls per match, but you can only equip one subtype per type.
-
The Souls you buy should depend on your Shikigami's role and the situation of the match. For example, if you are playing a Marksman, you might want to buy Souls that increase your attack damage, attack speed, or critical chance. If you are playing a Support, you might want to buy Souls that increase your healing, shielding, or utility. You can also adapt your Soul choices according to the enemy's composition and strategy. For example, if the enemy has a lot of armor or magic resistance, you might want to buy Souls that increase your penetration or true damage.
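-
To make the equipment rule concrete, here is a toy Python sketch of validating a Soul loadout: at most six Souls, and at most one subtype per type. The subtype names are invented for illustration and are not from the game.
-
```python
from collections import Counter

# Toy model of the Soul rule: up to six Souls per match,
# and only one subtype per type. Subtype names are invented.
SOUL_TYPES = {"Attack", "Defense", "Ability", "Speed", "Crit", "Life"}

def is_valid_loadout(souls):
    """souls: list of (type, subtype) pairs equipped during a match."""
    if len(souls) > 6:
        return False
    type_counts = Counter(soul_type for soul_type, _ in souls)
    # Each type must be a known type and may appear at most once.
    return all(t in SOUL_TYPES and n == 1 for t, n in type_counts.items())

print(is_valid_loadout([("Attack", "blade"), ("Crit", "lucky")]))   # True
print(is_valid_loadout([("Attack", "blade"), ("Attack", "fang")]))  # False
```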
-
Skins
-
Skins are cosmetic items that change the appearance of your Shikigami. You can access the Skin menu from the main screen and preview or buy different skins for your favorite Shikigami. Some skins are free, some cost coins or jades (the game's currencies), and some are exclusive to events or promotions. Skins do not affect your Shikigami's stats or skills, but they can make them look more cool or cute.
-
The skins you choose are entirely up to your personal taste and preference. You can choose skins that match your Shikigami's personality, theme, or mood. You can also choose skins that show off your achievements, such as rare or limited skins that are hard to obtain. Skins are a way to express yourself and have fun in Onmyoji Arena.
What are the Different Roles and Lanes in Onmyoji Arena and How to Play Them Effectively?
-
In Onmyoji Arena, there are five main roles that each Shikigami can play: Samurai, Mage, Marksman, Ninja, and Support. Each role has a different function and responsibility in the team, and they usually go to different lanes or areas of the map. Knowing your role and lane is essential for playing effectively and coordinating with your teammates. Here is a brief overview of each role and lane in Onmyoji Arena:
-
Samurai
-
Samurai are melee fighters that excel at dealing physical damage and dueling enemies. They are usually tanky and durable, but they can also dish out a lot of damage with their skills and basic attacks. Samurai typically go to the top lane, where they face off against another Samurai or a Tank. Their main objective is to farm gold and experience, push the lane, and join team fights when needed. Some examples of Samurai Shikigami are Ibaraki Doji, Yoto Hime, and Kyonshi Imouto.
-
Mage
-
Mages are ranged casters that excel at dealing magical damage and bursting down enemies. They are usually squishy and fragile, but they can unleash devastating spells that turn the tide of battle. Mages typically go to the mid lane, where they have access to both sides of the map. Their main objective is to farm gold and experience, roam and gank other lanes, and provide vision and control over neutral objectives. Some examples of Mage Shikigami are Menreiki, Momiji, and Shiranui.
-
Marksman
-
Marksmen are ranged attackers that excel at dealing physical damage and carrying the team. They are usually weak and vulnerable in the early game, but they scale very well with items and can become unstoppable in the late game. Marksmen typically go to the bottom lane, accompanied by a Support. Their main objective is to farm gold and experience, destroy turrets, and deal consistent damage in team fights. Some examples of Marksman Shikigami are Miketsu, Dodomeki, and Suzuka Gozen.
-
Ninja
-
Ninja are melee assassins that excel at dealing physical damage and killing enemies. They are usually mobile and stealthy, but they can also be countered by vision and crowd control. Ninja typically go to the jungle, where they kill monsters and gain buffs. Their main objective is to farm gold and experience, gank and pressure other lanes, and secure neutral objectives. Some examples of Ninja Shikigami are Yamakaze, Ittan Momen, and Yumekui.
-
Support
-
Supports are utility Shikigami that excel at helping their allies and hindering their enemies. They are usually versatile and adaptable, but they can be overshadowed by other roles in terms of damage or impact. Supports typically go to the bottom lane, where they assist the Marksman. Their main objective is to protect and heal their allies, harass and crowd-control their enemies, and provide vision and control over neutral objectives. Some examples of Support Shikigami are Kusa, Ichimokuren, and Yamausagi.
What are Some of the Best Shikigami for Each Role and Lane in Onmyoji Arena?
-
While every Shikigami in Onmyoji Arena can be viable and fun to play, some are more popular and effective than others. This can depend on various factors, such as the current meta, the patch updates, the team composition, and the personal preference. However, based on the general consensus of the players and the experts, here are some of the best Shikigami for each role and lane in Onmyoji Arena:
-
Best Samurai
-
The best Samurai in Onmyoji Arena are those that can dominate their lane, survive ganks, and contribute to team fights. Some of the best Samurai are:
-
-
Ibaraki Doji: A powerful and tanky Samurai that can deal massive damage with his passive and ultimate. He can also heal himself and reduce enemy healing with his skills.
-
Yoto Hime: A fast and agile Samurai that can dash and slash her enemies with her skills. She can also reset her cooldowns and gain bonus effects with her passive.
-
Kyonshi Imouto: A cute and deadly Samurai that can stun and execute her enemies with her skills. She can also revive herself and gain bonus stats with her passive.
-
-
Best Mage
-
The best Mages in Onmyoji Arena are those that can deal high burst damage, control the map, and support their allies. Some of the best Mages are:
-
-
Menreiki: A versatile and unpredictable Mage that can switch between different masks with her skills. Each mask grants her a different ability and effect.
-
Momiji: A long-range and high-damage Mage that can snipe her enemies with her skills. She can also summon a giant puppet that attacks nearby enemies.
-
Shiranui: A hybrid Mage that can deal both physical and magical damage with her skills. She can also transform into a fox form that enhances her abilities.
-
-
Best Marksman
-
The best Marksmen in Onmyoji Arena are those that can deal consistent damage, farm efficiently, and carry the late game. Some of the best Marksmen are:
-
-
Miketsu: A flexible and mobile Marksman that can switch between different weapons with her skills. Each weapon grants her a different attack range and effect.
-
Dodomeki: A unique and powerful Marksman that can shoot multiple arrows with her skills. She can also mark her enemies and deal bonus damage to them.
-
Suzuka Gozen: A hybrid Marksman that can deal both physical and magical damage with her skills. She can also summon a clone that mimics her actions.
-
-
Best Ninja
-
The best Ninja in Onmyoji Arena are those that can assassinate their targets, secure objectives, and escape danger. Some of the best Ninja are:
-
-
Yamakaze: A stealthy and slippery Ninja that can hide in bushes and dash through walls with his skills. He can also mark his enemies and deal bonus damage to them.
-
Ittan Momen: A fast and furious Ninja that can fly around the map and knock up enemies with his skills. He can also transform into a cloth form that grants him immunity to damage.
-
Yumekui: A sneaky and hungry Ninja that can eat his enemies and gain their abilities with his skills. He can also spit out his enemies and deal damage to them.
-
-
Best Support
-
The best Supports in Onmyoji Arena are those that can protect their allies, disrupt their enemies, and provide utility. Some of the best Supports are:
-
-
Kusa: A healing and shielding Support that can restore health and mana to her allies with her skills. She can also buff her allies and debuff her enemies with her passive.
-
Ichimokuren: A defensive and controlling Support that can create barriers and wind walls with his skills. He can also stun his enemies and grant vision with his passive.
-
Yamausagi: An offensive and engaging Support that can boost the movement speed and attack speed of her allies with her skills. She can also knock up her enemies and reduce their armor with her passive.
-
How to Communicate and Cooperate with Your Teammates in Onmyoji Arena
-
The fifth step to improving your combat skills and strategy is to communicate and cooperate with your teammates in Onmyoji Arena. Onmyoji Arena is a team-based game, where you have to work together with your allies to achieve your objectives and defeat your enemies. Communication and cooperation are key to winning the game, as they allow you to coordinate your actions, share information, and support each other. Here are some tips and tricks to help you communicate and cooperate with your teammates in Onmyoji Arena:
-
Use the Chat and the Pings
-
The most basic and common way to communicate with your teammates is to use the chat and the pings. You can access the chat by tapping the chat icon on the top left corner of the screen. You can type or use voice messages to talk to your teammates. You can also use preset messages or emojis to express yourself. You can access the pings by tapping the ping icon on the bottom left corner of the screen. You can use different pings to signal your intentions, such as attack, retreat, gather, or danger.
-
For example, if you are playing as Yamausagi, a Support that can boost the movement speed and attack speed of her allies with her skills, you should know that you can use the chat and the pings to communicate with your Marksman. You can use the chat to tell them when to engage or disengage, when to farm or push, or when to rotate or recall. You can also use the pings to alert them of incoming enemies, missing enemies, or available objectives.
-
Follow the Leader and the Plan
-
The second way to cooperate with your teammates is to follow the leader and the plan. You should not play solo or selfishly in Onmyoji Arena, as you will likely fail or lose. You should play as a team and follow the leader and the plan. The leader is usually the most experienced or skilled player in your team, who can make the best decisions and calls for the team. The plan is usually the agreed strategy or tactic that your team will follow in the game.
-
For example, if you are playing as Shiranui, a hybrid Mage that can deal both physical and magical damage with her skills, you should know that you should follow the leader and the plan. You should listen to the leader's commands and suggestions, such as when to fight or flee, when to split or group, or when to take or contest objectives. You should also follow the plan that your team has made, such as which Shikigami to focus or avoid, which lane to pressure or defend, or which items to buy or sell.
-
Compliment and Encourage Your Teammates
-
The third way to cooperate with your teammates is to compliment and encourage them. You should not flame or blame your teammates in Onmyoji Arena, as you will only demoralize or anger them. You should be positive and supportive of your teammates, as you will boost their morale and confidence. You can compliment and encourage your teammates by using the chat or the emojis. You can praise them for their good plays, thank them for their help, or cheer them up for their mistakes.
-
For example, if you are playing as Suzuka Gozen, a hybrid Marksman that can deal both physical and magical damage with her skills, you should know that you should compliment and encourage your teammates. You should use the chat or the emojis to compliment them for their kills, assists, saves, or steals. You should also thank them for their heals, shields, buffs, or peels. You should also cheer them up for their deaths, misses, fails, or losses.
How to Deal with Common Challenges and Situations in Onmyoji Arena
-
The sixth and final step to improving your combat skills and strategy is to deal with common challenges and situations in Onmyoji Arena. Onmyoji Arena is a dynamic and complex game, where you will encounter different challenges and situations that require quick thinking and adaptation. You should not panic or give up in Onmyoji Arena, as you can always find a way to overcome or cope with them. Here are some tips and tricks to help you deal with common challenges and situations in Onmyoji Arena:
-
How to Deal with a Strong Enemy
-
One of the most common challenges in Onmyoji Arena is facing a strong enemy that is dominating the game. This can be an enemy that has a lot of kills, gold, or items, or an enemy that counters your Shikigami or role. Facing a strong enemy can be frustrating and intimidating, but you should not lose hope or surrender. You can deal with a strong enemy by using these strategies:
-
-
Avoid Fighting Them Alone: The worst thing you can do when facing a strong enemy is to fight them alone. You will likely lose the fight and feed them more gold and experience. You should avoid fighting them alone and wait for your teammates to help you. You should also avoid being caught out of position or wandering alone in the jungle.
-
Focus Them in Team Fights: The best thing you can do when facing a strong enemy is to focus them in team fights. You should coordinate with your teammates and target the strong enemy first. You should use your skills, items, and Onmyodo to burst them down or crowd control them. You should also protect your allies from their attacks.
-
Build Counter Items: Another thing you can do when facing a strong enemy is to build counter items. You should buy items that can reduce their damage, defense, or utility. For example, if the enemy is a physical damage dealer, you can buy armor or physical resistance items. If the enemy is a magical damage dealer, you can buy magic resistance or magic penetration items. If the enemy is a healer or a lifestealer, you can buy anti-healing items.
-
-
How to Deal with a Losing Lane
-
Another common challenge in Onmyoji Arena is losing your lane to your enemy. This can be a lane where you are being outfarmed, outplayed, or outpushed by your enemy. Losing your lane can be discouraging and detrimental, but you should not tilt or blame yourself. You can deal with a losing lane by using these strategies:
-
-
Play Defensively: The first thing you should do when losing your lane is to play defensively. You should not try to fight or trade with your enemy, as you will likely lose more health or die. You should focus on farming under your turret, clearing the minion waves, and staying alive. You should also ward your lane and watch out for ganks.
-
Ask for Help: The second thing you should do when losing your lane is to ask for help from your teammates. You should not be ashamed or afraid to ask for help, as it is better than losing your lane alone. You should use the chat or the pings to request assistance from your jungler or other laners. You should also follow up on their ganks and appreciate their help.
-
Switch Lanes: The third thing you can do when losing your lane is to switch lanes with another teammate. You should only do this if you have a good reason and agreement with your teammate, as it can affect the balance of the game. You should switch lanes with a teammate that has a better matchup or advantage against your enemy. You should also communicate with your teammate and adapt to the new lane.
-
-
How to Deal with a Comeback
-
The final common situation in Onmyoji Arena is making or facing a comeback in the game. A comeback is when a team that was losing turns the game around and wins. A comeback can be exciting and satisfying, but it can also be frustrating and disappointing. You should always be prepared for a comeback in Onmyoji Arena, as it can happen at any time and for any reason. You can deal with a comeback by using these strategies:
-
-
Don't Give Up: The first thing you should do when making a comeback is to not give up on the game. You should not surrender or lose hope, as there is always a chance to win. You should keep playing your best and look for opportunities to turn the game around. You should also motivate and encourage your teammates.
-
Don't Get Cocky: The second thing you should do when facing a comeback is to not get cocky or overconfident. You should not underestimate or disrespect your enemies, as they can still win the game. You should keep playing cautiously and strategically, and avoid making mistakes or taking risks. You should also respect and acknowledge your enemies.
-
Seize the Moment: The third thing you should do when making or facing a comeback is to seize the moment and end the game. You should not waste time or prolong the game, as it can give your enemies a chance to recover or counterattack. You should focus on the objectives and the base, and finish the game as soon as possible. You should also celebrate and congratulate your teammates.
-
-
Conclusion
-
Onmyoji Arena is a unique and exciting MOBA game that offers a lot of fun and challenge for players of all levels and preferences. Whether you are a beginner or a veteran, a casual or a competitive player, a solo or a team player, you can find something to enjoy and learn in Onmyoji Arena. In this article, we have introduced you to Onmyoji Arena, its game features, and some tips and tricks to help you win your battles. We hope that you have found this article helpful and informative, and that you will give Onmyoji Arena a try.
-
If you want to download and play Onmyoji Arena on your PC or Mac, you can use BlueStacks, one of the best emulators for playing mobile games on your computer. You can download BlueStacks from here and follow the instructions in this article to install and run Onmyoji Arena on your PC or Mac. You can also visit the official website of Onmyoji Arena here to learn more about the game and its updates.
-
Thank you for reading this article and we hope to see you in Onmyoji Arena soon!
-
FAQs
-
Here are some frequently asked questions about Onmyoji Arena:
-
-
What is the difference between Onmyoji Arena and Onmyoji?
-
Onmyoji Arena is a spin-off of Onmyoji, which is a turn-based RPG game. Onmyoji Arena is a MOBA game that uses the same characters and settings as Onmyoji, but with different gameplay and mechanics. Onmyoji Arena is more action-oriented and competitive than Onmyoji, which is more story-oriented and casual.
-
How can I get more Shikigami in Onmyoji Arena?
-
You can get more Shikigami in Onmyoji Arena by using coins or jades, which are the game's currencies. You can earn coins by playing matches, completing quests, or watching ads. You can earn jades by buying them with real money, or by getting them from events, promotions, or rewards. You can also get some Shikigami for free by logging in daily, reaching certain levels, or participating in certain activities.
-
How can I rank up in Onmyoji Arena?
-
You can rank up in Onmyoji Arena by playing ranked matches, which are available after you reach level 6. You can choose between solo or duo queue, where you will be matched with other players of your rank. You will gain or lose points based on your performance and the outcome of the match. You will rank up when you reach a certain amount of points, or rank down when you lose too many points.
-
How can I join a guild in Onmyoji Arena?
-
You can join a guild in Onmyoji Arena by applying to an existing guild or creating your own guild. You can access the guild menu from the main screen and browse or search for guilds that suit your preferences. You can also invite or accept invitations from other players to join their guilds. Joining a guild will allow you to chat, play, and cooperate with other guild members, as well as enjoy various guild benefits and activities.
-
How can I report or block toxic players in Onmyoji Arena?
-
You can report or block toxic players in Onmyoji Arena by using the report or block buttons in the game. You can access these buttons by tapping on the player's name or avatar in the chat, the score board, or the end screen. You can report players for various reasons, such as cheating, afking, feeding, flaming, or spamming. You can block players to prevent them from contacting you or matching with you again.
-
-
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/Removing Chromes Download Bar A Step-by-Step Guide.md b/spaces/congsaPfin/Manga-OCR/logs/Removing Chromes Download Bar A Step-by-Step Guide.md
deleted file mode 100644
index d4e93a299385027b3887569ef67383e41ecab0c9..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/Removing Chromes Download Bar A Step-by-Step Guide.md
+++ /dev/null
@@ -1,79 +0,0 @@
-
-
How to Remove the Download Bar on Chrome
-
Chrome is one of the most popular web browsers in the world, but it also has some features that can annoy some users. One of them is the download bar, which shows up at the bottom of your browser window every time you download something from the internet.
What is the Download Bar and Why You Might Want to Remove It
-
The Download Bar is a feature of Chrome that shows the progress and status of your downloads at the bottom of the browser window.
-
The download bar lets you see how fast your downloads are going, how much time is left until they finish, and whether they are successful or not. You can also pause, resume, cancel, or open your downloads from the download bar.
-
Some reasons why you might want to remove the download bar are:
-
It takes up screen space and can be distracting.
-
If you have a small screen or like to browse in full-screen mode, having a download bar at the bottom can reduce your viewing area and make it harder to see what's on your web pages. The download bar can also be annoying if you download a lot of files and don't want to see them constantly popping up.
-
It can clutter your browser interface and make it harder to access other tabs or extensions.
-
The download bar can also interfere with your browser navigation and functionality. For example, it can cover the status bar, which shows you the URL of a link when you hover over it. It can also block some buttons or icons that are located at the bottom of your browser window, such as the scroll bar, the zoom control, or some extensions.
-
-
It can expose your download history or sensitive files to others who use your computer.
-
If you share your computer with other people, such as your family, friends, or coworkers, having a download bar can compromise your privacy and security. Anyone who uses your browser can see what files you have downloaded, when you downloaded them, and where they are stored on your computer. This can reveal your personal information, browsing habits, or confidential data. You can clear your download list manually, but this can be tedious and easy to forget.
-
How to Remove the Download Bar on Chrome with Extensions
-
Chrome does not have a built-in option to disable or hide the download bar, so you will need to use an extension to do so.
-
An extension is a small program that adds extra features or functionality to your browser. You can install extensions from the Chrome Web Store, which has thousands of extensions for different purposes and preferences. Some extensions are free, while others may require a fee or a subscription.
-
There are several extensions that can help you remove the download bar on Chrome, such as:
-
Always Clear Downloads: This extension automatically clears your download list and hides the download bar. You can customize how long the download bar stays visible before it disappears.
-
This extension is ideal if you want to keep your download list clean and avoid cluttering your browser interface. You can set the extension to clear your downloads after a certain amount of time, such as 5 seconds, 10 seconds, or 30 seconds. You can also choose to clear only completed downloads or all downloads. The extension will also hide the download bar after clearing your downloads, so you won't see it at all.
-
Disable Download Bar: This extension simply disables the download bar without affecting your download list. You can still access your downloads by pressing Ctrl+J or clicking on the menu icon and selecting Downloads.
-
This extension is perfect if you don't mind having a download list but don't want to see the download bar. The extension will prevent the download bar from showing up when you download something, but it won't delete or hide your downloads. You can still view and manage your downloads from the Downloads page, which you can open by pressing Ctrl+J on your keyboard or clicking on the menu icon (the three dots on the top right corner of your browser window) and selecting Downloads.
-
AutoHideDownloadsBar: This extension hides the download bar after a few seconds and shows a small icon on the top right corner of the browser window. You can hover over the icon to see your download status or click on it to open the download list.
-
This extension is great if you want to have a quick glance at your downloads without having a permanent download bar. The extension will hide the download bar after 3 seconds by default, but you can change this in the settings. The extension will also show a small icon on the top right corner of your browser window, which will indicate how many downloads you have and their progress. You can hover over the icon to see more details about your downloads, such as file name, size, and speed. You can also click on the icon to open the download list, where you can pause, resume, cancel, or open your downloads.
-
How to install and use an extension to remove the download bar on Chrome:
-
Open Chrome and go to the Chrome Web Store.
-
The Chrome Web Store is where you can find and install extensions for Chrome. You can open it by visiting chrome.google.com/webstore in your browser, or by clicking on this link: [Chrome Web Store].
-
Search for the extension you want to use and click on Add to Chrome.
-
You can browse through different categories of extensions or use the search box to find what you are looking for. For example, you can type "remove download bar" or "hide download bar" in the search box and see what extensions come up. Once you find an extension that suits your needs, click on its name or icon to see more information about it, such as its description, ratings, reviews, screenshots, and permissions. If you decide to install it, click on the Add to Chrome button and confirm by clicking on Add extension.
-
FAQs
-
Here are some frequently asked questions about the download bar on Chrome:
-
Q: Does clearing the download list on Chrome delete the downloaded files?
-
A: No, clearing the download list only removes the entries from Chrome, not from your computer's storage. If you want to delete the files from your computer, you will need to do that manually.
-
Q: How do I change the download location on Chrome?
-
A: You can change the download location by going to the Settings page (click on the menu icon and select Settings) and scrolling down to the Advanced section. Under Downloads, you will see the option to change the default download location or ask where to save each file before downloading. You can click on Change to select a different folder or toggle the switch to enable or disable the prompt.
-
Q: How do I resume a failed or interrupted download on Chrome?
-
A: You can resume a failed or interrupted download by going to the Downloads page (press Ctrl+J or click on the menu icon and select Downloads) and clicking on Resume next to the download that you want to continue. If the Resume button is not available, it means that the download cannot be resumed and you will need to start it over.
-
Q: How do I open a downloaded file on Chrome?
-
A: You can open a downloaded file by going to the Downloads page (press Ctrl+J or click on the menu icon and select Downloads) and clicking on Open next to the file that you want to open. You can also open a downloaded file by clicking on Show in folder, which will open the folder where the file is stored on your computer. From there, you can double-click on the file to open it with the default program.
-
Q: How do I disable or uninstall an extension on Chrome?
-
A: You can disable or uninstall an extension by going to the Extensions page (click on the menu icon and select More tools and then Extensions) and finding the extension that you want to remove. You can click on the toggle switch to enable or disable the extension, or click on Remove to uninstall it completely. You may need to restart Chrome for the changes to take effect.
-
-
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/That 39s You Ps4 Apk.md b/spaces/congsaPfin/Manga-OCR/logs/That 39s You Ps4 Apk.md
deleted file mode 100644
index fac249cab90176609f65897cea9d84adfdc02b03..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/That 39s You Ps4 Apk.md
+++ /dev/null
@@ -1,91 +0,0 @@
-
-
That's You PS4 APK: How to Play the Party Game on Your Android Device
-
Do you love party games that test your knowledge, creativity, and humor? Do you want to find out what you and your friends really think about each other? If you answered yes, then you should try That's You PS4 APK, a companion app that lets you play the hilarious That's You game for the PlayStation®4 system on your Android device. In this article, we will show you how to download, install, and play That's You PS4 APK on your Android device, as well as the benefits of doing so. Let's get started!
-
What is That's You PS4 APK?
-
A brief introduction to the game and its features
-
That's You is a party game that was released in 2017 as part of the PlayLink series, which are games that use smartphones or tablets as controllers instead of the traditional DUALSHOCK 4 wireless controllers. The game is all about you and your friends, as you answer questions, take pictures, draw doodles, and vote on who is most likely to do or say something. The game features over 1,000 questions across six different themes, such as school, adventures, romance, and more. You can also create your own questions and pictures using the app. The game supports up to six players online or offline, and you can share your results on social media.
How to Download and Install That's You PS4 APK
-
An APK file is an Android Package file that contains all the files and data needed to run an app on an Android device. To download and install That's You PS4 APK, follow these steps (a short sketch after the note below shows one way to sanity-check the downloaded file):
-
-
Go to [this link] on your Android device and tap on Install.
-
Allow the app to access your device's storage, camera, microphone, and location.
-
Wait for the app to download and install on your device.
-
Launch the app and follow the in-app instructions to connect it to your PS4 system.
-
-
Note: Make sure that your Android device and your PS4 system are connected to the same Wi-Fi network before launching the app.
-
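As a side note, an APK is just a ZIP archive of the app's files, so you can sanity-check the download before installing it. The Python sketch below (the file name is hypothetical) lists the package contents and confirms the manifest is present:
-
```python
import zipfile

# An APK is a ZIP archive; peeking inside verifies the download
# is not truncated or corrupted before you install it.
with zipfile.ZipFile("thats_you_companion.apk") as apk:  # hypothetical file name
    names = apk.namelist()
    print(f"{len(names)} files in the package")
    # Every valid APK carries a compiled AndroidManifest.xml at its root.
    print("manifest present:", "AndroidManifest.xml" in names)
```
-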
-
How to Play That's You PS4 APK on Your Android Device
-
What you need to play the game
-
To play That's You PS4 APK on your Android device, you need the following:
-
-
An Android device with Android 5.0 or higher.
-
A PlayStation®4 system with That's You game installed.
-
A Wi-Fi network that connects both devices.
-
A PlayStation Network account (optional).
-
-
How to connect your device to your PS4 system
-
To connect your device to your PS4 system, you need to follow these steps:
-
-
Turn on your PS4 system and launch That's You game.
-
On your Android device, open That's You PS4 APK app and tap on Play.
-
Scan the QR code that appears on your TV screen using your device's camera.
-
Enter your name and choose an avatar on your device.
-
Wait for the game to start and enjoy!
-
-
How to use the app as a controller
-
Once you have connected your device to your PS4 system, you can use the app as a controller to play the game. Here are some of the things you can do with the app:
-
-
Swipe left or right to select a theme or a question.
-
Tap on an answer or a picture to vote or submit it.
-
Use your device's camera to take selfies or pictures of your friends.
-
Use your device's touch screen to draw doodles or write captions.
-
Use your device's microphone to record voice messages or sound effects.
-
-
Tips and tricks for playing the game
-
To make the most out of playing That's You PS4 APK on your Android device, here are some tips and tricks you can try:
-
-
Be creative and funny with your answers, pictures, and doodles. The more you make your friends laugh, the more points you get.
-
Be honest and daring with your votes. The more you agree with the majority, the more points you get.
-
Be strategic and sneaky with your jokers. You can use them to double your points or sabotage your friends' points.
-
Be social and friendly with your friends. You can chat with them, send them stickers, or share your results on social media.
-
-
Benefits of Playing That's You PS4 APK on Your Android Device
-
Why playing on your Android device is more fun and convenient
-
Playing That's You PS4 APK on your Android device has many advantages over playing with a DUALSHOCK 4 wireless controller. Here are some of them:
-
-
You can use your device's features to enhance your gameplay, such as the camera, microphone, touch screen, and speakers.
-
You can play the game anywhere and anytime, as long as you have a Wi-Fi connection and a PS4 system.
-
You can play the game with anyone, even if they don't have a PS4 system or a DUALSHOCK 4 wireless controller.
-
You can play the game for free, as long as you have a PlayStation Plus membership or a friend who has one.
-
-
How playing on your Android device can enhance your social gaming experience
-
Playing That's You PS4 APK on your Android device can also improve your social gaming experience. Here are some of the ways it can do that:
-
-
You can get to know your friends better, as you discover their secrets, preferences, and personalities.
-
You can bond with your friends more, as you share laughs, jokes, and memories.
-
You can challenge your friends more, as you compete for points, trophies, and bragging rights.
-
You can invite more friends to join the fun, as you can play with up to six players online or offline.
-
-
Conclusion
-
A summary of the main points and a call to action
-
In conclusion, That's You PS4 APK is a companion app that lets you play the party game That's You for the PlayStation®4 system on your Android device. It is easy to download, install, and play. It is fun, convenient, and social. It is a great way to spend some quality time with your friends and family. So what are you waiting for? Download That's You PS4 APK today and find out who knows you best!
-
FAQs
-
Q1: Is That's You PS4 APK safe and legal to use?
-
A1: Yes, the companion app is safe and legal to use as long as you download it from a trusted source and grant only the permissions it asks for. As with any APK, be careful with downloads from unknown sites and keep your device's security settings enabled.
-
Q2: Can I play That's You PS4 APK offline or without a PS4 system?
-
A2: No, you cannot play That's You PS4 APK offline or without a PS4 system. You need a Wi-Fi connection and a PS4 system to play the game. The app only works as a controller and does not contain the game itself.
-
Q3: How many players can join a game of That's You PS4 APK?
-
A3: You can play That's You PS4 APK with up to six players online or offline. You can either join an existing game or create your own game. You can also play solo if you want to practice or explore the game.
-
Q4: Can I create my own questions and pictures for That's You PS4 APK?
-
A4: Yes, you can create your own questions and pictures for That's You PS4 APK. You can use the app to take pictures, draw doodles, write captions, and record voice messages. You can also use the app to edit, delete, or share your creations. You can use your creations in your own games or send them to your friends.
-
Q5: What are some other games similar to That's You PS4 APK?
-
A5: If you like That's You PS4 APK, you might also enjoy some other games from the PlayLink series, such as:
-
-
Knowledge is Power: A trivia game that tests your general knowledge and speed.
-
Hidden Agenda: A crime thriller game that challenges your moral choices and teamwork.
-
SingStar Celebration: A karaoke game that lets you sing along to your favorite songs.
-
Frantics: A party game that features a collection of mini-games and challenges.
-
-
-
\ No newline at end of file
diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/mmpkg/mmseg/core/__init__.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/mmpkg/mmseg/core/__init__.py
deleted file mode 100644
index 965605587211b7bf0bd6bc3acdbb33dd49cab023..0000000000000000000000000000000000000000
--- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/mmpkg/mmseg/core/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-from .evaluation import * # noqa: F401, F403
-from .seg import * # noqa: F401, F403
-from .utils import * # noqa: F401, F403
diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/oneformer/detectron2/data/datasets/README.md b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/oneformer/detectron2/data/datasets/README.md
deleted file mode 100644
index 9fb3e4f7afec17137c95c78be6ef06d520ec8032..0000000000000000000000000000000000000000
--- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/oneformer/detectron2/data/datasets/README.md
+++ /dev/null
@@ -1,9 +0,0 @@
-
-
-### Common Datasets
-
-The datasets implemented here do not need to load the data into the final format.
-They should provide the minimal data structure needed to use the dataset, so loading can be very efficient.
-
-For example, for an image dataset, just provide the file names and labels, but don't read the images.
-Let the downstream decide how to read.
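-
-As a minimal sketch (the dataset name, paths, and labels below are hypothetical,
-not part of this repo), a loader can return lightweight dicts and be registered
-lazily with `DatasetCatalog`:
-
-```python
-import os
-from detectron2.data import DatasetCatalog
-
-def load_my_dataset(image_root, labels):
-    # Return only file names and labels; do not read the images here.
-    return [
-        {"file_name": os.path.join(image_root, fname), "image_id": i, "label": label}
-        for i, (fname, label) in enumerate(sorted(labels.items()))
-    ]
-
-# Registration is lazy: nothing is loaded until the dataset is first used,
-# and downstream code decides how (and whether) to read each image.
-DatasetCatalog.register("my_tiny_set", lambda: load_my_dataset("images/", {"cat.jpg": 0}))
-```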
diff --git a/spaces/cvlab/zero123-live/CLIP/setup.py b/spaces/cvlab/zero123-live/CLIP/setup.py
deleted file mode 100644
index c9ea7d0d2f3d2fcf66d6f6e2aa0eb1a97a524bb6..0000000000000000000000000000000000000000
--- a/spaces/cvlab/zero123-live/CLIP/setup.py
+++ /dev/null
@@ -1,21 +0,0 @@
-import os
-
-import pkg_resources
-from setuptools import setup, find_packages
-
-setup(
- name="clip",
- py_modules=["clip"],
- version="1.0",
- description="",
- author="OpenAI",
- packages=find_packages(exclude=["tests*"]),
- install_requires=[
- str(r)
- for r in pkg_resources.parse_requirements(
- open(os.path.join(os.path.dirname(__file__), "requirements.txt"))
- )
- ],
- include_package_data=True,
- extras_require={'dev': ['pytest']},
-)
diff --git a/spaces/cymic/Talking_Head_Anime_3/tha3/mocap/ifacialmocap_pose.py b/spaces/cymic/Talking_Head_Anime_3/tha3/mocap/ifacialmocap_pose.py
deleted file mode 100644
index d90936e4cc906293ea673f613f16520a57650883..0000000000000000000000000000000000000000
--- a/spaces/cymic/Talking_Head_Anime_3/tha3/mocap/ifacialmocap_pose.py
+++ /dev/null
@@ -1,27 +0,0 @@
-from tha3.mocap.ifacialmocap_constants import BLENDSHAPE_NAMES, HEAD_BONE_X, HEAD_BONE_Y, HEAD_BONE_Z, \
- HEAD_BONE_QUAT, LEFT_EYE_BONE_X, LEFT_EYE_BONE_Y, LEFT_EYE_BONE_Z, LEFT_EYE_BONE_QUAT, RIGHT_EYE_BONE_X, \
- RIGHT_EYE_BONE_Y, RIGHT_EYE_BONE_Z, RIGHT_EYE_BONE_QUAT
-
-
-def create_default_ifacialmocap_pose():
- data = {}
-
- for blendshape_name in BLENDSHAPE_NAMES:
- data[blendshape_name] = 0.0
-
- data[HEAD_BONE_X] = 0.0
- data[HEAD_BONE_Y] = 0.0
- data[HEAD_BONE_Z] = 0.0
- data[HEAD_BONE_QUAT] = [0.0, 0.0, 0.0, 1.0]
-
- data[LEFT_EYE_BONE_X] = 0.0
- data[LEFT_EYE_BONE_Y] = 0.0
- data[LEFT_EYE_BONE_Z] = 0.0
- data[LEFT_EYE_BONE_QUAT] = [0.0, 0.0, 0.0, 1.0]
-
- data[RIGHT_EYE_BONE_X] = 0.0
- data[RIGHT_EYE_BONE_Y] = 0.0
- data[RIGHT_EYE_BONE_Z] = 0.0
- data[RIGHT_EYE_BONE_QUAT] = [0.0, 0.0, 0.0, 1.0]
-
- return data
\ No newline at end of file
diff --git a/spaces/dawood/audioldm-text-to-audio-generation/audioldm/latent_diffusion/openaimodel.py b/spaces/dawood/audioldm-text-to-audio-generation/audioldm/latent_diffusion/openaimodel.py
deleted file mode 100644
index 831d7aafb36bba16888e4389153979a6c13639f5..0000000000000000000000000000000000000000
--- a/spaces/dawood/audioldm-text-to-audio-generation/audioldm/latent_diffusion/openaimodel.py
+++ /dev/null
@@ -1,1069 +0,0 @@
-from abc import abstractmethod
-import math
-
-import numpy as np
-import torch as th
-import torch.nn as nn
-import torch.nn.functional as F
-
-from audioldm.latent_diffusion.util import (
- checkpoint,
- conv_nd,
- linear,
- avg_pool_nd,
- zero_module,
- normalization,
- timestep_embedding,
-)
-from audioldm.latent_diffusion.attention import SpatialTransformer
-
-
-# dummy replace
-def convert_module_to_f16(x):
- pass
-
-
-def convert_module_to_f32(x):
- pass
-
-
-class AttentionPool2d(nn.Module):
- """
- Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py
- """
-
- def __init__(
- self,
- spacial_dim: int,
- embed_dim: int,
- num_heads_channels: int,
- output_dim: int = None,
- ):
- super().__init__()
- self.positional_embedding = nn.Parameter(
- th.randn(embed_dim, spacial_dim**2 + 1) / embed_dim**0.5
- )
- self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1)
- self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1)
- self.num_heads = embed_dim // num_heads_channels
- self.attention = QKVAttention(self.num_heads)
-
- def forward(self, x):
- b, c, *_spatial = x.shape
- x = x.reshape(b, c, -1).contiguous() # NC(HW)
- x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1) # NC(HW+1)
- x = x + self.positional_embedding[None, :, :].to(x.dtype) # NC(HW+1)
- x = self.qkv_proj(x)
- x = self.attention(x)
- x = self.c_proj(x)
- return x[:, :, 0]
-
-
-class TimestepBlock(nn.Module):
- """
- Any module where forward() takes timestep embeddings as a second argument.
- """
-
- @abstractmethod
- def forward(self, x, emb):
- """
- Apply the module to `x` given `emb` timestep embeddings.
- """
-
-
-class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
- """
- A sequential module that passes timestep embeddings to the children that
- support it as an extra input.
- """
-
- def forward(self, x, emb, context=None):
- for layer in self:
- if isinstance(layer, TimestepBlock):
- x = layer(x, emb)
- elif isinstance(layer, SpatialTransformer):
- x = layer(x, context)
- else:
- x = layer(x)
- return x
-
-
-class Upsample(nn.Module):
- """
- An upsampling layer with an optional convolution.
- :param channels: channels in the inputs and outputs.
- :param use_conv: a bool determining if a convolution is applied.
- :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
- upsampling occurs in the inner-two dimensions.
- """
-
- def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1):
- super().__init__()
- self.channels = channels
- self.out_channels = out_channels or channels
- self.use_conv = use_conv
- self.dims = dims
- if use_conv:
- self.conv = conv_nd(
- dims, self.channels, self.out_channels, 3, padding=padding
- )
-
- def forward(self, x):
- assert x.shape[1] == self.channels
- if self.dims == 3:
- x = F.interpolate(
- x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest"
- )
- else:
- x = F.interpolate(x, scale_factor=2, mode="nearest")
- if self.use_conv:
- x = self.conv(x)
- return x
-
-
-class TransposedUpsample(nn.Module):
- "Learned 2x upsampling without padding"
-
- def __init__(self, channels, out_channels=None, ks=5):
- super().__init__()
- self.channels = channels
- self.out_channels = out_channels or channels
-
- self.up = nn.ConvTranspose2d(
- self.channels, self.out_channels, kernel_size=ks, stride=2
- )
-
- def forward(self, x):
- return self.up(x)
-
-
-class Downsample(nn.Module):
- """
- A downsampling layer with an optional convolution.
- :param channels: channels in the inputs and outputs.
- :param use_conv: a bool determining if a convolution is applied.
- :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
- downsampling occurs in the inner-two dimensions.
- """
-
- def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1):
- super().__init__()
- self.channels = channels
- self.out_channels = out_channels or channels
- self.use_conv = use_conv
- self.dims = dims
- stride = 2 if dims != 3 else (1, 2, 2)
- if use_conv:
- self.op = conv_nd(
- dims,
- self.channels,
- self.out_channels,
- 3,
- stride=stride,
- padding=padding,
- )
- else:
- assert self.channels == self.out_channels
- self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride)
-
- def forward(self, x):
- assert x.shape[1] == self.channels
- return self.op(x)
-
-
-class ResBlock(TimestepBlock):
- """
- A residual block that can optionally change the number of channels.
- :param channels: the number of input channels.
- :param emb_channels: the number of timestep embedding channels.
- :param dropout: the rate of dropout.
- :param out_channels: if specified, the number of out channels.
- :param use_conv: if True and out_channels is specified, use a spatial
- convolution instead of a smaller 1x1 convolution to change the
- channels in the skip connection.
- :param dims: determines if the signal is 1D, 2D, or 3D.
- :param use_checkpoint: if True, use gradient checkpointing on this module.
- :param up: if True, use this block for upsampling.
- :param down: if True, use this block for downsampling.
- """
-
- def __init__(
- self,
- channels,
- emb_channels,
- dropout,
- out_channels=None,
- use_conv=False,
- use_scale_shift_norm=False,
- dims=2,
- use_checkpoint=False,
- up=False,
- down=False,
- ):
- super().__init__()
- self.channels = channels
- self.emb_channels = emb_channels
- self.dropout = dropout
- self.out_channels = out_channels or channels
- self.use_conv = use_conv
- self.use_checkpoint = use_checkpoint
- self.use_scale_shift_norm = use_scale_shift_norm
-
- self.in_layers = nn.Sequential(
- normalization(channels),
- nn.SiLU(),
- conv_nd(dims, channels, self.out_channels, 3, padding=1),
- )
-
- self.updown = up or down
-
- if up:
- self.h_upd = Upsample(channels, False, dims)
- self.x_upd = Upsample(channels, False, dims)
- elif down:
- self.h_upd = Downsample(channels, False, dims)
- self.x_upd = Downsample(channels, False, dims)
- else:
- self.h_upd = self.x_upd = nn.Identity()
-
- self.emb_layers = nn.Sequential(
- nn.SiLU(),
- linear(
- emb_channels,
- 2 * self.out_channels if use_scale_shift_norm else self.out_channels,
- ),
- )
- self.out_layers = nn.Sequential(
- normalization(self.out_channels),
- nn.SiLU(),
- nn.Dropout(p=dropout),
- zero_module(
- conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1)
- ),
- )
-
- if self.out_channels == channels:
- self.skip_connection = nn.Identity()
- elif use_conv:
- self.skip_connection = conv_nd(
- dims, channels, self.out_channels, 3, padding=1
- )
- else:
- self.skip_connection = conv_nd(dims, channels, self.out_channels, 1)
-
- def forward(self, x, emb):
- """
- Apply the block to a Tensor, conditioned on a timestep embedding.
- :param x: an [N x C x ...] Tensor of features.
- :param emb: an [N x emb_channels] Tensor of timestep embeddings.
- :return: an [N x C x ...] Tensor of outputs.
- """
- return checkpoint(
- self._forward, (x, emb), self.parameters(), self.use_checkpoint
- )
-
- def _forward(self, x, emb):
- if self.updown:
- in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1]
- h = in_rest(x)
- h = self.h_upd(h)
- x = self.x_upd(x)
- h = in_conv(h)
- else:
- h = self.in_layers(x)
- emb_out = self.emb_layers(emb).type(h.dtype)
- while len(emb_out.shape) < len(h.shape):
- emb_out = emb_out[..., None]
- if self.use_scale_shift_norm:
- out_norm, out_rest = self.out_layers[0], self.out_layers[1:]
- scale, shift = th.chunk(emb_out, 2, dim=1)
- h = out_norm(h) * (1 + scale) + shift
- h = out_rest(h)
- else:
- h = h + emb_out
- h = self.out_layers(h)
- return self.skip_connection(x) + h
-
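The `use_scale_shift_norm` branch in `_forward` below is FiLM-style conditioning: the timestep embedding is projected to `2 * out_channels`, chunked into a scale and a shift, and applied between the normalization and the rest of the output layers. A minimal sketch of just that step, assuming GroupNorm plays the role of `normalization`:

```python
import torch
import torch.nn as nn

N, C, H, W = 2, 32, 8, 8
h = torch.randn(N, C, H, W)
emb_out = torch.randn(N, 2 * C)      # from emb_layers when use_scale_shift_norm
emb_out = emb_out[..., None, None]   # broadcast to [N, 2C, 1, 1]

norm = nn.GroupNorm(8, C)
scale, shift = torch.chunk(emb_out, 2, dim=1)
h = norm(h) * (1 + scale) + shift    # FiLM: scale/shift the normalized features
print(h.shape)                       # torch.Size([2, 32, 8, 8])
```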
-
-class AttentionBlock(nn.Module):
- """
- An attention block that allows spatial positions to attend to each other.
- Originally ported from here, but adapted to the N-d case.
- https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66.
- """
-
- def __init__(
- self,
- channels,
- num_heads=1,
- num_head_channels=-1,
- use_checkpoint=False,
- use_new_attention_order=False,
- ):
- super().__init__()
- self.channels = channels
- if num_head_channels == -1:
- self.num_heads = num_heads
- else:
- assert (
- channels % num_head_channels == 0
- ), f"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}"
- self.num_heads = channels // num_head_channels
- self.use_checkpoint = use_checkpoint
- self.norm = normalization(channels)
- self.qkv = conv_nd(1, channels, channels * 3, 1)
- if use_new_attention_order:
- # split qkv before split heads
- self.attention = QKVAttention(self.num_heads)
- else:
- # split heads before split qkv
- self.attention = QKVAttentionLegacy(self.num_heads)
-
- self.proj_out = zero_module(conv_nd(1, channels, channels, 1))
-
- def forward(self, x):
- return checkpoint(
- self._forward, (x,), self.parameters(), True
-        )  # TODO: verify checkpoint usage (hardcoded to True here) and fix the .half call
- # return pt_checkpoint(self._forward, x) # pytorch
-
- def _forward(self, x):
- b, c, *spatial = x.shape
- x = x.reshape(b, c, -1).contiguous()
- qkv = self.qkv(self.norm(x)).contiguous()
- h = self.attention(qkv).contiguous()
- h = self.proj_out(h).contiguous()
- return (x + h).reshape(b, c, *spatial).contiguous()
-
-
-def count_flops_attn(model, _x, y):
- """
- A counter for the `thop` package to count the operations in an
- attention operation.
- Meant to be used like:
- macs, params = thop.profile(
- model,
- inputs=(inputs, timestamps),
- custom_ops={QKVAttention: QKVAttention.count_flops},
- )
- """
- b, c, *spatial = y[0].shape
- num_spatial = int(np.prod(spatial))
- # We perform two matmuls with the same number of ops.
- # The first computes the weight matrix, the second computes
- # the combination of the value vectors.
- matmul_ops = 2 * b * (num_spatial**2) * c
- model.total_ops += th.DoubleTensor([matmul_ops])
-
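To make the `2 * b * num_spatial**2 * c` count concrete: for a batch of 1, a 16x16 feature map (so 256 spatial positions) and 64 channels, the two attention matmuls cost:

```python
b, c = 1, 64
num_spatial = 16 * 16                    # 256 positions attend to each other
matmul_ops = 2 * b * num_spatial**2 * c
print(matmul_ops)                        # 8388608 (~8.4M MACs) per attention layer
```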
-
-class QKVAttentionLegacy(nn.Module):
- """
-    A module which performs QKV attention. Matches legacy QKVAttention + input/output heads shaping
- """
-
- def __init__(self, n_heads):
- super().__init__()
- self.n_heads = n_heads
-
- def forward(self, qkv):
- """
- Apply QKV attention.
- :param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs.
- :return: an [N x (H * C) x T] tensor after attention.
- """
- bs, width, length = qkv.shape
- assert width % (3 * self.n_heads) == 0
- ch = width // (3 * self.n_heads)
- q, k, v = (
- qkv.reshape(bs * self.n_heads, ch * 3, length).contiguous().split(ch, dim=1)
- )
- scale = 1 / math.sqrt(math.sqrt(ch))
- weight = th.einsum(
- "bct,bcs->bts", q * scale, k * scale
- ) # More stable with f16 than dividing afterwards
- weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
- a = th.einsum("bts,bcs->bct", weight, v)
- return a.reshape(bs, -1, length).contiguous()
-
- @staticmethod
- def count_flops(model, _x, y):
- return count_flops_attn(model, _x, y)
-
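A self-contained version of the legacy forward pass above, useful for checking the shapes: with `H` heads and `C` channels per head, the input packs each head's q, k, v contiguously along the channel axis.

```python
import math
import torch

bs, n_heads, ch, length = 2, 4, 16, 10
qkv = torch.randn(bs, n_heads * 3 * ch, length)   # [N, H*(3*C), T]

q, k, v = qkv.reshape(bs * n_heads, ch * 3, length).split(ch, dim=1)
scale = 1 / math.sqrt(math.sqrt(ch))  # applied to q and k separately for fp16 stability
weight = torch.einsum("bct,bcs->bts", q * scale, k * scale)
weight = torch.softmax(weight.float(), dim=-1).type(weight.dtype)
a = torch.einsum("bts,bcs->bct", weight, v)
print(a.reshape(bs, -1, length).shape)            # torch.Size([2, 64, 10])
```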
-
-class QKVAttention(nn.Module):
- """
- A module which performs QKV attention and splits in a different order.
- """
-
- def __init__(self, n_heads):
- super().__init__()
- self.n_heads = n_heads
-
- def forward(self, qkv):
- """
- Apply QKV attention.
- :param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs.
- :return: an [N x (H * C) x T] tensor after attention.
- """
- bs, width, length = qkv.shape
- assert width % (3 * self.n_heads) == 0
- ch = width // (3 * self.n_heads)
- q, k, v = qkv.chunk(3, dim=1)
- scale = 1 / math.sqrt(math.sqrt(ch))
- weight = th.einsum(
- "bct,bcs->bts",
- (q * scale).view(bs * self.n_heads, ch, length),
- (k * scale).view(bs * self.n_heads, ch, length),
- ) # More stable with f16 than dividing afterwards
- weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
- a = th.einsum(
- "bts,bcs->bct",
- weight,
- v.reshape(bs * self.n_heads, ch, length).contiguous(),
- )
- return a.reshape(bs, -1, length).contiguous()
-
- @staticmethod
- def count_flops(model, _x, y):
- return count_flops_attn(model, _x, y)
-
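The only difference from the legacy module is the packing order of the fused projection: legacy expects `[N, H*(3*C), T]` (heads first, then q/k/v within each head), while this module expects `[N, 3*(H*C), T]` and can therefore use a plain `chunk(3, dim=1)` before splitting heads. A small sketch of the two layouts:

```python
import torch

bs, n_heads, ch, length = 2, 4, 16, 10

# New order: q for all heads, then k, then v.
qkv_new = torch.randn(bs, 3 * n_heads * ch, length)
q, k, v = qkv_new.chunk(3, dim=1)              # each [N, H*C, T]
q = q.reshape(bs * n_heads, ch, length)        # split heads afterwards

# Legacy order: per-head [q|k|v] blocks, so heads must be split first.
qkv_legacy = torch.randn(bs, n_heads * 3 * ch, length)
q_l, k_l, v_l = qkv_legacy.reshape(bs * n_heads, 3 * ch, length).split(ch, dim=1)
print(q.shape, q_l.shape)                      # both torch.Size([8, 16, 10])
```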
-
-class UNetModel(nn.Module):
- """
- The full UNet model with attention and timestep embedding.
- :param in_channels: channels in the input Tensor.
- :param model_channels: base channel count for the model.
- :param out_channels: channels in the output Tensor.
- :param num_res_blocks: number of residual blocks per downsample.
- :param attention_resolutions: a collection of downsample rates at which
- attention will take place. May be a set, list, or tuple.
- For example, if this contains 4, then at 4x downsampling, attention
- will be used.
- :param dropout: the dropout probability.
- :param channel_mult: channel multiplier for each level of the UNet.
- :param conv_resample: if True, use learned convolutions for upsampling and
- downsampling.
- :param dims: determines if the signal is 1D, 2D, or 3D.
- :param num_classes: if specified (as an int), then this model will be
- class-conditional with `num_classes` classes.
- :param use_checkpoint: use gradient checkpointing to reduce memory usage.
- :param num_heads: the number of attention heads in each attention layer.
-    :param num_head_channels: if specified, ignore num_heads and instead use
- a fixed channel width per attention head.
- :param num_heads_upsample: works with num_heads to set a different number
- of heads for upsampling. Deprecated.
- :param use_scale_shift_norm: use a FiLM-like conditioning mechanism.
- :param resblock_updown: use residual blocks for up/downsampling.
- :param use_new_attention_order: use a different attention pattern for potentially
- increased efficiency.
- """
-
- def __init__(
- self,
- image_size,
- in_channels,
- model_channels,
- out_channels,
- num_res_blocks,
- attention_resolutions,
- dropout=0,
- channel_mult=(1, 2, 4, 8),
- conv_resample=True,
- dims=2,
- num_classes=None,
- extra_film_condition_dim=None,
- use_checkpoint=False,
- use_fp16=False,
- num_heads=-1,
- num_head_channels=-1,
- num_heads_upsample=-1,
- use_scale_shift_norm=False,
-        extra_film_use_concat=False,  # If True, concatenate the extra FiLM condition with the time embedding; otherwise add them
- resblock_updown=False,
- use_new_attention_order=False,
- use_spatial_transformer=False, # custom transformer support
- transformer_depth=1, # custom transformer support
- context_dim=None, # custom transformer support
- n_embed=None, # custom support for prediction of discrete ids into codebook of first stage vq model
- legacy=True,
- ):
- super().__init__()
- if num_heads_upsample == -1:
- num_heads_upsample = num_heads
-
- if num_heads == -1:
- assert (
- num_head_channels != -1
- ), "Either num_heads or num_head_channels has to be set"
-
- if num_head_channels == -1:
- assert (
- num_heads != -1
- ), "Either num_heads or num_head_channels has to be set"
-
- self.image_size = image_size
- self.in_channels = in_channels
- self.model_channels = model_channels
- self.out_channels = out_channels
- self.num_res_blocks = num_res_blocks
- self.attention_resolutions = attention_resolutions
- self.dropout = dropout
- self.channel_mult = channel_mult
- self.conv_resample = conv_resample
- self.num_classes = num_classes
- self.extra_film_condition_dim = extra_film_condition_dim
- self.use_checkpoint = use_checkpoint
- self.dtype = th.float16 if use_fp16 else th.float32
- self.num_heads = num_heads
- self.num_head_channels = num_head_channels
- self.num_heads_upsample = num_heads_upsample
- self.predict_codebook_ids = n_embed is not None
- self.extra_film_use_concat = extra_film_use_concat
- time_embed_dim = model_channels * 4
- self.time_embed = nn.Sequential(
- linear(model_channels, time_embed_dim),
- nn.SiLU(),
- linear(time_embed_dim, time_embed_dim),
- )
-
- assert not (
- self.num_classes is not None and self.extra_film_condition_dim is not None
- ), "As for the condition of theh UNet model, you can only set using class label or an extra embedding vector (such as from CLAP). You cannot set both num_classes and extra_film_condition_dim."
-
- if self.num_classes is not None:
- self.label_emb = nn.Embedding(num_classes, time_embed_dim)
-
- self.use_extra_film_by_concat = (
- self.extra_film_condition_dim is not None and self.extra_film_use_concat
- )
- self.use_extra_film_by_addition = (
- self.extra_film_condition_dim is not None and not self.extra_film_use_concat
- )
-
- if self.extra_film_condition_dim is not None:
- self.film_emb = nn.Linear(self.extra_film_condition_dim, time_embed_dim)
- # print("+ Use extra condition on UNet channel using Film. Extra condition dimension is %s. " % self.extra_film_condition_dim)
- # if(self.use_extra_film_by_concat):
- # print("\t By concatenation with time embedding")
-        # elif(self.use_extra_film_by_addition):
- # print("\t By addition with time embedding")
-
- if use_spatial_transformer and (
- self.use_extra_film_by_concat or self.use_extra_film_by_addition
- ):
- # print("+ Spatial transformer will only be used as self-attention. Because you have choose to use film as your global condition.")
- spatial_transformer_no_context = True
- else:
- spatial_transformer_no_context = False
-
- if use_spatial_transformer and not spatial_transformer_no_context:
- assert (
- context_dim is not None
- ), "Fool!! You forgot to include the dimension of your cross-attention conditioning..."
-
- if context_dim is not None and not spatial_transformer_no_context:
- assert (
- use_spatial_transformer
- ), "Fool!! You forgot to use the spatial transformer for your cross-attention conditioning..."
- from omegaconf.listconfig import ListConfig
-
- if type(context_dim) == ListConfig:
- context_dim = list(context_dim)
-
- self.input_blocks = nn.ModuleList(
- [
- TimestepEmbedSequential(
- conv_nd(dims, in_channels, model_channels, 3, padding=1)
- )
- ]
- )
- self._feature_size = model_channels
- input_block_chans = [model_channels]
- ch = model_channels
- ds = 1
- for level, mult in enumerate(channel_mult):
- for _ in range(num_res_blocks):
- layers = [
- ResBlock(
- ch,
- time_embed_dim
- if (not self.use_extra_film_by_concat)
- else time_embed_dim * 2,
- dropout,
- out_channels=mult * model_channels,
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- )
- ]
- ch = mult * model_channels
- if ds in attention_resolutions:
- if num_head_channels == -1:
- dim_head = ch // num_heads
- else:
- num_heads = ch // num_head_channels
- dim_head = num_head_channels
- if legacy:
- dim_head = (
- ch // num_heads
- if use_spatial_transformer
- else num_head_channels
- )
- layers.append(
- AttentionBlock(
- ch,
- use_checkpoint=use_checkpoint,
- num_heads=num_heads,
- num_head_channels=dim_head,
- use_new_attention_order=use_new_attention_order,
- )
- if not use_spatial_transformer
- else SpatialTransformer(
- ch,
- num_heads,
- dim_head,
- depth=transformer_depth,
- context_dim=context_dim,
- no_context=spatial_transformer_no_context,
- )
- )
- self.input_blocks.append(TimestepEmbedSequential(*layers))
- self._feature_size += ch
- input_block_chans.append(ch)
- if level != len(channel_mult) - 1:
- out_ch = ch
- self.input_blocks.append(
- TimestepEmbedSequential(
- ResBlock(
- ch,
- time_embed_dim
- if (not self.use_extra_film_by_concat)
- else time_embed_dim * 2,
- dropout,
- out_channels=out_ch,
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- down=True,
- )
- if resblock_updown
- else Downsample(
- ch, conv_resample, dims=dims, out_channels=out_ch
- )
- )
- )
- ch = out_ch
- input_block_chans.append(ch)
- ds *= 2
- self._feature_size += ch
-
- if num_head_channels == -1:
- dim_head = ch // num_heads
- else:
- num_heads = ch // num_head_channels
- dim_head = num_head_channels
- if legacy:
- # num_heads = 1
- dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
- self.middle_block = TimestepEmbedSequential(
- ResBlock(
- ch,
- time_embed_dim
- if (not self.use_extra_film_by_concat)
- else time_embed_dim * 2,
- dropout,
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- ),
- AttentionBlock(
- ch,
- use_checkpoint=use_checkpoint,
- num_heads=num_heads,
- num_head_channels=dim_head,
- use_new_attention_order=use_new_attention_order,
- )
- if not use_spatial_transformer
- else SpatialTransformer(
- ch,
- num_heads,
- dim_head,
- depth=transformer_depth,
- context_dim=context_dim,
- no_context=spatial_transformer_no_context,
- ),
- ResBlock(
- ch,
- time_embed_dim
- if (not self.use_extra_film_by_concat)
- else time_embed_dim * 2,
- dropout,
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- ),
- )
- self._feature_size += ch
-
- self.output_blocks = nn.ModuleList([])
- for level, mult in list(enumerate(channel_mult))[::-1]:
- for i in range(num_res_blocks + 1):
- ich = input_block_chans.pop()
- layers = [
- ResBlock(
- ch + ich,
- time_embed_dim
- if (not self.use_extra_film_by_concat)
- else time_embed_dim * 2,
- dropout,
- out_channels=model_channels * mult,
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- )
- ]
- ch = model_channels * mult
- if ds in attention_resolutions:
- if num_head_channels == -1:
- dim_head = ch // num_heads
- else:
- num_heads = ch // num_head_channels
- dim_head = num_head_channels
- if legacy:
- # num_heads = 1
- dim_head = (
- ch // num_heads
- if use_spatial_transformer
- else num_head_channels
- )
- layers.append(
- AttentionBlock(
- ch,
- use_checkpoint=use_checkpoint,
- num_heads=num_heads_upsample,
- num_head_channels=dim_head,
- use_new_attention_order=use_new_attention_order,
- )
- if not use_spatial_transformer
- else SpatialTransformer(
- ch,
- num_heads,
- dim_head,
- depth=transformer_depth,
- context_dim=context_dim,
- no_context=spatial_transformer_no_context,
- )
- )
- if level and i == num_res_blocks:
- out_ch = ch
- layers.append(
- ResBlock(
- ch,
- time_embed_dim
- if (not self.use_extra_film_by_concat)
- else time_embed_dim * 2,
- dropout,
- out_channels=out_ch,
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- up=True,
- )
- if resblock_updown
- else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch)
- )
- ds //= 2
- self.output_blocks.append(TimestepEmbedSequential(*layers))
- self._feature_size += ch
-
- self.out = nn.Sequential(
- normalization(ch),
- nn.SiLU(),
- zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1)),
- )
- if self.predict_codebook_ids:
- self.id_predictor = nn.Sequential(
- normalization(ch),
- conv_nd(dims, model_channels, n_embed, 1),
- # nn.LogSoftmax(dim=1) # change to cross_entropy and produce non-normalized logits
- )
-
- self.shape_reported = False
-
- def convert_to_fp16(self):
- """
- Convert the torso of the model to float16.
- """
- self.input_blocks.apply(convert_module_to_f16)
- self.middle_block.apply(convert_module_to_f16)
- self.output_blocks.apply(convert_module_to_f16)
-
- def convert_to_fp32(self):
- """
- Convert the torso of the model to float32.
- """
- self.input_blocks.apply(convert_module_to_f32)
- self.middle_block.apply(convert_module_to_f32)
- self.output_blocks.apply(convert_module_to_f32)
-
- def forward(self, x, timesteps=None, context=None, y=None, **kwargs):
- """
- Apply the model to an input batch.
- :param x: an [N x C x ...] Tensor of inputs.
- :param timesteps: a 1-D batch of timesteps.
- :param context: conditioning plugged in via crossattn
-        :param y: an [N] Tensor of labels if class-conditional, or an [N x extra_film_condition_dim] Tensor if FiLM-conditional.
- :return: an [N x C x ...] Tensor of outputs.
- """
- if not self.shape_reported:
- # print("The shape of UNet input is", x.size())
- self.shape_reported = True
-
- assert (y is not None) == (
- self.num_classes is not None or self.extra_film_condition_dim is not None
- ), "must specify y if and only if the model is class-conditional or film embedding conditional"
- hs = []
- t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False)
- emb = self.time_embed(t_emb)
-
- if self.num_classes is not None:
- assert y.shape == (x.shape[0],)
- emb = emb + self.label_emb(y)
-
- if self.use_extra_film_by_addition:
- emb = emb + self.film_emb(y)
- elif self.use_extra_film_by_concat:
- emb = th.cat([emb, self.film_emb(y)], dim=-1)
-
- h = x.type(self.dtype)
- for module in self.input_blocks:
- h = module(h, emb, context)
- hs.append(h)
- h = self.middle_block(h, emb, context)
- for module in self.output_blocks:
- h = th.cat([h, hs.pop()], dim=1)
- h = module(h, emb, context)
- h = h.type(x.dtype)
- if self.predict_codebook_ids:
- return self.id_predictor(h)
- else:
- return self.out(h)
-
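`timestep_embedding` used in `forward` above is defined elsewhere in this module's dependencies; the standard implementation in the guided-diffusion lineage this file derives from builds sinusoidal features that `self.time_embed` then projects. A reference sketch (odd-`dim` zero padding omitted):

```python
import math
import torch

def timestep_embedding(timesteps, dim, max_period=10000):
    """Sinusoidal embeddings: [N] timesteps -> [N, dim] float features."""
    half = dim // 2
    freqs = torch.exp(
        -math.log(max_period) * torch.arange(half, dtype=torch.float32) / half
    )
    args = timesteps[:, None].float() * freqs[None]
    return torch.cat([torch.cos(args), torch.sin(args)], dim=-1)

print(timestep_embedding(torch.tensor([0, 10, 999]), 64).shape)  # torch.Size([3, 64])
```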
-
-class EncoderUNetModel(nn.Module):
- """
- The half UNet model with attention and timestep embedding.
- For usage, see UNet.
- """
-
- def __init__(
- self,
- image_size,
- in_channels,
- model_channels,
- out_channels,
- num_res_blocks,
- attention_resolutions,
- dropout=0,
- channel_mult=(1, 2, 4, 8),
- conv_resample=True,
- dims=2,
- use_checkpoint=False,
- use_fp16=False,
- num_heads=1,
- num_head_channels=-1,
- num_heads_upsample=-1,
- use_scale_shift_norm=False,
- resblock_updown=False,
- use_new_attention_order=False,
- pool="adaptive",
- *args,
- **kwargs,
- ):
- super().__init__()
-
- if num_heads_upsample == -1:
- num_heads_upsample = num_heads
-
- self.in_channels = in_channels
- self.model_channels = model_channels
- self.out_channels = out_channels
- self.num_res_blocks = num_res_blocks
- self.attention_resolutions = attention_resolutions
- self.dropout = dropout
- self.channel_mult = channel_mult
- self.conv_resample = conv_resample
- self.use_checkpoint = use_checkpoint
- self.dtype = th.float16 if use_fp16 else th.float32
- self.num_heads = num_heads
- self.num_head_channels = num_head_channels
- self.num_heads_upsample = num_heads_upsample
-
- time_embed_dim = model_channels * 4
- self.time_embed = nn.Sequential(
- linear(model_channels, time_embed_dim),
- nn.SiLU(),
- linear(time_embed_dim, time_embed_dim),
- )
-
- self.input_blocks = nn.ModuleList(
- [
- TimestepEmbedSequential(
- conv_nd(dims, in_channels, model_channels, 3, padding=1)
- )
- ]
- )
- self._feature_size = model_channels
- input_block_chans = [model_channels]
- ch = model_channels
- ds = 1
- for level, mult in enumerate(channel_mult):
- for _ in range(num_res_blocks):
- layers = [
- ResBlock(
- ch,
- time_embed_dim,
- dropout,
- out_channels=mult * model_channels,
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- )
- ]
- ch = mult * model_channels
- if ds in attention_resolutions:
- layers.append(
- AttentionBlock(
- ch,
- use_checkpoint=use_checkpoint,
- num_heads=num_heads,
- num_head_channels=num_head_channels,
- use_new_attention_order=use_new_attention_order,
- )
- )
- self.input_blocks.append(TimestepEmbedSequential(*layers))
- self._feature_size += ch
- input_block_chans.append(ch)
- if level != len(channel_mult) - 1:
- out_ch = ch
- self.input_blocks.append(
- TimestepEmbedSequential(
- ResBlock(
- ch,
- time_embed_dim,
- dropout,
- out_channels=out_ch,
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- down=True,
- )
- if resblock_updown
- else Downsample(
- ch, conv_resample, dims=dims, out_channels=out_ch
- )
- )
- )
- ch = out_ch
- input_block_chans.append(ch)
- ds *= 2
- self._feature_size += ch
-
- self.middle_block = TimestepEmbedSequential(
- ResBlock(
- ch,
- time_embed_dim,
- dropout,
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- ),
- AttentionBlock(
- ch,
- use_checkpoint=use_checkpoint,
- num_heads=num_heads,
- num_head_channels=num_head_channels,
- use_new_attention_order=use_new_attention_order,
- ),
- ResBlock(
- ch,
- time_embed_dim,
- dropout,
- dims=dims,
- use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- ),
- )
- self._feature_size += ch
- self.pool = pool
- if pool == "adaptive":
- self.out = nn.Sequential(
- normalization(ch),
- nn.SiLU(),
- nn.AdaptiveAvgPool2d((1, 1)),
- zero_module(conv_nd(dims, ch, out_channels, 1)),
- nn.Flatten(),
- )
- elif pool == "attention":
- assert num_head_channels != -1
- self.out = nn.Sequential(
- normalization(ch),
- nn.SiLU(),
- AttentionPool2d(
- (image_size // ds), ch, num_head_channels, out_channels
- ),
- )
- elif pool == "spatial":
- self.out = nn.Sequential(
- nn.Linear(self._feature_size, 2048),
- nn.ReLU(),
- nn.Linear(2048, self.out_channels),
- )
- elif pool == "spatial_v2":
- self.out = nn.Sequential(
- nn.Linear(self._feature_size, 2048),
- normalization(2048),
- nn.SiLU(),
- nn.Linear(2048, self.out_channels),
- )
- else:
- raise NotImplementedError(f"Unexpected {pool} pooling")
-
- def convert_to_fp16(self):
- """
- Convert the torso of the model to float16.
- """
- self.input_blocks.apply(convert_module_to_f16)
- self.middle_block.apply(convert_module_to_f16)
-
- def convert_to_fp32(self):
- """
- Convert the torso of the model to float32.
- """
- self.input_blocks.apply(convert_module_to_f32)
- self.middle_block.apply(convert_module_to_f32)
-
- def forward(self, x, timesteps):
- """
- Apply the model to an input batch.
- :param x: an [N x C x ...] Tensor of inputs.
- :param timesteps: a 1-D batch of timesteps.
- :return: an [N x K] Tensor of outputs.
- """
- emb = self.time_embed(timestep_embedding(timesteps, self.model_channels))
-
- results = []
- h = x.type(self.dtype)
- for module in self.input_blocks:
- h = module(h, emb)
- if self.pool.startswith("spatial"):
- results.append(h.type(x.dtype).mean(dim=(2, 3)))
- h = self.middle_block(h, emb)
- if self.pool.startswith("spatial"):
- results.append(h.type(x.dtype).mean(dim=(2, 3)))
- h = th.cat(results, axis=-1)
- return self.out(h)
- else:
- h = h.type(x.dtype)
- return self.out(h)
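For the `pool="spatial"` readout above, `self._feature_size` accumulated in `__init__` is exactly the channel dimension of `th.cat(results, axis=-1)`: one `[N, ch]` spatial mean is appended per input block, plus one for the middle block. A worked example for a toy config (model_channels=32, channel_mult=(1, 2), num_res_blocks=1):

```python
# One mean per input block, plus the middle block:
#   stem conv        -> ch = 32
#   level 0 resblock -> ch = 32
#   downsample       -> ch = 32
#   level 1 resblock -> ch = 64  (last level: no downsample)
#   middle block     -> ch = 64
feature_size = 32 + 32 + 32 + 64 + 64
print(feature_size)  # 224, the in_features of the first nn.Linear in self.out
```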
diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fastapi/security/api_key.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fastapi/security/api_key.py
deleted file mode 100644
index 8b2c5c08059fc6911ceb34efdff8f5bf80df469f..0000000000000000000000000000000000000000
--- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fastapi/security/api_key.py
+++ /dev/null
@@ -1,98 +0,0 @@
-from typing import Optional
-
-from fastapi.openapi.models import APIKey, APIKeyIn
-from fastapi.security.base import SecurityBase
-from starlette.exceptions import HTTPException
-from starlette.requests import Request
-from starlette.status import HTTP_403_FORBIDDEN
-
-
-class APIKeyBase(SecurityBase):
- pass
-
-
-class APIKeyQuery(APIKeyBase):
- def __init__(
- self,
- *,
- name: str,
- scheme_name: Optional[str] = None,
- description: Optional[str] = None,
- auto_error: bool = True,
- ):
- self.model: APIKey = APIKey(
- **{"in": APIKeyIn.query}, # type: ignore[arg-type]
- name=name,
- description=description,
- )
- self.scheme_name = scheme_name or self.__class__.__name__
- self.auto_error = auto_error
-
- async def __call__(self, request: Request) -> Optional[str]:
- api_key = request.query_params.get(self.model.name)
- if not api_key:
- if self.auto_error:
- raise HTTPException(
- status_code=HTTP_403_FORBIDDEN, detail="Not authenticated"
- )
- else:
- return None
- return api_key
-
-
-class APIKeyHeader(APIKeyBase):
- def __init__(
- self,
- *,
- name: str,
- scheme_name: Optional[str] = None,
- description: Optional[str] = None,
- auto_error: bool = True,
- ):
- self.model: APIKey = APIKey(
- **{"in": APIKeyIn.header}, # type: ignore[arg-type]
- name=name,
- description=description,
- )
- self.scheme_name = scheme_name or self.__class__.__name__
- self.auto_error = auto_error
-
- async def __call__(self, request: Request) -> Optional[str]:
- api_key = request.headers.get(self.model.name)
- if not api_key:
- if self.auto_error:
- raise HTTPException(
- status_code=HTTP_403_FORBIDDEN, detail="Not authenticated"
- )
- else:
- return None
- return api_key
-
-
-class APIKeyCookie(APIKeyBase):
- def __init__(
- self,
- *,
- name: str,
- scheme_name: Optional[str] = None,
- description: Optional[str] = None,
- auto_error: bool = True,
- ):
- self.model: APIKey = APIKey(
- **{"in": APIKeyIn.cookie}, # type: ignore[arg-type]
- name=name,
- description=description,
- )
- self.scheme_name = scheme_name or self.__class__.__name__
- self.auto_error = auto_error
-
- async def __call__(self, request: Request) -> Optional[str]:
- api_key = request.cookies.get(self.model.name)
- if not api_key:
- if self.auto_error:
- raise HTTPException(
- status_code=HTTP_403_FORBIDDEN, detail="Not authenticated"
- )
- else:
- return None
- return api_key
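For context, this is how the deleted `APIKeyHeader` is typically wired into an app; this is standard FastAPI usage, with the header name chosen here as an example:

```python
from fastapi import Depends, FastAPI
from fastapi.security.api_key import APIKeyHeader

app = FastAPI()
api_key_scheme = APIKeyHeader(name="X-API-Key")  # auto_error=True: 403 if header missing

@app.get("/protected")
async def protected(api_key: str = Depends(api_key_scheme)):
    # The dependency returns the raw header value; validating it is up to the app.
    return {"api_key_received": bool(api_key)}
```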
diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/t1Lib/__init__.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/t1Lib/__init__.py
deleted file mode 100644
index e98acb7c52e89a83b7750601c6d80cbd094637d7..0000000000000000000000000000000000000000
--- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/t1Lib/__init__.py
+++ /dev/null
@@ -1,638 +0,0 @@
-"""fontTools.t1Lib.py -- Tools for PostScript Type 1 fonts (Python2 only)
-
-Functions for reading and writing raw Type 1 data:
-
-read(path)
- reads any Type 1 font file, returns the raw data and a type indicator:
- 'LWFN', 'PFB' or 'OTHER', depending on the format of the file pointed
- to by 'path'.
- Raises an error when the file does not contain valid Type 1 data.
-
-write(path, data, kind='OTHER', dohex=False)
- writes raw Type 1 data to the file pointed to by 'path'.
- 'kind' can be one of 'LWFN', 'PFB' or 'OTHER'; it defaults to 'OTHER'.
- 'dohex' is a flag which determines whether the eexec encrypted
- part should be written as hexadecimal or binary, but only if kind
- is 'OTHER'.
-"""
-import fontTools
-from fontTools.misc import eexec
-from fontTools.misc.macCreatorType import getMacCreatorAndType
-from fontTools.misc.textTools import bytechr, byteord, bytesjoin, tobytes
-from fontTools.misc.psOperators import (
- _type1_pre_eexec_order,
- _type1_fontinfo_order,
- _type1_post_eexec_order,
-)
-from fontTools.encodings.StandardEncoding import StandardEncoding
-import os
-import re
-
-__author__ = "jvr"
-__version__ = "1.0b3"
-DEBUG = 0
-
-
-try:
- try:
- from Carbon import Res
- except ImportError:
- import Res # MacPython < 2.2
-except ImportError:
- haveMacSupport = 0
-else:
- haveMacSupport = 1
-
-
-class T1Error(Exception):
- pass
-
-
-class T1Font(object):
-
- """Type 1 font class.
-
-    Uses a minimal interpreter that supports just about enough PS to parse
- Type 1 fonts.
- """
-
- def __init__(self, path, encoding="ascii", kind=None):
- if kind is None:
- self.data, _ = read(path)
- elif kind == "LWFN":
- self.data = readLWFN(path)
- elif kind == "PFB":
- self.data = readPFB(path)
- elif kind == "OTHER":
- self.data = readOther(path)
- else:
- raise ValueError(kind)
- self.encoding = encoding
-
- def saveAs(self, path, type, dohex=False):
- write(path, self.getData(), type, dohex)
-
- def getData(self):
- if not hasattr(self, "data"):
- self.data = self.createData()
- return self.data
-
- def getGlyphSet(self):
- """Return a generic GlyphSet, which is a dict-like object
- mapping glyph names to glyph objects. The returned glyph objects
- have a .draw() method that supports the Pen protocol, and will
- have an attribute named 'width', but only *after* the .draw() method
- has been called.
-
- In the case of Type 1, the GlyphSet is simply the CharStrings dict.
- """
- return self["CharStrings"]
-
- def __getitem__(self, key):
- if not hasattr(self, "font"):
- self.parse()
- return self.font[key]
-
- def parse(self):
- from fontTools.misc import psLib
- from fontTools.misc import psCharStrings
-
- self.font = psLib.suckfont(self.data, self.encoding)
- charStrings = self.font["CharStrings"]
- lenIV = self.font["Private"].get("lenIV", 4)
- assert lenIV >= 0
- subrs = self.font["Private"]["Subrs"]
- for glyphName, charString in charStrings.items():
- charString, R = eexec.decrypt(charString, 4330)
- charStrings[glyphName] = psCharStrings.T1CharString(
- charString[lenIV:], subrs=subrs
- )
- for i in range(len(subrs)):
- charString, R = eexec.decrypt(subrs[i], 4330)
- subrs[i] = psCharStrings.T1CharString(charString[lenIV:], subrs=subrs)
- del self.data
-
- def createData(self):
- sf = self.font
-
- eexec_began = False
- eexec_dict = {}
- lines = []
- lines.extend(
- [
- self._tobytes(f"%!FontType1-1.1: {sf['FontName']}"),
- self._tobytes(f"%t1Font: ({fontTools.version})"),
- self._tobytes(f"%%BeginResource: font {sf['FontName']}"),
- ]
- )
- # follow t1write.c:writeRegNameKeyedFont
- size = 3 # Headroom for new key addition
- size += 1 # FontMatrix is always counted
-        size += 1 + 1  # Private, CharStrings
- for key in font_dictionary_keys:
- size += int(key in sf)
- lines.append(self._tobytes(f"{size} dict dup begin"))
-
- for key, value in sf.items():
- if eexec_began:
- eexec_dict[key] = value
- continue
-
- if key == "FontInfo":
- fi = sf["FontInfo"]
- # follow t1write.c:writeFontInfoDict
- size = 3 # Headroom for new key addition
- for subkey in FontInfo_dictionary_keys:
- size += int(subkey in fi)
- lines.append(self._tobytes(f"/FontInfo {size} dict dup begin"))
-
- for subkey, subvalue in fi.items():
- lines.extend(self._make_lines(subkey, subvalue))
- lines.append(b"end def")
- elif key in _type1_post_eexec_order: # usually 'Private'
- eexec_dict[key] = value
- eexec_began = True
- else:
- lines.extend(self._make_lines(key, value))
- lines.append(b"end")
- eexec_portion = self.encode_eexec(eexec_dict)
- lines.append(bytesjoin([b"currentfile eexec ", eexec_portion]))
-
- for _ in range(8):
- lines.append(self._tobytes("0" * 64))
- lines.extend([b"cleartomark", b"%%EndResource", b"%%EOF"])
-
- data = bytesjoin(lines, "\n")
- return data
-
- def encode_eexec(self, eexec_dict):
- lines = []
-
- # '-|', '|-', '|'
- RD_key, ND_key, NP_key = None, None, None
-
- for key, value in eexec_dict.items():
- if key == "Private":
- pr = eexec_dict["Private"]
- # follow t1write.c:writePrivateDict
- size = 3 # for RD, ND, NP
- for subkey in Private_dictionary_keys:
- size += int(subkey in pr)
- lines.append(b"dup /Private")
- lines.append(self._tobytes(f"{size} dict dup begin"))
- for subkey, subvalue in pr.items():
- if not RD_key and subvalue == RD_value:
- RD_key = subkey
- elif not ND_key and subvalue == ND_value:
- ND_key = subkey
- elif not NP_key and subvalue == PD_value:
- NP_key = subkey
-
- if subkey == "OtherSubrs":
- # XXX: assert that no flex hint is used
- lines.append(self._tobytes(hintothers))
- elif subkey == "Subrs":
- # XXX: standard Subrs only
- lines.append(b"/Subrs 5 array")
- for i, subr_bin in enumerate(std_subrs):
- encrypted_subr, R = eexec.encrypt(
- bytesjoin([char_IV, subr_bin]), 4330
- )
- lines.append(
- bytesjoin(
- [
- self._tobytes(
- f"dup {i} {len(encrypted_subr)} {RD_key} "
- ),
- encrypted_subr,
- self._tobytes(f" {NP_key}"),
- ]
- )
- )
- lines.append(b"def")
-
- lines.append(b"put")
- else:
- lines.extend(self._make_lines(subkey, subvalue))
- elif key == "CharStrings":
- lines.append(b"dup /CharStrings")
- lines.append(
- self._tobytes(f"{len(eexec_dict['CharStrings'])} dict dup begin")
- )
- for glyph_name, char_bin in eexec_dict["CharStrings"].items():
- char_bin.compile()
- encrypted_char, R = eexec.encrypt(
- bytesjoin([char_IV, char_bin.bytecode]), 4330
- )
- lines.append(
- bytesjoin(
- [
- self._tobytes(
- f"/{glyph_name} {len(encrypted_char)} {RD_key} "
- ),
- encrypted_char,
- self._tobytes(f" {ND_key}"),
- ]
- )
- )
- lines.append(b"end put")
- else:
- lines.extend(self._make_lines(key, value))
-
- lines.extend(
- [
- b"end",
- b"dup /FontName get exch definefont pop",
- b"mark",
- b"currentfile closefile\n",
- ]
- )
-
- eexec_portion = bytesjoin(lines, "\n")
- encrypted_eexec, R = eexec.encrypt(bytesjoin([eexec_IV, eexec_portion]), 55665)
-
- return encrypted_eexec
-
- def _make_lines(self, key, value):
- if key == "FontName":
- return [self._tobytes(f"/{key} /{value} def")]
- if key in ["isFixedPitch", "ForceBold", "RndStemUp"]:
- return [self._tobytes(f"/{key} {'true' if value else 'false'} def")]
- elif key == "Encoding":
- if value == StandardEncoding:
- return [self._tobytes(f"/{key} StandardEncoding def")]
- else:
- # follow fontTools.misc.psOperators._type1_Encoding_repr
- lines = []
- lines.append(b"/Encoding 256 array")
- lines.append(b"0 1 255 {1 index exch /.notdef put} for")
- for i in range(256):
- name = value[i]
- if name != ".notdef":
- lines.append(self._tobytes(f"dup {i} /{name} put"))
- lines.append(b"def")
- return lines
- if isinstance(value, str):
- return [self._tobytes(f"/{key} ({value}) def")]
- elif isinstance(value, bool):
- return [self._tobytes(f"/{key} {'true' if value else 'false'} def")]
- elif isinstance(value, list):
- return [self._tobytes(f"/{key} [{' '.join(str(v) for v in value)}] def")]
- elif isinstance(value, tuple):
- return [self._tobytes(f"/{key} {{{' '.join(str(v) for v in value)}}} def")]
- else:
- return [self._tobytes(f"/{key} {value} def")]
-
- def _tobytes(self, s, errors="strict"):
- return tobytes(s, self.encoding, errors)
-
-
-# low level T1 data read and write functions
-
-
-def read(path, onlyHeader=False):
- """reads any Type 1 font file, returns raw data"""
- _, ext = os.path.splitext(path)
- ext = ext.lower()
- creator, typ = getMacCreatorAndType(path)
- if typ == "LWFN":
- return readLWFN(path, onlyHeader), "LWFN"
- if ext == ".pfb":
- return readPFB(path, onlyHeader), "PFB"
- else:
- return readOther(path), "OTHER"
-
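Typical round-trip usage of the functions in this module (file paths are placeholders):

```python
from fontTools import t1Lib

data, kind = t1Lib.read("Example.pfb")   # kind is 'LWFN', 'PFB' or 'OTHER'
t1Lib.write("Example.copy", data, kind)

font = t1Lib.T1Font("Example.pfb", kind="PFB")
font.parse()                             # decrypts eexec and parses charstrings
print(sorted(font["CharStrings"].keys())[:5])
```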
-
-def write(path, data, kind="OTHER", dohex=False):
- assertType1(data)
- kind = kind.upper()
- try:
- os.remove(path)
- except os.error:
- pass
- err = 1
- try:
- if kind == "LWFN":
- writeLWFN(path, data)
- elif kind == "PFB":
- writePFB(path, data)
- else:
- writeOther(path, data, dohex)
- err = 0
- finally:
- if err and not DEBUG:
- try:
- os.remove(path)
- except os.error:
- pass
-
-
-# -- internal --
-
-LWFNCHUNKSIZE = 2000
-HEXLINELENGTH = 80
-
-
-def readLWFN(path, onlyHeader=False):
- """reads an LWFN font file, returns raw data"""
- from fontTools.misc.macRes import ResourceReader
-
- reader = ResourceReader(path)
- try:
- data = []
- for res in reader.get("POST", []):
- code = byteord(res.data[0])
- if byteord(res.data[1]) != 0:
- raise T1Error("corrupt LWFN file")
- if code in [1, 2]:
- if onlyHeader and code == 2:
- break
- data.append(res.data[2:])
- elif code in [3, 5]:
- break
- elif code == 4:
- with open(path, "rb") as f:
- data.append(f.read())
- elif code == 0:
- pass # comment, ignore
- else:
- raise T1Error("bad chunk code: " + repr(code))
- finally:
- reader.close()
- data = bytesjoin(data)
- assertType1(data)
- return data
-
-
-def readPFB(path, onlyHeader=False):
- """reads a PFB font file, returns raw data"""
- data = []
- with open(path, "rb") as f:
- while True:
- if f.read(1) != bytechr(128):
- raise T1Error("corrupt PFB file")
- code = byteord(f.read(1))
- if code in [1, 2]:
- chunklen = stringToLong(f.read(4))
- chunk = f.read(chunklen)
- assert len(chunk) == chunklen
- data.append(chunk)
- elif code == 3:
- break
- else:
- raise T1Error("bad chunk code: " + repr(code))
- if onlyHeader:
- break
- data = bytesjoin(data)
- assertType1(data)
- return data
-
-
-def readOther(path):
- """reads any (font) file, returns raw data"""
- with open(path, "rb") as f:
- data = f.read()
- assertType1(data)
- chunks = findEncryptedChunks(data)
- data = []
- for isEncrypted, chunk in chunks:
- if isEncrypted and isHex(chunk[:4]):
- data.append(deHexString(chunk))
- else:
- data.append(chunk)
- return bytesjoin(data)
-
-
-# file writing tools
-
-
-def writeLWFN(path, data):
- # Res.FSpCreateResFile was deprecated in OS X 10.5
- Res.FSpCreateResFile(path, "just", "LWFN", 0)
- resRef = Res.FSOpenResFile(path, 2) # write-only
- try:
- Res.UseResFile(resRef)
- resID = 501
- chunks = findEncryptedChunks(data)
- for isEncrypted, chunk in chunks:
- if isEncrypted:
- code = 2
- else:
- code = 1
- while chunk:
- res = Res.Resource(bytechr(code) + "\0" + chunk[: LWFNCHUNKSIZE - 2])
- res.AddResource("POST", resID, "")
- chunk = chunk[LWFNCHUNKSIZE - 2 :]
- resID = resID + 1
- res = Res.Resource(bytechr(5) + "\0")
- res.AddResource("POST", resID, "")
- finally:
- Res.CloseResFile(resRef)
-
-
-def writePFB(path, data):
- chunks = findEncryptedChunks(data)
- with open(path, "wb") as f:
- for isEncrypted, chunk in chunks:
- if isEncrypted:
- code = 2
- else:
- code = 1
- f.write(bytechr(128) + bytechr(code))
- f.write(longToString(len(chunk)))
- f.write(chunk)
- f.write(bytechr(128) + bytechr(3))
-
-
-def writeOther(path, data, dohex=False):
- chunks = findEncryptedChunks(data)
- with open(path, "wb") as f:
- hexlinelen = HEXLINELENGTH // 2
- for isEncrypted, chunk in chunks:
- if isEncrypted:
- code = 2
- else:
- code = 1
- if code == 2 and dohex:
- while chunk:
- f.write(eexec.hexString(chunk[:hexlinelen]))
- f.write(b"\r")
- chunk = chunk[hexlinelen:]
- else:
- f.write(chunk)
-
-
-# decryption tools
-
-EEXECBEGIN = b"currentfile eexec"
-# The spec allows for 512 ASCII zeros interrupted by arbitrary whitespace to
-# follow eexec
-EEXECEND = re.compile(b"(0[ \t\r\n]*){512}", flags=re.M)
-EEXECINTERNALEND = b"currentfile closefile"
-EEXECBEGINMARKER = b"%-- eexec start\r"
-EEXECENDMARKER = b"%-- eexec end\r"
-
-_ishexRE = re.compile(b"[0-9A-Fa-f]*$")
-
-
-def isHex(text):
- return _ishexRE.match(text) is not None
-
-
-def decryptType1(data):
- chunks = findEncryptedChunks(data)
- data = []
- for isEncrypted, chunk in chunks:
- if isEncrypted:
- if isHex(chunk[:4]):
- chunk = deHexString(chunk)
- decrypted, R = eexec.decrypt(chunk, 55665)
- decrypted = decrypted[4:]
- if (
- decrypted[-len(EEXECINTERNALEND) - 1 : -1] != EEXECINTERNALEND
- and decrypted[-len(EEXECINTERNALEND) - 2 : -2] != EEXECINTERNALEND
- ):
- raise T1Error("invalid end of eexec part")
- decrypted = decrypted[: -len(EEXECINTERNALEND) - 2] + b"\r"
- data.append(EEXECBEGINMARKER + decrypted + EEXECENDMARKER)
- else:
- if chunk[-len(EEXECBEGIN) - 1 : -1] == EEXECBEGIN:
- data.append(chunk[: -len(EEXECBEGIN) - 1])
- else:
- data.append(chunk)
- return bytesjoin(data)
-
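`eexec.decrypt` implements the standard Type 1 eexec stream cipher (key 55665 for the font program, 4330 for charstrings). A minimal pure-Python sketch of the algorithm for reference; the real fontTools helper also returns the final key `R`:

```python
def eexec_decrypt(cipher: bytes, r: int = 55665) -> bytes:
    """Type 1 eexec decryption (constants c1/c2 from the Adobe Type 1 spec)."""
    c1, c2 = 52845, 22719
    plain = bytearray()
    for byte in cipher:
        plain.append(byte ^ (r >> 8))
        r = ((byte + r) * c1 + c2) & 0xFFFF
    return bytes(plain)

# The first lenIV (usually 4) decrypted bytes are random padding and are
# skipped, as in T1Font.parse() above.
```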
-
-def findEncryptedChunks(data):
- chunks = []
- while True:
- eBegin = data.find(EEXECBEGIN)
- if eBegin < 0:
- break
- eBegin = eBegin + len(EEXECBEGIN) + 1
- endMatch = EEXECEND.search(data, eBegin)
- if endMatch is None:
- raise T1Error("can't find end of eexec part")
- eEnd = endMatch.start()
- cypherText = data[eBegin : eEnd + 2]
- if isHex(cypherText[:4]):
- cypherText = deHexString(cypherText)
- plainText, R = eexec.decrypt(cypherText, 55665)
- eEndLocal = plainText.find(EEXECINTERNALEND)
- if eEndLocal < 0:
- raise T1Error("can't find end of eexec part")
- chunks.append((0, data[:eBegin]))
- chunks.append((1, cypherText[: eEndLocal + len(EEXECINTERNALEND) + 1]))
- data = data[eEnd:]
- chunks.append((0, data))
- return chunks
-
-
-def deHexString(hexstring):
- return eexec.deHexString(bytesjoin(hexstring.split()))
-
-
-# Type 1 assertion
-
-_fontType1RE = re.compile(rb"/FontType\s+1\s+def")
-
-
-def assertType1(data):
- for head in [b"%!PS-AdobeFont", b"%!FontType1"]:
- if data[: len(head)] == head:
- break
- else:
- raise T1Error("not a PostScript font")
- if not _fontType1RE.search(data):
- raise T1Error("not a Type 1 font")
- if data.find(b"currentfile eexec") < 0:
- raise T1Error("not an encrypted Type 1 font")
- # XXX what else?
- return data
-
-
-# pfb helpers
-
-
-def longToString(long):
- s = b""
- for i in range(4):
- s += bytechr((long & (0xFF << (i * 8))) >> i * 8)
- return s
-
-
-def stringToLong(s):
- if len(s) != 4:
- raise ValueError("string must be 4 bytes long")
- l = 0
- for i in range(4):
- l += byteord(s[i]) << (i * 8)
- return l
-
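These helpers are little-endian 4-byte integer packing; on Python 3 they are equivalent to `int.to_bytes`/`int.from_bytes`:

```python
n = 0x01020304
assert n.to_bytes(4, "little") == b"\x04\x03\x02\x01"      # == longToString(n)
assert int.from_bytes(b"\x04\x03\x02\x01", "little") == n  # == stringToLong(...)
```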
-
-# PS stream helpers
-
-font_dictionary_keys = list(_type1_pre_eexec_order)
-# t1write.c:writeRegNameKeyedFont
-# always counts following keys
-font_dictionary_keys.remove("FontMatrix")
-
-FontInfo_dictionary_keys = list(_type1_fontinfo_order)
-# extend because AFDKO tx may use following keys
-FontInfo_dictionary_keys.extend(
- [
- "FSType",
- "Copyright",
- ]
-)
-
-Private_dictionary_keys = [
-    # We don't know what names will actually be used.
- # "RD",
- # "ND",
- # "NP",
- "Subrs",
- "OtherSubrs",
- "UniqueID",
- "BlueValues",
- "OtherBlues",
- "FamilyBlues",
- "FamilyOtherBlues",
- "BlueScale",
- "BlueShift",
- "BlueFuzz",
- "StdHW",
- "StdVW",
- "StemSnapH",
- "StemSnapV",
- "ForceBold",
- "LanguageGroup",
- "password",
- "lenIV",
- "MinFeature",
- "RndStemUp",
-]
-
-# t1write_hintothers.h
-hintothers = """/OtherSubrs[{}{}{}{systemdict/internaldict known not{pop 3}{1183615869
-systemdict/internaldict get exec dup/startlock known{/startlock get exec}{dup
-/strtlck known{/strtlck get exec}{pop 3}ifelse}ifelse}ifelse}executeonly]def"""
-# t1write.c:saveStdSubrs
-std_subrs = [
- # 3 0 callother pop pop setcurrentpoint return
- b"\x8e\x8b\x0c\x10\x0c\x11\x0c\x11\x0c\x21\x0b",
- # 0 1 callother return
- b"\x8b\x8c\x0c\x10\x0b",
- # 0 2 callother return
- b"\x8b\x8d\x0c\x10\x0b",
- # return
- b"\x0b",
- # 3 1 3 callother pop callsubr return
- b"\x8e\x8c\x8e\x0c\x10\x0c\x11\x0a\x0b",
-]
-# follow t1write.c:writeRegNameKeyedFont
-eexec_IV = b"cccc"
-char_IV = b"\x0c\x0c\x0c\x0c"
-RD_value = ("string", "currentfile", "exch", "readstring", "pop")
-ND_value = ("def",)
-PD_value = ("put",)
diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio_client/cli/deploy_discord.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio_client/cli/deploy_discord.py
deleted file mode 100644
index bb26c088abeef28313142db9d58213a38d5581b4..0000000000000000000000000000000000000000
--- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio_client/cli/deploy_discord.py
+++ /dev/null
@@ -1,58 +0,0 @@
-import argparse
-
-from gradio_client import Client
-
-
-def main():
- parser = argparse.ArgumentParser(description="Deploy Space as Discord Bot.")
- parser.add_argument("deploy-discord")
- parser.add_argument(
- "--src",
- type=str,
- help="The space id or url or gradio app you want to deploy as a gradio bot.",
- )
- parser.add_argument(
- "--discord-bot-token",
- type=str,
- help="Discord bot token. Get one on the discord website.",
- )
- parser.add_argument(
- "--api-names",
- nargs="*",
- help="Api names to turn into discord bots",
- default=[],
- )
- parser.add_argument(
- "--to-id",
- type=str,
- help="Name of the space used to host the discord bot",
- default=None,
- )
- parser.add_argument(
- "--hf-token",
- type=str,
- help=(
- "Hugging Face token. Can be ommitted if you are logged in via huggingface_hub cli. "
- "Must be provided if upstream space is private."
- ),
- default=None,
- )
- parser.add_argument(
- "--private",
- type=bool,
- nargs="?",
- help="Whether the discord bot space is private.",
- const=True,
- default=False,
- )
- args = parser.parse_args()
- for i, name in enumerate(args.api_names):
- if "," in name:
- args.api_names[i] = tuple(name.split(","))
- Client(args.src).deploy_discord(
- discord_bot_token=args.discord_bot_token,
- api_names=args.api_names,
- to_id=args.to_id,
- hf_token=args.hf_token,
- private=args.private,
- )
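The CLI above is a thin wrapper over `Client.deploy_discord`; the equivalent programmatic call looks like this (space id and token are placeholders):

```python
from gradio_client import Client

Client("user/my-space").deploy_discord(        # placeholder space id
    discord_bot_token="YOUR_DISCORD_BOT_TOKEN",
    api_names=["predict"],
    to_id=None,
    hf_token=None,
    private=False,
)
```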
diff --git a/spaces/declare-lab/tango/diffusers/scripts/convert_ddpm_original_checkpoint_to_diffusers.py b/spaces/declare-lab/tango/diffusers/scripts/convert_ddpm_original_checkpoint_to_diffusers.py
deleted file mode 100644
index 46595784b0bac0016b623b7122082275248363e9..0000000000000000000000000000000000000000
--- a/spaces/declare-lab/tango/diffusers/scripts/convert_ddpm_original_checkpoint_to_diffusers.py
+++ /dev/null
@@ -1,431 +0,0 @@
-import argparse
-import json
-
-import torch
-
-from diffusers import AutoencoderKL, DDPMPipeline, DDPMScheduler, UNet2DModel, VQModel
-
-
-def shave_segments(path, n_shave_prefix_segments=1):
- """
- Removes segments. Positive values shave the first segments, negative shave the last segments.
- """
- if n_shave_prefix_segments >= 0:
- return ".".join(path.split(".")[n_shave_prefix_segments:])
- else:
- return ".".join(path.split(".")[:n_shave_prefix_segments])
-
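Concretely, with the dotted state-dict keys used throughout this script and the function above:

```python
key = "down.0.block.1.conv1.weight"
assert shave_segments(key, 1) == "0.block.1.conv1.weight"   # drop the first segment
assert shave_segments(key, -1) == "down.0.block.1.conv1"    # drop the last segment
```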
-
-def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
- mapping = []
- for old_item in old_list:
- new_item = old_item
- new_item = new_item.replace("block.", "resnets.")
- new_item = new_item.replace("conv_shorcut", "conv1")
- new_item = new_item.replace("in_shortcut", "conv_shortcut")
- new_item = new_item.replace("temb_proj", "time_emb_proj")
-
- new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
-
- mapping.append({"old": old_item, "new": new_item})
-
- return mapping
-
-
-def renew_attention_paths(old_list, n_shave_prefix_segments=0, in_mid=False):
- mapping = []
- for old_item in old_list:
- new_item = old_item
-
- # In `model.mid`, the layer is called `attn`.
- if not in_mid:
- new_item = new_item.replace("attn", "attentions")
- new_item = new_item.replace(".k.", ".key.")
- new_item = new_item.replace(".v.", ".value.")
- new_item = new_item.replace(".q.", ".query.")
-
- new_item = new_item.replace("proj_out", "proj_attn")
- new_item = new_item.replace("norm", "group_norm")
-
- new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
- mapping.append({"old": old_item, "new": new_item})
-
- return mapping
-
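For example, a key outside `model.mid` is renamed like this before `assign_to_checkpoint` additionally maps `down.` to `down_blocks.`:

```python
mapping = renew_attention_paths(["down.0.attn.1.q.weight"])
assert mapping == [
    {"old": "down.0.attn.1.q.weight", "new": "down.0.attentions.1.query.weight"}
]
```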
-
-def assign_to_checkpoint(
- paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None
-):
- assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."
-
- if attention_paths_to_split is not None:
- if config is None:
- raise ValueError("Please specify the config if setting 'attention_paths_to_split' to 'True'.")
-
- for path, path_map in attention_paths_to_split.items():
- old_tensor = old_checkpoint[path]
- channels = old_tensor.shape[0] // 3
-
- target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)
-
- num_heads = old_tensor.shape[0] // config.get("num_head_channels", 1) // 3
-
- old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
- query, key, value = old_tensor.split(channels // num_heads, dim=1)
-
- checkpoint[path_map["query"]] = query.reshape(target_shape).squeeze()
- checkpoint[path_map["key"]] = key.reshape(target_shape).squeeze()
- checkpoint[path_map["value"]] = value.reshape(target_shape).squeeze()
-
- for path in paths:
- new_path = path["new"]
-
- if attention_paths_to_split is not None and new_path in attention_paths_to_split:
- continue
-
- new_path = new_path.replace("down.", "down_blocks.")
- new_path = new_path.replace("up.", "up_blocks.")
-
- if additional_replacements is not None:
- for replacement in additional_replacements:
- new_path = new_path.replace(replacement["old"], replacement["new"])
-
- if "attentions" in new_path:
- checkpoint[new_path] = old_checkpoint[path["old"]].squeeze()
- else:
- checkpoint[new_path] = old_checkpoint[path["old"]]
-
-
-def convert_ddpm_checkpoint(checkpoint, config):
- """
- Takes a state dict and a config, and returns a converted checkpoint.
- """
- new_checkpoint = {}
-
- new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["temb.dense.0.weight"]
- new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["temb.dense.0.bias"]
- new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["temb.dense.1.weight"]
- new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["temb.dense.1.bias"]
-
- new_checkpoint["conv_norm_out.weight"] = checkpoint["norm_out.weight"]
- new_checkpoint["conv_norm_out.bias"] = checkpoint["norm_out.bias"]
-
- new_checkpoint["conv_in.weight"] = checkpoint["conv_in.weight"]
- new_checkpoint["conv_in.bias"] = checkpoint["conv_in.bias"]
- new_checkpoint["conv_out.weight"] = checkpoint["conv_out.weight"]
- new_checkpoint["conv_out.bias"] = checkpoint["conv_out.bias"]
-
- num_down_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "down" in layer})
- down_blocks = {
- layer_id: [key for key in checkpoint if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
- }
-
- num_up_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "up" in layer})
- up_blocks = {layer_id: [key for key in checkpoint if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)}
-
- for i in range(num_down_blocks):
- block_id = (i - 1) // (config["layers_per_block"] + 1)
-
- if any("downsample" in layer for layer in down_blocks[i]):
- new_checkpoint[f"down_blocks.{i}.downsamplers.0.conv.weight"] = checkpoint[
- f"down.{i}.downsample.op.weight"
- ]
- new_checkpoint[f"down_blocks.{i}.downsamplers.0.conv.bias"] = checkpoint[f"down.{i}.downsample.op.bias"]
- # new_checkpoint[f'down_blocks.{i}.downsamplers.0.op.weight'] = checkpoint[f'down.{i}.downsample.conv.weight']
- # new_checkpoint[f'down_blocks.{i}.downsamplers.0.op.bias'] = checkpoint[f'down.{i}.downsample.conv.bias']
-
- if any("block" in layer for layer in down_blocks[i]):
- num_blocks = len(
- {".".join(shave_segments(layer, 2).split(".")[:2]) for layer in down_blocks[i] if "block" in layer}
- )
- blocks = {
- layer_id: [key for key in down_blocks[i] if f"block.{layer_id}" in key]
- for layer_id in range(num_blocks)
- }
-
- if num_blocks > 0:
- for j in range(config["layers_per_block"]):
- paths = renew_resnet_paths(blocks[j])
- assign_to_checkpoint(paths, new_checkpoint, checkpoint)
-
- if any("attn" in layer for layer in down_blocks[i]):
- num_attn = len(
- {".".join(shave_segments(layer, 2).split(".")[:2]) for layer in down_blocks[i] if "attn" in layer}
- )
- attns = {
- layer_id: [key for key in down_blocks[i] if f"attn.{layer_id}" in key]
- for layer_id in range(num_blocks)
- }
-
- if num_attn > 0:
- for j in range(config["layers_per_block"]):
- paths = renew_attention_paths(attns[j])
- assign_to_checkpoint(paths, new_checkpoint, checkpoint, config=config)
-
- mid_block_1_layers = [key for key in checkpoint if "mid.block_1" in key]
- mid_block_2_layers = [key for key in checkpoint if "mid.block_2" in key]
- mid_attn_1_layers = [key for key in checkpoint if "mid.attn_1" in key]
-
- # Mid new 2
- paths = renew_resnet_paths(mid_block_1_layers)
- assign_to_checkpoint(
- paths,
- new_checkpoint,
- checkpoint,
- additional_replacements=[{"old": "mid.", "new": "mid_new_2."}, {"old": "block_1", "new": "resnets.0"}],
- )
-
- paths = renew_resnet_paths(mid_block_2_layers)
- assign_to_checkpoint(
- paths,
- new_checkpoint,
- checkpoint,
- additional_replacements=[{"old": "mid.", "new": "mid_new_2."}, {"old": "block_2", "new": "resnets.1"}],
- )
-
- paths = renew_attention_paths(mid_attn_1_layers, in_mid=True)
- assign_to_checkpoint(
- paths,
- new_checkpoint,
- checkpoint,
- additional_replacements=[{"old": "mid.", "new": "mid_new_2."}, {"old": "attn_1", "new": "attentions.0"}],
- )
-
- for i in range(num_up_blocks):
- block_id = num_up_blocks - 1 - i
-
- if any("upsample" in layer for layer in up_blocks[i]):
- new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = checkpoint[
- f"up.{i}.upsample.conv.weight"
- ]
- new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = checkpoint[f"up.{i}.upsample.conv.bias"]
-
- if any("block" in layer for layer in up_blocks[i]):
- num_blocks = len(
- {".".join(shave_segments(layer, 2).split(".")[:2]) for layer in up_blocks[i] if "block" in layer}
- )
- blocks = {
- layer_id: [key for key in up_blocks[i] if f"block.{layer_id}" in key] for layer_id in range(num_blocks)
- }
-
- if num_blocks > 0:
- for j in range(config["layers_per_block"] + 1):
- replace_indices = {"old": f"up_blocks.{i}", "new": f"up_blocks.{block_id}"}
- paths = renew_resnet_paths(blocks[j])
- assign_to_checkpoint(paths, new_checkpoint, checkpoint, additional_replacements=[replace_indices])
-
- if any("attn" in layer for layer in up_blocks[i]):
- num_attn = len(
- {".".join(shave_segments(layer, 2).split(".")[:2]) for layer in up_blocks[i] if "attn" in layer}
- )
- attns = {
- layer_id: [key for key in up_blocks[i] if f"attn.{layer_id}" in key] for layer_id in range(num_blocks)
- }
-
- if num_attn > 0:
- for j in range(config["layers_per_block"] + 1):
- replace_indices = {"old": f"up_blocks.{i}", "new": f"up_blocks.{block_id}"}
- paths = renew_attention_paths(attns[j])
- assign_to_checkpoint(paths, new_checkpoint, checkpoint, additional_replacements=[replace_indices])
-
- new_checkpoint = {k.replace("mid_new_2", "mid_block"): v for k, v in new_checkpoint.items()}
- return new_checkpoint
-
-
-def convert_vq_autoenc_checkpoint(checkpoint, config):
- """
- Takes a state dict and a config, and returns a converted checkpoint.
- """
- new_checkpoint = {}
-
- new_checkpoint["encoder.conv_norm_out.weight"] = checkpoint["encoder.norm_out.weight"]
- new_checkpoint["encoder.conv_norm_out.bias"] = checkpoint["encoder.norm_out.bias"]
-
- new_checkpoint["encoder.conv_in.weight"] = checkpoint["encoder.conv_in.weight"]
- new_checkpoint["encoder.conv_in.bias"] = checkpoint["encoder.conv_in.bias"]
- new_checkpoint["encoder.conv_out.weight"] = checkpoint["encoder.conv_out.weight"]
- new_checkpoint["encoder.conv_out.bias"] = checkpoint["encoder.conv_out.bias"]
-
- new_checkpoint["decoder.conv_norm_out.weight"] = checkpoint["decoder.norm_out.weight"]
- new_checkpoint["decoder.conv_norm_out.bias"] = checkpoint["decoder.norm_out.bias"]
-
- new_checkpoint["decoder.conv_in.weight"] = checkpoint["decoder.conv_in.weight"]
- new_checkpoint["decoder.conv_in.bias"] = checkpoint["decoder.conv_in.bias"]
- new_checkpoint["decoder.conv_out.weight"] = checkpoint["decoder.conv_out.weight"]
- new_checkpoint["decoder.conv_out.bias"] = checkpoint["decoder.conv_out.bias"]
-
- num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in checkpoint if "down" in layer})
- down_blocks = {
- layer_id: [key for key in checkpoint if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
- }
-
- num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in checkpoint if "up" in layer})
- up_blocks = {layer_id: [key for key in checkpoint if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)}
-
- for i in range(num_down_blocks):
- block_id = (i - 1) // (config["layers_per_block"] + 1)
-
- if any("downsample" in layer for layer in down_blocks[i]):
- new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = checkpoint[
- f"encoder.down.{i}.downsample.conv.weight"
- ]
- new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = checkpoint[
- f"encoder.down.{i}.downsample.conv.bias"
- ]
-
- if any("block" in layer for layer in down_blocks[i]):
- num_blocks = len(
- {".".join(shave_segments(layer, 3).split(".")[:3]) for layer in down_blocks[i] if "block" in layer}
- )
- blocks = {
- layer_id: [key for key in down_blocks[i] if f"block.{layer_id}" in key]
- for layer_id in range(num_blocks)
- }
-
- if num_blocks > 0:
- for j in range(config["layers_per_block"]):
- paths = renew_resnet_paths(blocks[j])
- assign_to_checkpoint(paths, new_checkpoint, checkpoint)
-
- if any("attn" in layer for layer in down_blocks[i]):
- num_attn = len(
- {".".join(shave_segments(layer, 3).split(".")[:3]) for layer in down_blocks[i] if "attn" in layer}
- )
- attns = {
- layer_id: [key for key in down_blocks[i] if f"attn.{layer_id}" in key]
- for layer_id in range(num_blocks)
- }
-
- if num_attn > 0:
- for j in range(config["layers_per_block"]):
- paths = renew_attention_paths(attns[j])
- assign_to_checkpoint(paths, new_checkpoint, checkpoint, config=config)
-
- mid_block_1_layers = [key for key in checkpoint if "mid.block_1" in key]
- mid_block_2_layers = [key for key in checkpoint if "mid.block_2" in key]
- mid_attn_1_layers = [key for key in checkpoint if "mid.attn_1" in key]
-
- # Mid new 2
- paths = renew_resnet_paths(mid_block_1_layers)
- assign_to_checkpoint(
- paths,
- new_checkpoint,
- checkpoint,
- additional_replacements=[{"old": "mid.", "new": "mid_new_2."}, {"old": "block_1", "new": "resnets.0"}],
- )
-
- paths = renew_resnet_paths(mid_block_2_layers)
- assign_to_checkpoint(
- paths,
- new_checkpoint,
- checkpoint,
- additional_replacements=[{"old": "mid.", "new": "mid_new_2."}, {"old": "block_2", "new": "resnets.1"}],
- )
-
- paths = renew_attention_paths(mid_attn_1_layers, in_mid=True)
- assign_to_checkpoint(
- paths,
- new_checkpoint,
- checkpoint,
- additional_replacements=[{"old": "mid.", "new": "mid_new_2."}, {"old": "attn_1", "new": "attentions.0"}],
- )
-
- for i in range(num_up_blocks):
- block_id = num_up_blocks - 1 - i
-
- if any("upsample" in layer for layer in up_blocks[i]):
- new_checkpoint[f"decoder.up_blocks.{block_id}.upsamplers.0.conv.weight"] = checkpoint[
- f"decoder.up.{i}.upsample.conv.weight"
- ]
- new_checkpoint[f"decoder.up_blocks.{block_id}.upsamplers.0.conv.bias"] = checkpoint[
- f"decoder.up.{i}.upsample.conv.bias"
- ]
-
- if any("block" in layer for layer in up_blocks[i]):
- num_blocks = len(
- {".".join(shave_segments(layer, 3).split(".")[:3]) for layer in up_blocks[i] if "block" in layer}
- )
- blocks = {
- layer_id: [key for key in up_blocks[i] if f"block.{layer_id}" in key] for layer_id in range(num_blocks)
- }
-
- if num_blocks > 0:
- for j in range(config["layers_per_block"] + 1):
- replace_indices = {"old": f"up_blocks.{i}", "new": f"up_blocks.{block_id}"}
- paths = renew_resnet_paths(blocks[j])
- assign_to_checkpoint(paths, new_checkpoint, checkpoint, additional_replacements=[replace_indices])
-
- if any("attn" in layer for layer in up_blocks[i]):
- num_attn = len(
- {".".join(shave_segments(layer, 3).split(".")[:3]) for layer in up_blocks[i] if "attn" in layer}
- )
- attns = {
- layer_id: [key for key in up_blocks[i] if f"attn.{layer_id}" in key] for layer_id in range(num_attn)
- }
-
- if num_attn > 0:
- for j in range(config["layers_per_block"] + 1):
- replace_indices = {"old": f"up_blocks.{i}", "new": f"up_blocks.{block_id}"}
- paths = renew_attention_paths(attns[j])
- assign_to_checkpoint(paths, new_checkpoint, checkpoint, additional_replacements=[replace_indices])
-
- new_checkpoint = {k.replace("mid_new_2", "mid_block"): v for k, v in new_checkpoint.items()}
- new_checkpoint["quant_conv.weight"] = checkpoint["quant_conv.weight"]
- new_checkpoint["quant_conv.bias"] = checkpoint["quant_conv.bias"]
- if "quantize.embedding.weight" in checkpoint:
- new_checkpoint["quantize.embedding.weight"] = checkpoint["quantize.embedding.weight"]
- new_checkpoint["post_quant_conv.weight"] = checkpoint["post_quant_conv.weight"]
- new_checkpoint["post_quant_conv.bias"] = checkpoint["post_quant_conv.bias"]
-
- return new_checkpoint
-
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser()
-
- parser.add_argument(
- "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
- )
-
- parser.add_argument(
- "--config_file",
- default=None,
- type=str,
- required=True,
- help="The config json file corresponding to the architecture.",
- )
-
- parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
-
- args = parser.parse_args()
- checkpoint = torch.load(args.checkpoint_path)
-
- with open(args.config_file) as f:
- config = json.loads(f.read())
-
- # VQ/KL autoencoder checkpoints carry both encoder and decoder weights; otherwise assume a plain DDPM UNet
- key_prefix_set = {key.split(".")[0] for key in checkpoint.keys()}
- if "encoder" in key_prefix_set and "decoder" in key_prefix_set:
- converted_checkpoint = convert_vq_autoenc_checkpoint(checkpoint, config)
- else:
- converted_checkpoint = convert_ddpm_checkpoint(checkpoint, config)
-
- if "ddpm" in config:
- del config["ddpm"]
-
- if config["_class_name"] == "VQModel":
- model = VQModel(**config)
- model.load_state_dict(converted_checkpoint)
- model.save_pretrained(args.dump_path)
- elif config["_class_name"] == "AutoencoderKL":
- model = AutoencoderKL(**config)
- model.load_state_dict(converted_checkpoint)
- model.save_pretrained(args.dump_path)
- else:
- model = UNet2DModel(**config)
- model.load_state_dict(converted_checkpoint)
-
- scheduler = DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1]))
-
- pipe = DDPMPipeline(unet=model, scheduler=scheduler)
- pipe.save_pretrained(args.dump_path)
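
Once the script above has written `dump_path`, the converted weights load through the regular diffusers API. A minimal sketch, assuming diffusers is installed and `./converted` is a hypothetical directory produced via `--dump_path ./converted`:

    from diffusers import VQModel

    # load the checkpoint saved by model.save_pretrained(args.dump_path)
    model = VQModel.from_pretrained("./converted")
    print(sum(p.numel() for p in model.parameters()))  # quick sanity check on the weights
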
diff --git a/spaces/declare-lab/tango/diffusers/tests/conftest.py b/spaces/declare-lab/tango/diffusers/tests/conftest.py
deleted file mode 100644
index 6a02a38163ab01b1c2d0d12d5578e06d91b77cc8..0000000000000000000000000000000000000000
--- a/spaces/declare-lab/tango/diffusers/tests/conftest.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# Copyright 2023 The HuggingFace Team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# tests directory-specific settings - this file is run automatically
-# by pytest before any tests are run
-
-import sys
-import warnings
-from os.path import abspath, dirname, join
-
-
-# allow having multiple repository checkouts and not needing to remember to rerun
-# 'pip install -e .[dev]' when switching between checkouts and running tests.
-git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
-sys.path.insert(1, git_repo_path)
-
-# silence FutureWarning warnings in tests since often we can't act on them until
-# they become normal warnings - i.e. the tests still need to test the current functionality
-warnings.simplefilter(action="ignore", category=FutureWarning)
-
-
-def pytest_addoption(parser):
- from diffusers.utils.testing_utils import pytest_addoption_shared
-
- pytest_addoption_shared(parser)
-
-
-def pytest_terminal_summary(terminalreporter):
- from diffusers.utils.testing_utils import pytest_terminal_summary_main
-
- make_reports = terminalreporter.config.getoption("--make-reports")
- if make_reports:
- pytest_terminal_summary_main(terminalreporter, id=make_reports)
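
For orientation: both hooks above defer to shared helpers in diffusers.utils.testing_utils. A minimal sketch of the registration side, under the assumption that the shared helper simply adds the --make-reports flag read back in pytest_terminal_summary (the exact signature lives in testing_utils):

    def pytest_addoption_shared(parser):
        # registers the flag consumed by pytest_terminal_summary above
        parser.addoption(
            "--make-reports",
            action="store",
            default=False,
            help="generate report files; the value is used as the report id",
        )
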
diff --git a/spaces/deepwisdom/MetaGPT/tests/metagpt/test_software_company.py b/spaces/deepwisdom/MetaGPT/tests/metagpt/test_software_company.py
deleted file mode 100644
index 00538442c9790771e4ed4df8090cb1656f78e252..0000000000000000000000000000000000000000
--- a/spaces/deepwisdom/MetaGPT/tests/metagpt/test_software_company.py
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-"""
-@Time : 2023/5/15 11:40
-@Author : alexanderwu
-@File : test_software_company.py
-"""
-import pytest
-
-from metagpt.logs import logger
-from metagpt.software_company import SoftwareCompany
-
-
-@pytest.mark.asyncio
-async def test_software_company():
- company = SoftwareCompany()
- company.start_project("做一个基础搜索引擎,可以支持知识库")  # "Build a basic search engine that can support a knowledge base"
- history = await company.run(n_round=5)
- logger.info(history)
diff --git a/spaces/deprem-ml/deprem_satellite_semantic_whu/app.py b/spaces/deprem-ml/deprem_satellite_semantic_whu/app.py
deleted file mode 100644
index ee138c48feee56025434039cbbe75f4a79fe2bf2..0000000000000000000000000000000000000000
--- a/spaces/deprem-ml/deprem_satellite_semantic_whu/app.py
+++ /dev/null
@@ -1,169 +0,0 @@
-from transformers import SegformerForSemanticSegmentation
-from transformers import SegformerImageProcessor
-from PIL import Image
-import gradio as gr
-import numpy as np
-import random
-import cv2
-import torch
-
-from imutils import perspective
-
-
-def midpoint(ptA, ptB):
- return ((ptA[0] + ptB[0]) * 0.5, (ptA[1] + ptB[1]) * 0.5)
-# Morphology and sharpening kernels used by the post-processing below
-kernel1 = np.ones((5, 5), dtype=np.float32)
-blur_radius = 0.5
-kernel_sharpening = np.array([[-1, -1, -1],
- [-1, 9, -1],
- [-1, -1, -1]]) * (1 / 9)
-
-
-def cca_analysis(image, predicted_mask):
- # connected-component analysis: draw an oriented box around each detected region
- image2 = np.asarray(image)
- # resize the mask to the input image's (width, height)
- image = cv2.resize(predicted_mask, (image2.shape[1], image2.shape[0]), interpolation=cv2.INTER_AREA)
-
- image = cv2.morphologyEx(image, cv2.MORPH_OPEN, kernel1, iterations=1)
- gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
- thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
-
- labels = cv2.connectedComponents(thresh, connectivity=8)[1]
- unique_labels = np.unique(labels)
- count2 = 0
- for label in unique_labels:
- if label == 0:
- continue
-
- # Create a mask
- mask = np.zeros(thresh.shape, dtype="uint8")
- mask[labels == label] = 255
-
- # Find contours and determine contour area
- cnts, hierarchy = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
- cnts = cnts[0]
- c_area = cv2.contourArea(cnts)
- # minimum area threshold for counting a component
- if c_area > 100:
- count2 += 1
-
- rect = cv2.minAreaRect(cnts)
- box = cv2.boxPoints(rect)
- box = np.array(box, dtype="int")
- box = perspective.order_points(box)
- color1 = list(np.random.choice(range(150), size=3))
- color = [int(color1[0]), int(color1[1]), int(color1[2])]
- cv2.drawContours(image2, [box.astype("int")], 0, color, 2)
- (tl, tr, br, bl) = box
-
- # compute the midpoints of the top and bottom edges,
- (tltrX, tltrY) = midpoint(tl, tr)
- (blbrX, blbrY) = midpoint(bl, br)
- # followed by the midpoints of the left and right edges
- (tlblX, tlblY) = midpoint(tl, bl)
- (trbrX, trbrY) = midpoint(tr, br)
- # draw the midpoints on the image
- cv2.circle(image2, (int(tltrX), int(tltrY)), 5, (255, 0, 0), -1)
- cv2.circle(image2, (int(blbrX), int(blbrY)), 5, (255, 0, 0), -1)
- cv2.circle(image2, (int(tlblX), int(tlblY)), 5, (255, 0, 0), -1)
- cv2.circle(image2, (int(trbrX), int(trbrY)), 5, (255, 0, 0), -1)
- cv2.line(image2, (int(tltrX), int(tltrY)), (int(blbrX), int(blbrY)), color, 2)
- cv2.line(image2, (int(tlblX), int(tlblY)), (int(trbrX), int(trbrY)), color, 2)
- return image2
-
-def to_rgb(img):
- result_new = np.zeros((img.shape[0], img.shape[1], 3))  # (H, W, 3)
- result_new[:,:,0]=img
- result_new[:,:,1]=img
- result_new[:,:,2]=img
- result_new=np.uint8(result_new*255)
- return result_new
-
-image_list = [
- "data/1.png",
- "data/2.png",
- "data/3.png",
- "data/4.png",
-]
-
-model_path = ['deprem-ml/deprem_satellite_semantic_whu']
-
-def visualize_instance_seg_mask(mask):
- # Initialize image with zeros with the image resolution
- # of the segmentation mask and 3 channels
- image = np.zeros((mask.shape[0], mask.shape[1], 3))
-
- # Create labels
- labels = np.unique(mask)
- label2color = {
- label: (
- random.randint(0, 255),
- random.randint(0, 255),
- random.randint(0, 255),
- )
- for label in labels
- }
-
- for height in range(image.shape[0]):
- for width in range(image.shape[1]):
- image[height, width, :] = label2color[mask[height, width]]
-
- image = image / 255
- return image
-
-
-def Segformer_Segmentation(image_path, model_id, postpro):
- output_save = "output.png"
-
- test_image = cv2.imread(image_path)
-
- model = SegformerForSemanticSegmentation.from_pretrained(model_id)
- processor = SegformerImageProcessor.from_pretrained(model_id)
-
- inputs = processor(images=test_image, return_tensors="pt")
- with torch.no_grad():
- outputs = model(**inputs)
-
- result = processor.post_process_semantic_segmentation(outputs)[0]
- result = np.array(result)
- if postpro == "Connected Components Labelling":
- result = to_rgb(result)
- result = cca_analysis(test_image, result)
- else:
- result = visualize_instance_seg_mask(result)
- result = result * 255
-
- cv2.imwrite(output_save, result)
- return image_path, output_save
-
-examples = [[image_list[0], "deprem-ml/deprem_satellite_semantic_whu"],
- [image_list[1], "deprem-ml/deprem_satellite_semantic_whu"],
- [image_list[2], "deprem-ml/deprem_satellite_semantic_whu"],
- [image_list[3], "deprem-ml/deprem_satellite_semantic_whu"]]
-
-title = "Deprem ML - Segformer Semantic Segmentation"
-
-app = gr.Blocks()
-with app:
- gr.HTML("
{}
".format(title))
- with gr.Row():
- with gr.Column():
- input_video = gr.Image(type='filepath')
- model_id = gr.Dropdown(value=model_path[0], choices=model_path,label="Model Name")
- cca = gr.Dropdown(value="Connected Components Labelling", choices=["Connected Components Labelling","No Post Process"],label="Post Process")
-
- input_video_button = gr.Button(value="Predict")
-
- with gr.Column():
- output_original_image = gr.Image(type='filepath')
-
- with gr.Column():
- output_mask_image = gr.Image(type='filepath')
-
-
- gr.Examples(examples, inputs=[input_video, model_id, cca], outputs=[output_original_image, output_mask_image], fn=Segformer_Segmentation, cache_examples=True)
- input_video_button.click(Segformer_Segmentation, inputs=[input_video, model_id, cca], outputs=[output_original_image, output_mask_image])
-
-app.launch(debug=True)
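
The same checkpoint can be driven outside Gradio. A minimal sketch, assuming transformers and opencv-python are installed and data/1.png (one of the bundled example images) exists locally:

    import cv2
    import torch
    import numpy as np
    from transformers import SegformerForSemanticSegmentation, SegformerImageProcessor

    model_id = "deprem-ml/deprem_satellite_semantic_whu"
    model = SegformerForSemanticSegmentation.from_pretrained(model_id)
    processor = SegformerImageProcessor.from_pretrained(model_id)

    image = cv2.imread("data/1.png")
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)

    mask = processor.post_process_semantic_segmentation(outputs)[0].numpy()
    print(mask.shape, np.unique(mask))  # label ids per pixel
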
diff --git a/spaces/dfhhr4/QQsign/README.md b/spaces/dfhhr4/QQsign/README.md
deleted file mode 100644
index bd56881a2a7709591343e2f15af9a6a8133e115b..0000000000000000000000000000000000000000
--- a/spaces/dfhhr4/QQsign/README.md
+++ /dev/null
@@ -1,11 +0,0 @@
----
-title: QQsign
-emoji: 🦀
-colorFrom: blue
-colorTo: purple
-sdk: docker
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
\ No newline at end of file
diff --git a/spaces/diacanFperku/AutoGPT/Ableton Live Suite 10.1.11 Keygen Crack Full Version.md b/spaces/diacanFperku/AutoGPT/Ableton Live Suite 10.1.11 Keygen Crack Full Version.md
deleted file mode 100644
index b44592f337ceb678510869934e21443819b06399..0000000000000000000000000000000000000000
--- a/spaces/diacanFperku/AutoGPT/Ableton Live Suite 10.1.11 Keygen Crack Full Version.md
+++ /dev/null
@@ -1,38 +0,0 @@
-
-Ableton Live Suite 10.1.11 Keygen Crack full version
-
-A:
-
-This happens when you run the code and have no data.
-
-In that case, we have 5 workers running the code and creating 5 data sets.
-
-Hence, you are running 5 times the code that uses the data already prepared.
-
-You need to start and clear your data before running it.
-
-You could do that in a few ways:
-
-Uninstall the PyTorch library
-
-Remove any previous data that could be interfering with this code.
-
-If you use PyCharm, in the project's properties, there is an option to delete the data.
-
-Just remove the data from the data folder.
-
-, f4 and f6 are omitted to obtain the output voltages of Dout4 and Dout6, respectively. In this circuit, it is enough that the number of transistors is small to save space and to reduce the power consumption.
-
-However, the circuit shown in FIG. 7 has a problem that when a sufficient number of outputs cannot be obtained, an inverting amplifier circuit such as the inverting amplifier circuit shown in FIG. 4 must be used. Therefore, the circuit shown in FIG. 7 has a problem in that the circuit scale is large and an inverting amplifier circuit having a large circuit scale is needed.Q:
-
-Are there any kind of physical law that says black holes always have a singularity inside?
-
-I am reading an article on black hole singularity and I get a bit confused about the topic. The article says "The singularity is hidden by a horizon, which never becomes the point itself, but in its place there is an event horizon." But, I also find this concept in another book, The Fabric of the Cosmos. It says "The black hole singularity is shielded from direct view by a very thin event horizon."
-
-So, are there any kind of physical law which says that, no matter what kind of black holes the universe have, the singularity is always inside the horizon?
-
-The singularity is the point where the gravitational field gets very strong. Even a small (negligible) local mass density will make the spacetime curvature diverge. Of course you can think of "small" in a different way, if you have an object with huge mass you can ignore the curvature there and it would still diverge. So any mass will make the curvature diverge, and hence any point will make the curvature diverge.
-
-To 4fefd39f24
-
-
-
diff --git a/spaces/diacanFperku/AutoGPT/Doraemon Cartoon In Urdu Download Torrent TOP.md b/spaces/diacanFperku/AutoGPT/Doraemon Cartoon In Urdu Download Torrent TOP.md
deleted file mode 100644
index 6fad5ed5d7412e9c1b59d0c08465d2cd4cea1d80..0000000000000000000000000000000000000000
--- a/spaces/diacanFperku/AutoGPT/Doraemon Cartoon In Urdu Download Torrent TOP.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
-How exactly do you download a torrent file...? View Entire Discussion (8 Comments). More posts from the Doraemon community. 4d29de3e1b
-
-
-
diff --git a/spaces/diagaiwei/ir_chinese_medqa/utility/rankings/tune.py b/spaces/diagaiwei/ir_chinese_medqa/utility/rankings/tune.py
deleted file mode 100644
index 035d1c075837beabf8d399eb9d2f85a9aafd58d4..0000000000000000000000000000000000000000
--- a/spaces/diagaiwei/ir_chinese_medqa/utility/rankings/tune.py
+++ /dev/null
@@ -1,66 +0,0 @@
-import os
-import ujson
-import random
-
-from argparse import ArgumentParser
-from colbert.utils.utils import print_message, create_directory
-from utility.utils.save_metadata import save_metadata
-
-
-def main(args):
- AllMetrics = {}
- Scores = {}
-
- for path in args.paths:
- with open(path) as f:
- metric = ujson.load(f)
- AllMetrics[path] = metric
-
- for k in args.metric:
- metric = metric[k]
-
- assert isinstance(metric, float)
- Scores[path] = metric
-
- MaxKey = max(Scores, key=Scores.get)
-
- MaxCKPT = int(MaxKey.split('/')[-2].split('.')[-1])
- MaxARGS = os.path.join(os.path.dirname(MaxKey), 'logs', 'args.json')
-
- with open(MaxARGS) as f:
- logs = ujson.load(f)
- MaxCHECKPOINT = logs['checkpoint']
-
- assert MaxCHECKPOINT.endswith(f'colbert-{MaxCKPT}.dnn'), (MaxCHECKPOINT, MaxCKPT)
-
- with open(args.output, 'w') as f:
- f.write(MaxCHECKPOINT)
-
- args.Scores = Scores
- args.AllMetrics = AllMetrics
-
- save_metadata(f'{args.output}.meta', args)
-
- print('\n\n', args, '\n\n')
- print(args.output)
- print_message("#> Done.")
-
-
-if __name__ == "__main__":
- random.seed(12345)
-
- parser = ArgumentParser(description='.')
-
- # Input / Output Arguments
- parser.add_argument('--metric', dest='metric', required=True, type=str) # e.g., success.20
- parser.add_argument('--paths', dest='paths', required=True, type=str, nargs='+')
- parser.add_argument('--output', dest='output', required=True, type=str)
-
- args = parser.parse_args()
-
- args.metric = args.metric.split('.')
-
- assert not os.path.exists(args.output), args.output
- create_directory(os.path.dirname(args.output))
-
- main(args)
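
The dotted --metric argument (e.g. success.20) is resolved by the nested-key loop in main(). A toy illustration of that lookup; the metrics dictionary here is made up:

    metrics = {"success": {"20": 0.83}}  # hypothetical contents of one --paths file

    value = metrics
    for k in "success.20".split("."):
        value = value[k]

    assert isinstance(value, float)  # tune.py requires a float leaf
    print(value)  # 0.83
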
diff --git a/spaces/digitalxingtong/Bufeiyan-c-Bert-VITS2/transcribe_genshin.py b/spaces/digitalxingtong/Bufeiyan-c-Bert-VITS2/transcribe_genshin.py
deleted file mode 100644
index acc98814af6189d129ab85946525bec55419a33f..0000000000000000000000000000000000000000
--- a/spaces/digitalxingtong/Bufeiyan-c-Bert-VITS2/transcribe_genshin.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# coding=gbk
-import os
-import argparse
-import librosa
-import numpy as np
-from multiprocessing import Pool, cpu_count
-
-import soundfile
-from scipy.io import wavfile
-from tqdm import tqdm
-
-global speaker_annos
-speaker_annos = []
-
-def process(item):
- spkdir, wav_name, args = item
- speaker = spkdir.replace("\\", "/").split("/")[-1]
- wav_path = os.path.join(args.in_dir, speaker, wav_name)
- if os.path.exists(wav_path) and '.wav' in wav_path:
- os.makedirs(os.path.join(args.out_dir, speaker), exist_ok=True)
- wav, sr = librosa.load(wav_path, sr=args.sr)
- soundfile.write(
- os.path.join(args.out_dir, speaker, wav_name),
- wav,
- sr
- )
-
-def process_text(item):
- spkdir, wav_name, args = item
- speaker = spkdir.replace("\\", "/").split("/")[-1]
- wav_path = os.path.join(args.in_dir, speaker, wav_name)
- global speaker_annos
- tr_name = wav_name.replace('.wav', '')
- with open(args.out_dir+'/'+speaker+'/'+tr_name+'.lab', "r", encoding="utf-8") as file:
- text = file.read()
- text = text.replace("{NICKNAME}",'')
- text = text.replace("{M#}{F#}",'')
- text = text.replace("{M#}{F#}",'')
- substring = "{M#}{F#}"
- if substring in text:
- if tr_name.endswith("a"):
- text = text.replace("{M#}{F#}",'')
- if tr_name.endswith("b"):
- text = text.replace("{M#}{F#}",'')
- text = text.replace("#",'')
- text = "ZH|" + text + "\n" #
- speaker_annos.append(args.out_dir+'/'+speaker+'/'+wav_name+ "|" + speaker + "|" + text)
-
-
-
-if __name__ == "__main__":
- parent_dir = "./genshin_dataset/"
- speaker_names = list(os.walk(parent_dir))[0][1]
- parser = argparse.ArgumentParser()
- parser.add_argument("--sr", type=int, default=44100, help="sampling rate")
- parser.add_argument("--in_dir", type=str, default="./genshin_dataset", help="path to source dir")
- parser.add_argument("--out_dir", type=str, default="./genshin_dataset", help="path to target dir")
- args = parser.parse_args()
- # n_processes = 8
- n_processes = cpu_count() - 2 if cpu_count() > 4 else 1
- pool = Pool(processes=n_processes)
-
- for speaker in os.listdir(args.in_dir):
- spk_dir = os.path.join(args.in_dir, speaker)
- if os.path.isdir(spk_dir):
- print(spk_dir)
- for _ in tqdm(pool.imap_unordered(process, [(spk_dir, i, args) for i in os.listdir(spk_dir) if i.endswith("wav")])):
- pass
- for i in os.listdir(spk_dir):
- if i.endswith("wav"):
- pro=(spk_dir, i, args)
- process_text(pro)
- if len(speaker_annos) == 0:
- print("transcribe error!!!")
- with open("./filelists/short_character_anno.list", 'w', encoding='utf-8') as f:
- for line in speaker_annos:
- f.write(line)
- print("transcript file finished.")
diff --git a/spaces/digitalxingtong/Nailv-read-Bert-Vits2/monotonic_align/core.py b/spaces/digitalxingtong/Nailv-read-Bert-Vits2/monotonic_align/core.py
deleted file mode 100644
index 5ff728cd74c9228346a82ec64a9829cb98ad315e..0000000000000000000000000000000000000000
--- a/spaces/digitalxingtong/Nailv-read-Bert-Vits2/monotonic_align/core.py
+++ /dev/null
@@ -1,36 +0,0 @@
-import numba
-
-
-@numba.jit(numba.void(numba.int32[:, :, ::1], numba.float32[:, :, ::1], numba.int32[::1], numba.int32[::1]),
- nopython=True, nogil=True)
-def maximum_path_jit(paths, values, t_ys, t_xs):
- b = paths.shape[0]
- max_neg_val = -1e9
- for i in range(int(b)):
- path = paths[i]
- value = values[i]
- t_y = t_ys[i]
- t_x = t_xs[i]
-
- v_prev = v_cur = 0.0
- index = t_x - 1
-
- for y in range(t_y):
- for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)):
- if x == y:
- v_cur = max_neg_val
- else:
- v_cur = value[y - 1, x]
- if x == 0:
- if y == 0:
- v_prev = 0.
- else:
- v_prev = max_neg_val
- else:
- v_prev = value[y - 1, x - 1]
- value[y, x] += max(v_prev, v_cur)
-
- for y in range(t_y - 1, -1, -1):
- path[y, index] = 1
- if index != 0 and (index == y or value[y - 1, index] < value[y - 1, index - 1]):
- index = index - 1
\ No newline at end of file
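
A minimal sketch of driving maximum_path_jit directly with NumPy buffers (batch of one; the shapes and random scores are arbitrary), assuming numba and numpy are installed and the module above is importable as monotonic_align.core:

    import numpy as np
    from monotonic_align.core import maximum_path_jit

    t_y, t_x = 6, 4
    paths = np.zeros((1, t_y, t_x), dtype=np.int32)            # output buffer, filled in place
    values = np.random.randn(1, t_y, t_x).astype(np.float32)   # per-position log-likelihood scores

    maximum_path_jit(paths, values,
                     np.array([t_y], dtype=np.int32),
                     np.array([t_x], dtype=np.int32))
    print(paths[0])  # one monotonic 0/1 alignment path
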
diff --git a/spaces/digitalxingtong/Nanami-Bert-VITS2/models.py b/spaces/digitalxingtong/Nanami-Bert-VITS2/models.py
deleted file mode 100644
index d4afe44d883691610c5903e602a3ca245fcb3a5c..0000000000000000000000000000000000000000
--- a/spaces/digitalxingtong/Nanami-Bert-VITS2/models.py
+++ /dev/null
@@ -1,707 +0,0 @@
-import copy
-import math
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-import commons
-import modules
-import attentions
-import monotonic_align
-
-from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
-from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
-
-from commons import init_weights, get_padding
-from text import symbols, num_tones, num_languages
-class DurationDiscriminator(nn.Module): #vits2
- def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0):
- super().__init__()
-
- self.in_channels = in_channels
- self.filter_channels = filter_channels
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.gin_channels = gin_channels
-
- self.drop = nn.Dropout(p_dropout)
- self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size//2)
- self.norm_1 = modules.LayerNorm(filter_channels)
- self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size//2)
- self.norm_2 = modules.LayerNorm(filter_channels)
- self.dur_proj = nn.Conv1d(1, filter_channels, 1)
-
- self.pre_out_conv_1 = nn.Conv1d(2*filter_channels, filter_channels, kernel_size, padding=kernel_size//2)
- self.pre_out_norm_1 = modules.LayerNorm(filter_channels)
- self.pre_out_conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size//2)
- self.pre_out_norm_2 = modules.LayerNorm(filter_channels)
-
- if gin_channels != 0:
- self.cond = nn.Conv1d(gin_channels, in_channels, 1)
-
- self.output_layer = nn.Sequential(
- nn.Linear(filter_channels, 1),
- nn.Sigmoid()
- )
-
- def forward_probability(self, x, x_mask, dur, g=None):
- dur = self.dur_proj(dur)
- x = torch.cat([x, dur], dim=1)
- x = self.pre_out_conv_1(x * x_mask)
- x = torch.relu(x)
- x = self.pre_out_norm_1(x)
- x = self.drop(x)
- x = self.pre_out_conv_2(x * x_mask)
- x = torch.relu(x)
- x = self.pre_out_norm_2(x)
- x = self.drop(x)
- x = x * x_mask
- x = x.transpose(1, 2)
- output_prob = self.output_layer(x)
- return output_prob
-
- def forward(self, x, x_mask, dur_r, dur_hat, g=None):
- x = torch.detach(x)
- if g is not None:
- g = torch.detach(g)
- x = x + self.cond(g)
- x = self.conv_1(x * x_mask)
- x = torch.relu(x)
- x = self.norm_1(x)
- x = self.drop(x)
- x = self.conv_2(x * x_mask)
- x = torch.relu(x)
- x = self.norm_2(x)
- x = self.drop(x)
-
- output_probs = []
- for dur in [dur_r, dur_hat]:
- output_prob = self.forward_probability(x, x_mask, dur, g)
- output_probs.append(output_prob)
-
- return output_probs
-
-class TransformerCouplingBlock(nn.Module):
- def __init__(self,
- channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- n_flows=4,
- gin_channels=0,
- share_parameter=False
- ):
-
- super().__init__()
- self.channels = channels
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.n_layers = n_layers
- self.n_flows = n_flows
- self.gin_channels = gin_channels
-
- self.flows = nn.ModuleList()
-
- self.wn = attentions.FFT(hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout, isflow = True, gin_channels = self.gin_channels) if share_parameter else None
-
- for i in range(n_flows):
- self.flows.append(
- modules.TransformerCouplingLayer(channels, hidden_channels, kernel_size, n_layers, n_heads, p_dropout, filter_channels, mean_only=True, wn_sharing_parameter=self.wn, gin_channels = self.gin_channels))
- self.flows.append(modules.Flip())
-
- def forward(self, x, x_mask, g=None, reverse=False):
- if not reverse:
- for flow in self.flows:
- x, _ = flow(x, x_mask, g=g, reverse=reverse)
- else:
- for flow in reversed(self.flows):
- x = flow(x, x_mask, g=g, reverse=reverse)
- return x
-
-class StochasticDurationPredictor(nn.Module):
- def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0):
- super().__init__()
- filter_channels = in_channels  # this override should be removed in a future version
- self.in_channels = in_channels
- self.filter_channels = filter_channels
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.n_flows = n_flows
- self.gin_channels = gin_channels
-
- self.log_flow = modules.Log()
- self.flows = nn.ModuleList()
- self.flows.append(modules.ElementwiseAffine(2))
- for i in range(n_flows):
- self.flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3))
- self.flows.append(modules.Flip())
-
- self.post_pre = nn.Conv1d(1, filter_channels, 1)
- self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1)
- self.post_convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
- self.post_flows = nn.ModuleList()
- self.post_flows.append(modules.ElementwiseAffine(2))
- for i in range(4):
- self.post_flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3))
- self.post_flows.append(modules.Flip())
-
- self.pre = nn.Conv1d(in_channels, filter_channels, 1)
- self.proj = nn.Conv1d(filter_channels, filter_channels, 1)
- self.convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
- if gin_channels != 0:
- self.cond = nn.Conv1d(gin_channels, filter_channels, 1)
-
- def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0):
- x = torch.detach(x)
- x = self.pre(x)
- if g is not None:
- g = torch.detach(g)
- x = x + self.cond(g)
- x = self.convs(x, x_mask)
- x = self.proj(x) * x_mask
-
- if not reverse:
- flows = self.flows
- assert w is not None
-
- logdet_tot_q = 0
- h_w = self.post_pre(w)
- h_w = self.post_convs(h_w, x_mask)
- h_w = self.post_proj(h_w) * x_mask
- e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask
- z_q = e_q
- for flow in self.post_flows:
- z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w))
- logdet_tot_q += logdet_q
- z_u, z1 = torch.split(z_q, [1, 1], 1)
- u = torch.sigmoid(z_u) * x_mask
- z0 = (w - u) * x_mask
- logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1, 2])
- logq = torch.sum(-0.5 * (math.log(2 * math.pi) + (e_q ** 2)) * x_mask, [1, 2]) - logdet_tot_q
-
- logdet_tot = 0
- z0, logdet = self.log_flow(z0, x_mask)
- logdet_tot += logdet
- z = torch.cat([z0, z1], 1)
- for flow in flows:
- z, logdet = flow(z, x_mask, g=x, reverse=reverse)
- logdet_tot = logdet_tot + logdet
- nll = torch.sum(0.5 * (math.log(2 * math.pi) + (z ** 2)) * x_mask, [1, 2]) - logdet_tot
- return nll + logq # [b]
- else:
- flows = list(reversed(self.flows))
- flows = flows[:-2] + [flows[-1]] # remove a useless vflow
- z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale
- for flow in flows:
- z = flow(z, x_mask, g=x, reverse=reverse)
- z0, z1 = torch.split(z, [1, 1], 1)
- logw = z0
- return logw
-
-
-class DurationPredictor(nn.Module):
- def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0):
- super().__init__()
-
- self.in_channels = in_channels
- self.filter_channels = filter_channels
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.gin_channels = gin_channels
-
- self.drop = nn.Dropout(p_dropout)
- self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size // 2)
- self.norm_1 = modules.LayerNorm(filter_channels)
- self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size // 2)
- self.norm_2 = modules.LayerNorm(filter_channels)
- self.proj = nn.Conv1d(filter_channels, 1, 1)
-
- if gin_channels != 0:
- self.cond = nn.Conv1d(gin_channels, in_channels, 1)
-
- def forward(self, x, x_mask, g=None):
- x = torch.detach(x)
- if g is not None:
- g = torch.detach(g)
- x = x + self.cond(g)
- x = self.conv_1(x * x_mask)
- x = torch.relu(x)
- x = self.norm_1(x)
- x = self.drop(x)
- x = self.conv_2(x * x_mask)
- x = torch.relu(x)
- x = self.norm_2(x)
- x = self.drop(x)
- x = self.proj(x * x_mask)
- return x * x_mask
-
-
-class TextEncoder(nn.Module):
- def __init__(self,
- n_vocab,
- out_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- gin_channels=0):
- super().__init__()
- self.n_vocab = n_vocab
- self.out_channels = out_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.gin_channels = gin_channels
- self.emb = nn.Embedding(len(symbols), hidden_channels)
- nn.init.normal_(self.emb.weight, 0.0, hidden_channels ** -0.5)
- self.tone_emb = nn.Embedding(num_tones, hidden_channels)
- nn.init.normal_(self.tone_emb.weight, 0.0, hidden_channels ** -0.5)
- self.language_emb = nn.Embedding(num_languages, hidden_channels)
- nn.init.normal_(self.language_emb.weight, 0.0, hidden_channels ** -0.5)
- self.bert_proj = nn.Conv1d(1024, hidden_channels, 1)
-
- self.encoder = attentions.Encoder(
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- gin_channels=self.gin_channels)
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
-
- def forward(self, x, x_lengths, tone, language, bert, g=None):
- x = (self.emb(x)+ self.tone_emb(tone)+ self.language_emb(language)+self.bert_proj(bert).transpose(1,2)) * math.sqrt(self.hidden_channels) # [b, t, h]
- x = torch.transpose(x, 1, -1) # [b, h, t]
- x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
-
- x = self.encoder(x * x_mask, x_mask, g=g)
- stats = self.proj(x) * x_mask
-
- m, logs = torch.split(stats, self.out_channels, dim=1)
- return x, m, logs, x_mask
-
-
-class ResidualCouplingBlock(nn.Module):
- def __init__(self,
- channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- n_flows=4,
- gin_channels=0):
- super().__init__()
- self.channels = channels
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.n_flows = n_flows
- self.gin_channels = gin_channels
-
- self.flows = nn.ModuleList()
- for i in range(n_flows):
- self.flows.append(
- modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers,
- gin_channels=gin_channels, mean_only=True))
- self.flows.append(modules.Flip())
-
- def forward(self, x, x_mask, g=None, reverse=False):
- if not reverse:
- for flow in self.flows:
- x, _ = flow(x, x_mask, g=g, reverse=reverse)
- else:
- for flow in reversed(self.flows):
- x = flow(x, x_mask, g=g, reverse=reverse)
- return x
-
-
-class PosteriorEncoder(nn.Module):
- def __init__(self,
- in_channels,
- out_channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- gin_channels=0):
- super().__init__()
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.gin_channels = gin_channels
-
- self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
- self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels)
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
-
- def forward(self, x, x_lengths, g=None):
- x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
- x = self.pre(x) * x_mask
- x = self.enc(x, x_mask, g=g)
- stats = self.proj(x) * x_mask
- m, logs = torch.split(stats, self.out_channels, dim=1)
- z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
- return z, m, logs, x_mask
-
-
-class Generator(torch.nn.Module):
- def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates,
- upsample_initial_channel, upsample_kernel_sizes, gin_channels=0):
- super(Generator, self).__init__()
- self.num_kernels = len(resblock_kernel_sizes)
- self.num_upsamples = len(upsample_rates)
- self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3)
- resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2
-
- self.ups = nn.ModuleList()
- for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
- self.ups.append(weight_norm(
- ConvTranspose1d(upsample_initial_channel // (2 ** i), upsample_initial_channel // (2 ** (i + 1)),
- k, u, padding=(k - u) // 2)))
-
- self.resblocks = nn.ModuleList()
- for i in range(len(self.ups)):
- ch = upsample_initial_channel // (2 ** (i + 1))
- for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)):
- self.resblocks.append(resblock(ch, k, d))
-
- self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
- self.ups.apply(init_weights)
-
- if gin_channels != 0:
- self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
-
- def forward(self, x, g=None):
- x = self.conv_pre(x)
- if g is not None:
- x = x + self.cond(g)
-
- for i in range(self.num_upsamples):
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- x = self.ups[i](x)
- xs = None
- for j in range(self.num_kernels):
- if xs is None:
- xs = self.resblocks[i * self.num_kernels + j](x)
- else:
- xs += self.resblocks[i * self.num_kernels + j](x)
- x = xs / self.num_kernels
- x = F.leaky_relu(x)
- x = self.conv_post(x)
- x = torch.tanh(x)
-
- return x
-
- def remove_weight_norm(self):
- print('Removing weight norm...')
- for l in self.ups:
- remove_weight_norm(l)
- for l in self.resblocks:
- l.remove_weight_norm()
-
-
-class DiscriminatorP(torch.nn.Module):
- def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
- super(DiscriminatorP, self).__init__()
- self.period = period
- self.use_spectral_norm = use_spectral_norm
- norm_f = spectral_norm if use_spectral_norm else weight_norm
- self.convs = nn.ModuleList([
- norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
- norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
- norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
- norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
- norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))),
- ])
- self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
-
- def forward(self, x):
- fmap = []
-
- # 1d to 2d
- b, c, t = x.shape
- if t % self.period != 0: # pad first
- n_pad = self.period - (t % self.period)
- x = F.pad(x, (0, n_pad), "reflect")
- t = t + n_pad
- x = x.view(b, c, t // self.period, self.period)
-
- for l in self.convs:
- x = l(x)
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- fmap.append(x)
- x = self.conv_post(x)
- fmap.append(x)
- x = torch.flatten(x, 1, -1)
-
- return x, fmap
-
-
-class DiscriminatorS(torch.nn.Module):
- def __init__(self, use_spectral_norm=False):
- super(DiscriminatorS, self).__init__()
- norm_f = spectral_norm if use_spectral_norm else weight_norm
- self.convs = nn.ModuleList([
- norm_f(Conv1d(1, 16, 15, 1, padding=7)),
- norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
- norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
- norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
- norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
- norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
- ])
- self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
-
- def forward(self, x):
- fmap = []
-
- for l in self.convs:
- x = l(x)
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- fmap.append(x)
- x = self.conv_post(x)
- fmap.append(x)
- x = torch.flatten(x, 1, -1)
-
- return x, fmap
-
-
-class MultiPeriodDiscriminator(torch.nn.Module):
- def __init__(self, use_spectral_norm=False):
- super(MultiPeriodDiscriminator, self).__init__()
- periods = [2, 3, 5, 7, 11]
-
- discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
- discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods]
- self.discriminators = nn.ModuleList(discs)
-
- def forward(self, y, y_hat):
- y_d_rs = []
- y_d_gs = []
- fmap_rs = []
- fmap_gs = []
- for i, d in enumerate(self.discriminators):
- y_d_r, fmap_r = d(y)
- y_d_g, fmap_g = d(y_hat)
- y_d_rs.append(y_d_r)
- y_d_gs.append(y_d_g)
- fmap_rs.append(fmap_r)
- fmap_gs.append(fmap_g)
-
- return y_d_rs, y_d_gs, fmap_rs, fmap_gs
-
-class ReferenceEncoder(nn.Module):
- '''
- inputs --- [N, Ty/r, n_mels*r] mels
- outputs --- [N, ref_enc_gru_size]
- '''
-
- def __init__(self, spec_channels, gin_channels=0):
-
- super().__init__()
- self.spec_channels = spec_channels
- ref_enc_filters = [32, 32, 64, 64, 128, 128]
- K = len(ref_enc_filters)
- filters = [1] + ref_enc_filters
- convs = [weight_norm(nn.Conv2d(in_channels=filters[i],
- out_channels=filters[i + 1],
- kernel_size=(3, 3),
- stride=(2, 2),
- padding=(1, 1))) for i in range(K)]
- self.convs = nn.ModuleList(convs)
- # self.wns = nn.ModuleList([weight_norm(num_features=ref_enc_filters[i]) for i in range(K)])
-
- out_channels = self.calculate_channels(spec_channels, 3, 2, 1, K)
- self.gru = nn.GRU(input_size=ref_enc_filters[-1] * out_channels,
- hidden_size=256 // 2,
- batch_first=True)
- self.proj = nn.Linear(128, gin_channels)
-
- def forward(self, inputs, mask=None):
- N = inputs.size(0)
- out = inputs.view(N, 1, -1, self.spec_channels) # [N, 1, Ty, n_freqs]
- for conv in self.convs:
- out = conv(out)
- # out = wn(out)
- out = F.relu(out) # [N, 128, Ty//2^K, n_mels//2^K]
-
- out = out.transpose(1, 2) # [N, Ty//2^K, 128, n_mels//2^K]
- T = out.size(1)
- N = out.size(0)
- out = out.contiguous().view(N, T, -1) # [N, Ty//2^K, 128*n_mels//2^K]
-
- self.gru.flatten_parameters()
- memory, out = self.gru(out) # out --- [1, N, 128]
-
- return self.proj(out.squeeze(0))
-
- def calculate_channels(self, L, kernel_size, stride, pad, n_convs):
- for i in range(n_convs):
- L = (L - kernel_size + 2 * pad) // stride + 1
- return L
-
-
-class SynthesizerTrn(nn.Module):
- """
- Synthesizer for Training
- """
-
- def __init__(self,
- n_vocab,
- spec_channels,
- segment_size,
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- n_speakers=256,
- gin_channels=256,
- use_sdp=True,
- n_flow_layer = 4,
- n_layers_trans_flow = 3,
- flow_share_parameter = False,
- use_transformer_flow = True,
- **kwargs):
-
- super().__init__()
- self.n_vocab = n_vocab
- self.spec_channels = spec_channels
- self.inter_channels = inter_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.resblock = resblock
- self.resblock_kernel_sizes = resblock_kernel_sizes
- self.resblock_dilation_sizes = resblock_dilation_sizes
- self.upsample_rates = upsample_rates
- self.upsample_initial_channel = upsample_initial_channel
- self.upsample_kernel_sizes = upsample_kernel_sizes
- self.segment_size = segment_size
- self.n_speakers = n_speakers
- self.gin_channels = gin_channels
- self.n_layers_trans_flow = n_layers_trans_flow
- self.use_spk_conditioned_encoder = kwargs.get("use_spk_conditioned_encoder", True)
- self.use_sdp = use_sdp
- self.use_noise_scaled_mas = kwargs.get("use_noise_scaled_mas", False)
- self.mas_noise_scale_initial = kwargs.get("mas_noise_scale_initial", 0.01)
- self.noise_scale_delta = kwargs.get("noise_scale_delta", 2e-6)
- self.current_mas_noise_scale = self.mas_noise_scale_initial
- if self.use_spk_conditioned_encoder and gin_channels > 0:
- self.enc_gin_channels = gin_channels
- self.enc_p = TextEncoder(n_vocab,
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- gin_channels=self.enc_gin_channels)
- self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates,
- upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels)
- self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16,
- gin_channels=gin_channels)
- if use_transformer_flow:
- self.flow = TransformerCouplingBlock(inter_channels, hidden_channels, filter_channels, n_heads, n_layers_trans_flow, 5, p_dropout, n_flow_layer, gin_channels=gin_channels,share_parameter= flow_share_parameter)
- else:
- self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, n_flow_layer, gin_channels=gin_channels)
- self.sdp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels)
- self.dp = DurationPredictor(hidden_channels, 256, 3, 0.5, gin_channels=gin_channels)
-
- if n_speakers >= 1:
- self.emb_g = nn.Embedding(n_speakers, gin_channels)
- else:
- self.ref_enc = ReferenceEncoder(spec_channels, gin_channels)
-
- def forward(self, x, x_lengths, y, y_lengths, sid, tone, language, bert):
- if self.n_speakers > 0:
- g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]
- else:
- g = self.ref_enc(y.transpose(1,2)).unsqueeze(-1)
- x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, tone, language, bert,g=g)
- z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
- z_p = self.flow(z, y_mask, g=g)
-
- with torch.no_grad():
- # negative cross-entropy
- s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t]
- neg_cent1 = torch.sum(-0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True) # [b, 1, t_s]
- neg_cent2 = torch.matmul(-0.5 * (z_p ** 2).transpose(1, 2),
- s_p_sq_r) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]
- neg_cent3 = torch.matmul(z_p.transpose(1, 2), (m_p * s_p_sq_r)) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]
- neg_cent4 = torch.sum(-0.5 * (m_p ** 2) * s_p_sq_r, [1], keepdim=True) # [b, 1, t_s]
- neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4
- if self.use_noise_scaled_mas:
- epsilon = torch.std(neg_cent) * torch.randn_like(neg_cent) * self.current_mas_noise_scale
- neg_cent = neg_cent + epsilon
-
- attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
- attn = monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach()
-
- w = attn.sum(2)
-
- l_length_sdp = self.sdp(x, x_mask, w, g=g)
- l_length_sdp = l_length_sdp / torch.sum(x_mask)
-
- logw_ = torch.log(w + 1e-6) * x_mask
- logw = self.dp(x, x_mask, g=g)
- l_length_dp = torch.sum((logw - logw_) ** 2, [1, 2]) / torch.sum(x_mask) # for averaging
-
- l_length = l_length_dp + l_length_sdp
-
- # expand prior
- m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2)
- logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2)
-
- z_slice, ids_slice = commons.rand_slice_segments(z, y_lengths, self.segment_size)
- o = self.dec(z_slice, g=g)
- return o, l_length, attn, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q), (x, logw, logw_)
-
- def infer(self, x, x_lengths, sid, tone, language, bert, noise_scale=.667, length_scale=1, noise_scale_w=0.8, max_len=None, sdp_ratio=0,y=None):
- #x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, tone, language, bert)
- # g = self.gst(y)
- if self.n_speakers > 0:
- g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]
- else:
- g = self.ref_enc(y.transpose(1,2)).unsqueeze(-1)
- x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, tone, language, bert,g=g)
- logw = self.sdp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w) * (sdp_ratio) + self.dp(x, x_mask, g=g) * (1 - sdp_ratio)
- w = torch.exp(logw) * x_mask * length_scale
- w_ceil = torch.ceil(w)
- y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()
- y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype)
- attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
- attn = commons.generate_path(w_ceil, attn_mask)
-
- m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t']
- logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1,
- 2) # [b, t', t], [b, t, d] -> [b, d, t']
-
- z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale
- z = self.flow(z_p, y_mask, g=g, reverse=True)
- o = self.dec((z * y_mask)[:, :, :max_len], g=g)
- return o, attn, y_mask, (z, z_p, m_p, logs_p)
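
The monotonic-alignment search in SynthesizerTrn.forward scores every (frame, token) pair with a Gaussian negative cross-entropy before calling maximum_path. A self-contained sketch of just that score computation, with toy shapes:

    import math
    import torch

    b, d, t_t, t_s = 1, 4, 7, 5      # batch, feature dim, target frames, source tokens
    z_p = torch.randn(b, d, t_t)     # flow-mapped posterior latents
    m_p = torch.randn(b, d, t_s)     # prior means
    logs_p = torch.zeros(b, d, t_s)  # prior log-stds

    s_p_sq_r = torch.exp(-2 * logs_p)  # 1 / sigma^2
    neg_cent = (
        torch.sum(-0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True)
        + torch.matmul(-0.5 * (z_p ** 2).transpose(1, 2), s_p_sq_r)
        + torch.matmul(z_p.transpose(1, 2), m_p * s_p_sq_r)
        + torch.sum(-0.5 * (m_p ** 2) * s_p_sq_r, [1], keepdim=True)
    )
    print(neg_cent.shape)  # torch.Size([1, 7, 5]): one score per (frame, token) pair
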
diff --git a/spaces/dineshreddy/WALT/mmdet/core/mask/utils.py b/spaces/dineshreddy/WALT/mmdet/core/mask/utils.py
deleted file mode 100644
index c88208291ab2a605bee9fe6c1a28a443b74c6372..0000000000000000000000000000000000000000
--- a/spaces/dineshreddy/WALT/mmdet/core/mask/utils.py
+++ /dev/null
@@ -1,63 +0,0 @@
-import mmcv
-import numpy as np
-import pycocotools.mask as mask_util
-
-
-def split_combined_polys(polys, poly_lens, polys_per_mask):
- """Split the combined 1-D polys into masks.
-
- A mask is represented as a list of polys, and a poly is represented as
- a 1-D array. In dataset, all masks are concatenated into a single 1-D
- tensor. Here we need to split the tensor into original representations.
-
- Args:
- polys (list): a list (length = image num) of 1-D tensors
- poly_lens (list): a list (length = image num) of poly length
- polys_per_mask (list): a list (length = image num) of poly number
- of each mask
-
- Returns:
- list: a list (length = image num) of list (length = mask num) of \
- list (length = poly num) of numpy array.
- """
- mask_polys_list = []
- for img_id in range(len(polys)):
- polys_single = polys[img_id]
- polys_lens_single = poly_lens[img_id].tolist()
- polys_per_mask_single = polys_per_mask[img_id].tolist()
-
- split_polys = mmcv.slice_list(polys_single, polys_lens_single)
- mask_polys = mmcv.slice_list(split_polys, polys_per_mask_single)
- mask_polys_list.append(mask_polys)
- return mask_polys_list
-
-
-# TODO: move this function to more proper place
-def encode_mask_results(mask_results):
- """Encode bitmap mask to RLE code.
-
- Args:
- mask_results (list | tuple[list]): bitmap mask results.
- In mask scoring rcnn, mask_results is a tuple of (segm_results,
- segm_cls_score).
-
- Returns:
- list | tuple: RLE encoded mask.
- """
- if isinstance(mask_results, tuple): # mask scoring
- cls_segms, cls_mask_scores = mask_results
- else:
- cls_segms = mask_results
- num_classes = len(cls_segms)
- encoded_mask_results = [[] for _ in range(num_classes)]
- for i in range(len(cls_segms)):
- for cls_segm in cls_segms[i]:
- encoded_mask_results[i].append(
- mask_util.encode(
- np.array(
- cls_segm[:, :, np.newaxis], order='F',
- dtype='uint8'))[0]) # encoded with RLE
- if isinstance(mask_results, tuple):
- return encoded_mask_results, cls_mask_scores
- else:
- return encoded_mask_results
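
A minimal sketch of encode_mask_results on a toy single-class result (requires pycocotools; the mask is made up and the import path assumes the module above):

    import numpy as np
    from mmdet.core.mask.utils import encode_mask_results

    toy_results = [[np.eye(4, dtype=np.uint8)]]  # one class, one 4x4 binary mask
    rles = encode_mask_results(toy_results)
    print(rles[0][0]["counts"])  # compact RLE encoding of the mask
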
diff --git a/spaces/dteam/chatgpt-dteam/bin_public/utils/Pinecone.py b/spaces/dteam/chatgpt-dteam/bin_public/utils/Pinecone.py
deleted file mode 100644
index 1d941acfe575e849f24b38fb868dcb1737effc17..0000000000000000000000000000000000000000
--- a/spaces/dteam/chatgpt-dteam/bin_public/utils/Pinecone.py
+++ /dev/null
@@ -1,177 +0,0 @@
-from loguru import logger
-import json
-from bin_public.utils.utils_db import *
-from bin_public.config.presets import MIGRAINE_PROMPT
-import os
-import openai
-import PyPDF2
-import pinecone
-from langchain.vectorstores import Pinecone
-from langchain.embeddings.openai import OpenAIEmbeddings
-from langchain.text_splitter import RecursiveCharacterTextSplitter
-
-
-PINECONE_API_KEY = os.environ['PINECONE_API_KEY']
-PINECONE_API_ENV = os.environ['PINECONE_API_ENV']
-
-
-def load_local_file_PDF(path, file_name):
- result = {}
- temp = ''
- pdf_reader = PyPDF2.PdfReader(open(path, 'rb'))
- for i in range(len(pdf_reader.pages)):
- pages = pdf_reader.pages[i]
- temp += pages.extract_text()
- if file_name.endswith('.pdf'):
- index = file_name[:-4]
- temp = temp.replace('\n', '').replace('\t', '')
- text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
- texts = text_splitter.split_text(temp)
- i = 0
- for content in texts:
- result[f'{index}_{i}'] = content
- i += 1
- return result
-
-
-def holo_query_insert_file_contents(file_name, file_content):
- run_sql = f"""
- insert into s_context(
- file_name,
- content
- )
- select
- '{file_name}' as file_name,
- '{file_content}' as content
- """
- holo_query_func(run_sql, is_query=0)
-
-
-def holo_query_get_content(run_sql):
- temp = []
- data = holo_query_func(run_sql, is_query=1)
- for i in data:
- temp.append(i[1].replace('\n', '').replace('\t', ''))
- return temp
-
-
-def pdf2database(path, file_name):
- temp = ''
- pdf_reader = PyPDF2.PdfReader(open(path, 'rb'))
- for i in range(len(pdf_reader.pages)):
- pages = pdf_reader.pages[i]
- temp += pages.extract_text()
- if file_name.endswith('.pdf'):
- index = file_name[:-4]
- temp = temp.replace('\n', '').replace('\t', '')
- text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
- texts = text_splitter.split_text(temp)
- for i in range(len(texts)):
- holo_query_insert_file_contents(f'{index}_{i}', f'{texts[i]}')
- logger.info(f'{index}_{i} stored')
-
-
-def load_json(path):
- with open(path, 'r', encoding='utf-8') as f:
- data = json.load(f)
- return data
-
-
-def get_content_from_json(path):
- result = []
- data = load_json(path)
- for item in data:
- key = list(item.keys())[0]
- value = item[key]
- result.append(key + ',' + value)
- return result
-
-def data2embeddings(index_name, data, embeddings):
- pinecone.init(api_key=PINECONE_API_KEY, environment=PINECONE_API_ENV)
- Pinecone.from_texts([t for t in data], embeddings, index_name=index_name)
- logger.info("Stored Successfully")
-
-
-def context_construction(api_key, query, model, pinecone_api_key, pinecone_api_env, temperature, index_name, mode="map_reduce"):
- temp = []
- embeddings = OpenAIEmbeddings(openai_api_key=api_key)
- # llm = OpenAI(temperature=temperature, openai_api_key=api_key, model_name=model)
- pinecone.init(api_key=pinecone_api_key, environment=pinecone_api_env)
- docsearch = Pinecone.from_existing_index(index_name=index_name, embedding=embeddings)
- # chain = load_qa_chain(llm, chain_type=mode)
- if not any(char.isalnum() for char in query):
- return " ", MIGRAINE_PROMPT, "Connecting to Pinecone"
- else:
- docs = docsearch.similarity_search(query, include_metadata=True, k=2)
- # response = chain.run(input_documents=docs, question=str(query))
- for i in docs:
- temp.append(i.page_content)
- # the prefix below means "use the following material to assist the answer"
- return '用以下资料进行辅助回答\n' + ' '.join(temp), '\n' + ' '.join(temp), "Connecting to Pinecone"
-
-def chat_prerequisites(input, filter, embeddings, top_k=4):
- # filter : dic
-
- # input_prompt = '只基于以下规范的两种分类对形如 "position_name: xx job_name: xx job_description: xxx"的描述进行分类,只要回复规范的类别名'
- # the Chinese prompt asks the model to classify an informal job posting into one of four standardized categories (选项一..选项四) and to reply with the option label only
- input_prompt = '接下来我会给你一段"不规范的招聘职位描述",以及4个用(选项一,选项二,选项三,选项四)四个选项表示的规范的职业分类描述。' \
- '你需要将"不规范的招聘职位描述"归类为”选项一“或“选项二”或“选项三”或“选项四”。' \
- '你只需要回复”选项一“或“选项二”或“选项三”或“选项四”,不要回复任何别的东西'
- query = input_prompt + input
-
- temp = []
- docsearch = Pinecone.from_existing_index(index_name=pinecone.list_indexes()[0], embedding=embeddings)
- docs = docsearch.similarity_search(query, k=top_k, filter=filter)
- for index, i in enumerate(docs):
- if index == 0:
- temp.append("选项一:" + i.page_content + "##")
- if index == 1:
- temp.append("选项二:" + i.page_content + "##")
- if index == 2:
- temp.append("选项三:" + i.page_content + "##")
- if index == 3:
- temp.append("选项四:" + i.page_content + "##")
-
- system_prompt = ' '.join(temp)
-
- return system_prompt, query
-
-
-def chat(input, filter, embeddings):
- system_prompt, query = chat_prerequisites(input, filter, embeddings)
- logger.info('prerequisites satisfied')
- completion = openai.ChatCompletion.create(
- model="gpt-3.5-turbo",
- messages=[
- {"role": "system", "content": system_prompt},
- {"role": "user", "content": query}
- ])
- return completion.choices[0].message['content'], system_prompt
-
-def chat_data_cleaning(input):
- # the Chinese prompt asks the model to extract the job title plus a one- or two-sentence summary, drop irrelevant details such as salary and location, and reply strictly as "岗位名称: xxx # 岗位描述: xxx # "
- clean_prompt = '我要求你提取出这段文字中的岗位名称、岗位描述(用一句或者两句话概括),去除无关紧要的信息,比如工资,地点等等,并严格遵守"岗位名称: xxx # 岗位描述: xxx # "的格式进行回复'
- completion = openai.ChatCompletion.create(
- model="gpt-3.5-turbo",
- messages=[
- {"role": "system", "content": clean_prompt},
- {"role": "user", "content": clean_prompt + input}
- ])
- return completion.choices[0].message['content']
-
-def local_emb2pinecone(PINECONE_API_KEY, PINECONE_API_ENV, level, emb_path, text_path, delete=False):
- pinecone.init(api_key=PINECONE_API_KEY, environment=PINECONE_API_ENV)
- logger.info('Pinecone initialized')
- logger.info(pinecone.list_indexes()[0])
- l = load_json(emb_path)
- print(f'level{level} loaded')
- with open(text_path, 'r', encoding='utf-8') as f:
- texts = f.readlines()
- texts = [i.replace('\n', '') for i in texts]
- index = pinecone.Index(pinecone.list_indexes()[0])
- if delete:
- if input('press y to delete all the vectors: ') == 'y':
- index.delete(delete_all=True)
- logger.info('delete all')
- else:
- pass
- else:
- pass
- for key, value, text in zip(list(l.keys()), list(l.values()), texts):
- index.upsert([(key, value, {"text": text, "level": level})])
- logger.info('upload successfully')
\ No newline at end of file
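
The retrieval half of context_construction reduces to the calls below. A sketch with placeholder credentials and index name (none of these values are real):

    import pinecone
    from langchain.vectorstores import Pinecone
    from langchain.embeddings.openai import OpenAIEmbeddings

    pinecone.init(api_key="YOUR_PINECONE_KEY", environment="YOUR_PINECONE_ENV")
    embeddings = OpenAIEmbeddings(openai_api_key="YOUR_OPENAI_KEY")

    docsearch = Pinecone.from_existing_index(index_name="your-index", embedding=embeddings)
    docs = docsearch.similarity_search("your query", k=2)
    print([d.page_content for d in docs])
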
diff --git a/spaces/emc348/faces-through-time/training/coaches/multi_id_coach.py b/spaces/emc348/faces-through-time/training/coaches/multi_id_coach.py
deleted file mode 100644
index 0a1b40bb66114895b39124d3e119bfb2478b378d..0000000000000000000000000000000000000000
--- a/spaces/emc348/faces-through-time/training/coaches/multi_id_coach.py
+++ /dev/null
@@ -1,90 +0,0 @@
-import os
-
-import torch
-from tqdm import tqdm
-from color_transfer_loss import ColorTransferLoss
-
-from configs import paths_config, hyperparameters, global_config
-from training.coaches.base_coach import BaseCoach
-from utils.log_utils import log_images_from_w
-
-
-class MultiIDCoach(BaseCoach):
- def __init__(self, data_loader, in_year, use_wandb):
- super().__init__(data_loader, in_year, use_wandb)
-
- def train(self):
- self.G.synthesis.train()
- self.G.mapping.train()
-
- w_path_dir = f"{paths_config.embedding_base_dir}/{paths_config.input_data_id}"
- os.makedirs(w_path_dir, exist_ok=True)
- os.makedirs(f"{w_path_dir}/{paths_config.pti_results_keyword}", exist_ok=True)
-
- use_ball_holder = True
- w_pivots = []
- images = []
-
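- # Phase 1 of pivotal tuning: invert each image to its pivot latent w
- # (from e4e or a previous PTI run) before touching the generator weights.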
- for fname, image in self.data_loader:
- if self.image_counter >= hyperparameters.max_images_to_invert:
- break
-
- image_name = fname[0]
- if hyperparameters.first_inv_type == "w+":
- embedding_dir = (
- f"{w_path_dir}/{paths_config.e4e_results_keyword}/{image_name}"
- )
- else:
- embedding_dir = (
- f"{w_path_dir}/{paths_config.pti_results_keyword}/{image_name}"
- )
- os.makedirs(embedding_dir, exist_ok=True)
-
- w_pivot = self.get_inversion(w_path_dir, image_name, image)
- w_pivots.append(w_pivot)
- images.append((image_name, image))
- self.image_counter += 1
-
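- # Phase 2 of pivotal tuning: fine-tune the generator so every pivot w
- # reproduces its target image, sweeping the whole image set each step.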
- for i in tqdm(range(hyperparameters.max_pti_steps)):
- self.image_counter = 0
-
- for data, w_pivot in zip(images, w_pivots):
- image_name, image = data
-
- if self.image_counter >= hyperparameters.max_images_to_invert:
- break
-
- real_images_batch = image.to(global_config.device)
-
- generated_images = self.forward(w_pivot)
-
- loss, l2_loss_val, loss_lpips = self.calc_loss(
- generated_images,
- real_images_batch,
- image_name,
- self.G,
- use_ball_holder,
- w_pivot,
- {},
- )
-
- self.optimizer.zero_grad()
- loss.backward()
- self.optimizer.step()
-
- use_ball_holder = (
- global_config.training_step
- % hyperparameters.locality_regularization_interval
- == 0
- )
-
- global_config.training_step += 1
- self.image_counter += 1
-
- if self.use_wandb:
- log_images_from_w(w_pivots, self.G, [image[0] for image in images])
-
- torch.save(
- self.G,
- f"{paths_config.checkpoints_dir}/model_{global_config.run_name}_multi_id.pt",
- )
diff --git a/spaces/eson/tokenizer-arena/vocab/gpt_neox_chinese_v1/tokenizer/train_tokenizer.py b/spaces/eson/tokenizer-arena/vocab/gpt_neox_chinese_v1/tokenizer/train_tokenizer.py
deleted file mode 100644
index 14a8e29348b141f29f0afe6e52fda46da5210b85..0000000000000000000000000000000000000000
--- a/spaces/eson/tokenizer-arena/vocab/gpt_neox_chinese_v1/tokenizer/train_tokenizer.py
+++ /dev/null
@@ -1,126 +0,0 @@
-# Copyright (c) 2021, EleutherAI
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-Assumes a dataset of jsonl files in the same format as the neox training set.
-"""
-
-from tokenizers import Tokenizer, decoders, models, pre_tokenizers, processors, trainers
-from tokenizers.normalizers import NFKC
-
-from glob import glob
-import os
-import json
-import argparse
-
-
-def load_jsonl(input_path, quiet=True) -> list:
- """
- Read list of objects from a JSON lines file.
- """
- data = []
- with open(input_path, "r", encoding="utf-8") as f:
- for line in f:
- data.append(json.loads(line.rstrip("\n\r")))
- if not quiet:
- print("Loaded {} records from {}".format(len(data), input_path))
- return data
-
-
-def json_iterator(input_dir, text_key="text"):
- all_jsonls = glob(f"{input_dir}/*.jsonl") + glob(f"{input_dir}/*.json")
- for j in all_jsonls:
- data = load_jsonl(j)
- for doc in data:
- yield doc[text_key]
-
-
-def train_tokenizer(
- input_dir: str, save_path: str, tokenizer_type: str = "BPE", vocab_size: int = 52000
-):
- """
- Trains a tokenizer on all the json files in `input_dir` and saves it to `save_path`
-
- :param input_dir: input directory containing jsonl files
- :param save_path: path to save tokenizer to
- :param tokenizer_type: type of tokenizer to train.
- :param vocab_size: int, size of tokenizer's vocab
- :return: None; the trained tokenizer is written to save_path
- """
-
- if tokenizer_type == "BPE":
- model = models.BPE()
- else:
- raise NotImplementedError(f"Tokenizer type {tokenizer_type} not implemented")
- tokenizer = Tokenizer(model)
-
- # Customize pre-tokenization and decoding
- tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=True)
- tokenizer.decoder = decoders.ByteLevel()
- tokenizer.post_processor = processors.ByteLevel(trim_offsets=True)
- tokenizer.normalizer = NFKC()
-
- # And then train
- trainer = trainers.BpeTrainer(
- vocab_size=vocab_size, special_tokens=["<|endoftext|>", "<|padding|>"]
- )
- tokenizer.train_from_iterator(json_iterator(input_dir), trainer)
-
- # And Save it
- tokenizer.save(save_path, pretty=True)
- print(f"Tokenizer saved at {save_path}")
-
-
-def parse_args():
- parser = argparse.ArgumentParser(
- description="script for training a multilingual "
- "HF tokenizer on CC dumps with upweighting for low resource languages"
- )
- parser.add_argument(
- "--json_input_dir",
- type=str,
- help="Path to folder containing tokenizer training data in jsonl format",
- )
- parser.add_argument(
- "--tokenizer_output_path",
- type=str,
- help="Path to which your trained tokenizer will be saved (should end in .json)",
- )
- parser.add_argument(
- "--tokenizer_type",
- type=str,
- help="type of tokenizer to train, currently only BPE is supported",
- choices=["BPE"],
- default=["BPE"],
- )
- parser.add_argument(
- "-v",
- "--vocab_size",
- help="vocabulary size of tokenizer, default=52k",
- type=int,
- default=52000,
- )
- return parser.parse_args()
-
-
-if __name__ == "__main__":
-
- args = parse_args()
-
- train_tokenizer(
- args.json_input_dir,
- save_path=args.tokenizer_output_path,
- tokenizer_type=args.tokenizer_type,
- vocab_size=args.vocab_size,
- )
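-
-# Example invocation (paths illustrative):
-#   python train_tokenizer.py --json_input_dir ./data \
-#       --tokenizer_output_path ./tokenizer.json --vocab_size 52000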
diff --git a/spaces/facebook/MusicGen/tests/modules/test_seanet.py b/spaces/facebook/MusicGen/tests/modules/test_seanet.py
deleted file mode 100644
index e5c51b340a2f94fb2828b14daf83d5fad645073d..0000000000000000000000000000000000000000
--- a/spaces/facebook/MusicGen/tests/modules/test_seanet.py
+++ /dev/null
@@ -1,115 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-from itertools import product
-
-import pytest
-import torch
-
-from audiocraft.modules.seanet import SEANetEncoder, SEANetDecoder, SEANetResnetBlock
-from audiocraft.modules import StreamableConv1d, StreamableConvTranspose1d
-
-
-class TestSEANetModel:
-
- def test_base(self):
- encoder = SEANetEncoder()
- decoder = SEANetDecoder()
-
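- # With the default ratios (8, 5, 4, 2) the encoder strides by 320 overall,
- # so 24000 input samples become 75 latent frames of dimension 128.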
- x = torch.randn(1, 1, 24000)
- z = encoder(x)
- assert list(z.shape) == [1, 128, 75], z.shape
- y = decoder(z)
- assert y.shape == x.shape, (x.shape, y.shape)
-
- def test_causal(self):
- encoder = SEANetEncoder(causal=True)
- decoder = SEANetDecoder(causal=True)
- x = torch.randn(1, 1, 24000)
-
- z = encoder(x)
- assert list(z.shape) == [1, 128, 75], z.shape
- y = decoder(z)
- assert y.shape == x.shape, (x.shape, y.shape)
-
- def test_conv_skip_connection(self):
- encoder = SEANetEncoder(true_skip=False)
- decoder = SEANetDecoder(true_skip=False)
-
- x = torch.randn(1, 1, 24000)
- z = encoder(x)
- assert list(z.shape) == [1, 128, 75], z.shape
- y = decoder(z)
- assert y.shape == x.shape, (x.shape, y.shape)
-
- def test_seanet_encoder_decoder_final_act(self):
- encoder = SEANetEncoder(true_skip=False)
- decoder = SEANetDecoder(true_skip=False, final_activation='Tanh')
-
- x = torch.randn(1, 1, 24000)
- z = encoder(x)
- assert list(z.shape) == [1, 128, 75], z.shape
- y = decoder(z)
- assert y.shape == x.shape, (x.shape, y.shape)
-
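- # disable_norm_outer_blocks=N is expected to leave the first N conv blocks
- # of the encoder un-normalized; the decoder check below mirrors this on its
- # last N blocks.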
- def _check_encoder_blocks_norm(self, encoder: SEANetEncoder, n_disable_blocks: int, norm: str):
- n_blocks = 0
- for layer in encoder.model:
- if isinstance(layer, StreamableConv1d):
- n_blocks += 1
- assert layer.conv.norm_type == ('none' if n_blocks <= n_disable_blocks else norm)
- elif isinstance(layer, SEANetResnetBlock):
- for resnet_layer in layer.block:
- if isinstance(resnet_layer, StreamableConv1d):
- # here we add + 1 to n_blocks as we increment n_blocks just after the block
- assert resnet_layer.conv.norm_type == ('none' if (n_blocks + 1) <= n_disable_blocks else norm)
-
- def test_encoder_disable_norm(self):
- n_residuals = [0, 1, 3]
- disable_blocks = [0, 1, 2, 3, 4, 5, 6]
- norms = ['weight_norm', 'none']
- for n_res, n_disable, norm in product(n_residuals, disable_blocks, norms):
- encoder = SEANetEncoder(n_residual_layers=n_res, norm=norm,
- disable_norm_outer_blocks=n_disable)
- self._check_encoder_blocks_norm(encoder, n_disable, norm)
-
- def _check_decoder_blocks_norm(self, decoder: SEANetDecoder, n_disable_blocks: int, norm: str):
- n_blocks = 0
- for layer in decoder.model:
- if isinstance(layer, StreamableConv1d):
- n_blocks += 1
- assert layer.conv.norm_type == ('none' if (decoder.n_blocks - n_blocks) < n_disable_blocks else norm)
- elif isinstance(layer, StreamableConvTranspose1d):
- n_blocks += 1
- assert layer.convtr.norm_type == ('none' if (decoder.n_blocks - n_blocks) < n_disable_blocks else norm)
- elif isinstance(layer, SEANetResnetBlock):
- for resnet_layer in layer.block:
- if isinstance(resnet_layer, StreamableConv1d):
- assert resnet_layer.conv.norm_type == \
- ('none' if (decoder.n_blocks - n_blocks) < n_disable_blocks else norm)
-
- def test_decoder_disable_norm(self):
- n_residuals = [0, 1, 3]
- disable_blocks = [0, 1, 2, 3, 4, 5, 6]
- norms = ['weight_norm', 'none']
- for n_res, n_disable, norm in product(n_residuals, disable_blocks, norms):
- decoder = SEANetDecoder(n_residual_layers=n_res, norm=norm,
- disable_norm_outer_blocks=n_disable)
- self._check_decoder_blocks_norm(decoder, n_disable, norm)
-
- def test_disable_norm_raises_exception(self):
- # Invalid disable_norm_outer_blocks values raise exceptions
- with pytest.raises(AssertionError):
- SEANetEncoder(disable_norm_outer_blocks=-1)
-
- with pytest.raises(AssertionError):
- SEANetEncoder(ratios=[1, 1, 2, 2], disable_norm_outer_blocks=7)
-
- with pytest.raises(AssertionError):
- SEANetDecoder(disable_norm_outer_blocks=-1)
-
- with pytest.raises(AssertionError):
- SEANetDecoder(ratios=[1, 1, 2, 2], disable_norm_outer_blocks=7)
diff --git a/spaces/falterWliame/Face_Mask_Detection/Mafia2joesadventuresdlcfree [PORTABLE]downloadpc.md b/spaces/falterWliame/Face_Mask_Detection/Mafia2joesadventuresdlcfree [PORTABLE]downloadpc.md
deleted file mode 100644
index 9a9f2a500a56ca7a21ffc5e36ba3376d09879335..0000000000000000000000000000000000000000
--- a/spaces/falterWliame/Face_Mask_Detection/Mafia2joesadventuresdlcfree [PORTABLE]downloadpc.md
+++ /dev/null
@@ -1,28 +0,0 @@
-
-
Mafia 2 Joe's Adventures DLC Free Download PC: How to Get It and What to Expect
-
-
If you are a fan of Mafia 2, you might be interested in downloading the free DLC called Joe's Adventures. This add-on lets you play as Joe, Vito's best friend, and find out what happened with the Clemente family while Vito was in prison. You will also get to explore new locations, drive new cars, wear new clothes, and compete for high scores in arcade-style missions.
-
-
But how can you get this DLC for free on your PC? And what are the features and benefits of playing it? Here are some answers to these questions.
How to Download Mafia 2 Joe's Adventures DLC for Free on PC
-
-
There are several websites that offer free downloads of Mafia 2 Joe's Adventures DLC for PC, such as Free Download Manager, Software Informer, and Archive.org. However, before you download anything from these sources, make sure you have a reliable antivirus software installed on your computer and scan the files for any malware or viruses. Also, be aware that some of these websites may contain ads or pop-ups that could be annoying or harmful.
-
-
Another way to download Mafia 2 Joe's Adventures DLC for free on PC is to use a torrent client, such as uTorrent or BitTorrent. You can find torrent links for this DLC on various websites, such as Video Game Mods or Peatix. However, using torrents can be risky, as they may contain illegal or pirated content, or expose you to cyberattacks or legal issues. Therefore, we do not recommend this method unless you know what you are doing and take full responsibility for your actions.
-
-
The safest and easiest way to download Mafia 2 Joe's Adventures DLC for free on PC is to use Steam, the popular digital distribution platform for games. Steam offers this DLC as a free update for anyone who owns Mafia 2 on their library. All you need to do is to launch Steam, go to your library, right-click on Mafia 2, select Properties, go to the DLC tab, and check the box next to Joe's Adventures. Then, Steam will automatically download and install the DLC for you. You can also access the DLC from the main menu of Mafia 2 by selecting Joe's Adventures.
-
-
What to Expect from Mafia 2 Joe's Adventures DLC
-
-
Mafia 2 Joe's Adventures DLC is a fun and engaging add-on that expands the story and gameplay of Mafia 2. Here are some of the features and benefits of playing it:
-
-
-
You will get to play as Joe, a charismatic and ruthless character who has his own style and personality. You will experience his side of the story and his loyalty to Vito.
-
You will get to explore new locations in Empire Bay, such as a train station, a cathouse, a boat yard, and a seasonally changing lakefront. You will also see some never-before-seen buildings and landmarks.
-
You will get to drive new cars from the '50s, such as hot rods, sports cars, and motorcycles. You will also get to wear new clothes that suit Joe's taste and attitude.
-
You will get to enjoy new music from the era, such as rockabilly, doo-wop, and blues. You will also find new Playboy magazines hidden throughout the city.
-
You will get to complete new missions that are more action-packed and arcade-style than the main game. You will have to shoot, chase, race, and compete for high scores.
-
-
\ No newline at end of file
diff --git a/spaces/fatiXbelha/sd/Bus Simulator Indonesia APK How to Install and Play on Your Phone.md b/spaces/fatiXbelha/sd/Bus Simulator Indonesia APK How to Install and Play on Your Phone.md
deleted file mode 100644
index 500a033726935cd1c281d8e4037153466727d738..0000000000000000000000000000000000000000
--- a/spaces/fatiXbelha/sd/Bus Simulator Indonesia APK How to Install and Play on Your Phone.md
+++ /dev/null
@@ -1,107 +0,0 @@
-
-
Bus Simulator Indonesia APK: Everything You Need to Know
-
If you are looking for a fun and realistic bus driving game on your Android device, you might want to check out Bus Simulator Indonesia APK. This game will let you experience what it is like to be a bus driver in Indonesia, with its unique features and authentic environment. In this article, we will tell you everything you need to know about Bus Simulator Indonesia APK, including what it is, what are its features, and how to download and install it on your device.
-
What is Bus Simulator Indonesia?
-
Bus Simulator Indonesia, or BUSSID for short, is a mobile game developed by Maleo, an Indonesian game developer. It was released in 2017 and has since gained millions of downloads and positive reviews from players all over the world. BUSSID is not the first bus simulator game on the market, but it is probably one of the only ones that focuses on the Indonesian bus culture and environment. It aims to provide a fun and authentic experience for bus enthusiasts and casual gamers alike.
BUSSID has many features that make it stand out from other bus simulator games. Here are some of them:
-
Design your own livery
-
One of the most unique features of BUSSID is that you can design your own livery for your bus. You can customize the color, pattern, logo, and even the interior of your bus. You can also share your livery with other players online or download their liveries for your own use.
-
Easy and intuitive control
-
BUSSID has a very easy and intuitive control system that lets you drive your bus smoothly and comfortably. You can choose between tilt, steering wheel, or buttons to control your bus. You can also adjust the sensitivity and feedback of the controls according to your preference.
-
Authentic Indonesian environment
-
BUSSID features authentic Indonesian cities and places that you can explore with your bus. You can drive through Jakarta, Surabaya, Bali, Sumatra, and many other locations. You can also see famous landmarks, buildings, monuments, and scenery along the way. The game also has realistic traffic, weather, day and night cycle, and passengers that add to the immersion.
-
Cool and fun honks
-
BUSSID lets you honk your horn in a cool and fun way. You can choose from various types of horns, such as air horns, musical horns, or even the famous "Om Telolet Om" horn that became a viral sensation in Indonesia. You can also hear other buses honking their horns as well.
-
High quality and detailed graphics
-
BUSSID has high quality and detailed 3D graphics that make the game look stunning and realistic. The game also runs smoothly on most devices without lagging or crashing. You can also adjust the graphics settings according to your device's performance.
-
No obstructive ads while driving
-
BUSSID does not have any annoying or obstructive ads while you are driving your bus. The only ads that you will see are on the billboards along the road, which add to the realism of the game. You can also support the developer by watching optional video ads to get rewards.
-
-
Leaderboard and online multiplayer
-
BUSSID has a leaderboard system that lets you compete with other players around the world based on your score, distance, speed, and fuel consumption. You can also join online multiplayer convoys with other players and drive together in real time.
-
Vehicle mod system
-
BUSSID also has a vehicle mod system that lets you add new buses or other vehicles to the game. You can download mods from the internet or create your own using the mod tools provided by the developer. You can also share your mods with other players online or download their mods for your own use.
-
How to download and install Bus Simulator Indonesia APK?
-
If you want to play BUSSID on your Android device, you will need to download and install the APK file of the game. APK stands for Android Package Kit, which is a file format that contains all the necessary files and data for an Android application. Here are the requirements and steps to download and install BUSSID APK on your device:
-
Requirements for Bus Simulator Indonesia APK
-
Before you download and install BUSSID APK, you will need to make sure that your device meets the following requirements:
-
-
Your device must have Android 4.2 or higher operating system.
-
Your device must have at least 1 GB of RAM and 500 MB of free storage space.
-
Your device must have a stable internet connection to download the APK file and the additional data.
-
Your device must allow installation of apps from unknown sources. You can enable this option by going to Settings > Security > Unknown Sources and toggle it on.
-
-
Steps to download and install Bus Simulator Indonesia APK
-
After you have checked the requirements, you can follow these steps to download and install BUSSID APK on your device:
Go to the official Bus Simulator Indonesia download page and tap the Download button. You will be redirected to a page where you can choose between Google Play Store or Direct Download. If you choose Google Play Store, you will need to have a Google account and follow the instructions on the screen. If you choose Direct Download, you will need to wait for a few seconds until the APK file is ready.
-
Once the APK file is ready, click on the Download button again and save the file to your device.
-
Locate the APK file on your device using a file manager app and tap on it to start the installation process.
-
Follow the instructions on the screen and wait for the installation to finish.
-
After the installation is done, you will need to download some additional data for the game. You can do this by opening the game and tapping on the Download button on the main menu. You will need about 300 MB of data to play the game.
-
Once the data is downloaded, you can start playing BUSSID on your device. Enjoy!
-
-
Conclusion
-
Bus Simulator Indonesia APK is a fun and realistic bus driving game that lets you experience what it is like to be a bus driver in Indonesia. You can design your own livery, drive through authentic Indonesian environment, honk your horn in cool and fun ways, compete with other players online, and more. You can also download and install BUSSID APK easily on your Android device by following our guide above. If you are a fan of bus simulator games, you should definitely give BUSSID a try!
-
FAQs
-
Here are some frequently asked questions about BUSSID:
-
-
Is BUSSID free to play? Yes, BUSSID is free to play. However, you can also purchase some in-game items or features using real money if you want to support the developer or enhance your gameplay.
-
Is BUSSID safe to download and install? Yes, BUSSID is safe to download and install. The APK file that we provide is from the official website of BUSSID and does not contain any viruses or malware. However, you should always be careful when downloading files from unknown sources and scan them with an antivirus app before installing them.
-
Can I play BUSSID offline? No, BUSSID requires an internet connection to play. You will need internet access to download the APK file, the additional data, and any updates for the game. You will also need internet access to join online multiplayer convoys or access some online features of the game.
-
Can I play BUSSID on PC? No, BUSSID is only available for Android devices at the moment. However, you can use an Android emulator software such as BlueStacks or Nox Player to run BUSSID on your PC. You will need to download and install the emulator software first, then follow the same steps as you would for your Android device. You can find more information on how to use Android emulators on PC online.
-
How can I contact the developer of BUSSID? If you have any questions, feedback, suggestions, or issues regarding BUSSID, you can contact the developer of BUSSID by sending an email to support@maleo.id or visiting their official website at https://maleo.id/. You can also follow their social media accounts on Facebook, Instagram, Twitter, and YouTube for the latest news and updates about BUSSID.
-
-
-
\ No newline at end of file
diff --git a/spaces/fatiXbelha/sd/Dumb Ways to Die 2 Mod APK - The Ultimate Guide to the Funniest and Craziest Game Ever.md b/spaces/fatiXbelha/sd/Dumb Ways to Die 2 Mod APK - The Ultimate Guide to the Funniest and Craziest Game Ever.md
deleted file mode 100644
index 61c4d7480c2a864d89ad5f6298694c567d9fd320..0000000000000000000000000000000000000000
--- a/spaces/fatiXbelha/sd/Dumb Ways to Die 2 Mod APK - The Ultimate Guide to the Funniest and Craziest Game Ever.md
+++ /dev/null
@@ -1,108 +0,0 @@
-
-
Dumb Ways to Die 2 Mod APK: A Fun and Challenging Game
-
If you are looking for a game that will make you laugh, test your reflexes, and teach you some safety tips, then you should try Dumb Ways to Die 2. This is a sequel to the popular game that became a viral sensation in 2012. In this article, we will tell you everything you need to know about Dumb Ways to Die 2, including what it is, what features it has, and how you can download and install the mod APK version for free.
-
What is Dumb Ways to Die 2?
-
Dumb Ways to Die 2 is a casual game that consists of a series of mini-games where you have to avoid dying in various dumb ways. The game is based on a railway safety video that was created by Metro Trains in Melbourne, Australia. The video featured a catchy song and a bunch of cute characters who met their demise in silly and avoidable ways. The video became an internet hit, with over 200 million views on YouTube. It also spawned a mobile game, Dumb Ways to Die, which was released in 2013.
Dumb Ways to Die 2 is the official sequel to the original game, which was also developed by Metro Trains. It was released in 2014 for iOS and Android devices. The game has been downloaded over 100 million times and has received positive reviews from critics and players alike. The game has also won several awards, such as the Webby Award for Best Mobile Game in 2015.
-
A collection of mini-games with hilarious outcomes
-
The gameplay of Dumb Ways to Die 2 is similar to the first game, but with more levels, challenges, and characters. The game features several themed locations, such as Freezerville, Area Fifty-Dumb, Drown Town, and more. Each location has its own set of mini-games that are related to the theme. For example, in Freezerville, you have to ski down a slope, avoid snowmen, jump over icebergs, and so on.
-
The mini-games are fast-paced and require quick reactions and timing. You have to tap, swipe, tilt, or shake your device to perform different actions. For example, you have to tap to jump over sharks, swipe to brush a killer whale's teeth, tilt to balance on a unicycle, or shake to get rid of bees. The mini-games are random and unpredictable, so you never know what will happen next.
-
The mini-games are also very funny and absurd. You will see your character die in various dumb ways, such as getting electrocuted by a toaster, getting eaten by a piranha, getting run over by a train, or getting blown up by a dynamite. The graphics are colorful and cartoonish, and the sound effects are hilarious. The game does not take itself too seriously and aims to make you laugh.
-
A train safety campaign with a twist
-
Despite its humorous tone, Dumb Ways to Die 2 also has a serious message behind it. The game is part of a train safety campaign that aims to raise awareness about the dangers of behaving recklessly around trains. The game shows you how not to act near trains or railway tracks, such as standing too close to the edge of the platform, crossing the tracks when the gates are down, or walking on the tracks with headphones on. The game also encourages you to pledge to be safe around trains and share the message with your friends and family.
-
What is Dumb Ways to Die 2 Mod APK?
-
Dumb Ways to Die 2 is a free game that you can download from the Google Play Store or the App Store. However, if you want to enjoy some extra features and benefits, you might want to try the mod APK version of the game. A mod APK is a modified version of an original application that has been altered by a third-party developer. The mod APK usually offers some advantages that are not available in the official version, such as unlimited resources, unlocked items, or removed ads.
-
-
A modified version of the original game
-
Dumb Ways to Die 2 Mod APK is a modified version of Dumb Ways to Die 2 that has been created by an unknown developer. The mod APK is not affiliated with Metro Trains or the official game developers. The mod APK is also not available on the official app stores, but you can download it from various websites on the internet. However, you should be careful when downloading and installing the mod APK, as it may contain viruses, malware, or other harmful elements that can damage your device or compromise your privacy.
-
Features of the mod APK
-
The main reason why people download and install the mod APK is to enjoy some features that are not available in the original game. Here are some of the features that you can get from Dumb Ways to Die 2 Mod APK:
-
Unlimited tickets and coins
-
Tickets and coins are the main currencies in Dumb Ways to Die 2. You need tickets to play the mini-games, and you need coins to buy new characters, maps, or costumes. You can earn tickets and coins by playing the game, watching ads, or completing tasks. However, these methods are slow and tedious, and you may run out of tickets and coins quickly. With the mod APK, you can get unlimited tickets and coins for free. You can play as many mini-games as you want, and buy whatever you like from the shop.
-
All characters and maps unlocked
-
Dumb Ways to Die 2 has a lot of characters and maps that you can unlock by playing the game or spending coins. Each character has its own personality and appearance, and each map has its own theme and challenges. However, unlocking all of them can take a long time and a lot of coins. With the mod APK, you can access all the characters and maps from the start. You can choose your favorite character and map without any restrictions.
-
No ads and pop-ups
-
One of the most annoying things about Dumb Ways to Die 2 is the ads and pop-ups that interrupt your gameplay. The game has a lot of ads that show up before, during, or after each mini-game. The ads are usually for other games or apps that you may not be interested in. The ads also consume your data and battery life, and slow down your device. With the mod APK, you can get rid of all the ads and pop-ups for good. You can enjoy a smooth and uninterrupted gaming experience.
-
How to download and install the mod APK
-
If you want to try Dumb Ways to Die 2 Mod APK, you will need to follow some steps to download and install it on your device. Here are the steps that you need to follow:
-
Requirements and precautions
-
-
Make sure that your device has enough storage space to download and install the mod APK file.
-
Make sure that your device is compatible with the mod APK version. The mod APK may not work on some devices or Android versions.
-
Make sure that your device is connected to a stable internet connection while downloading the mod APK file.
-
Make sure that you have enabled the option to install apps from unknown sources on your device settings. This will allow you to install the mod APK file that is not from the official app stores.
-
Make sure that you have backed up your data before installing the mod APK file. The mod APK may overwrite or delete your existing data from the original game.
-
Make sure that you have scanned the mod APK file for viruses or malware before installing it. The mod APK may contain harmful elements that can damage your device or compromise your privacy.
-
-
Steps to follow
-
-
Go to a website that offers Dumb Ways to Die 2 Mod APK for free download. You can search for such websites on Google or any other search engine.
-
Select a reliable and trustworthy website from the search results. Keep in mind that mod APKs are distributed without the original developer's consent, and you may face legal consequences if you are caught using the mod APK.
-
Q: How can I update Dumb Ways to Die 2 Mod APK?
-
A: Dumb Ways to Die 2 Mod APK is not an official version of the game, and therefore, it does not receive regular updates from Metro Trains or the official game developers. The mod APK may also become outdated or incompatible with the latest version of the original game. Therefore, you may not be able to update the mod APK automatically or manually. You may have to wait for the mod APK developer to release a new version of the mod APK, or look for another website that offers an updated version of the mod APK.
-
Q: How can I uninstall Dumb Ways to Die 2 Mod APK?
-
A: If you want to uninstall Dumb Ways to Die 2 Mod APK from your device, you can follow these steps:
-
-
Go to your device settings and look for the apps or applications option.
-
Find and select Dumb Ways to Die 2 Mod APK from the list of apps on your device.
-
Tap on the uninstall or remove option, and confirm your action.
-
Wait for the app to be uninstalled from your device.
-
-
Q: What are some alternatives to Dumb Ways to Die 2 Mod APK?
-
A: If you are looking for some alternatives to Dumb Ways to Die 2 Mod APK, you can try these games:
-
-
Dumb Ways to Draw: This is a spin-off game that features the same characters and humor as Dumb Ways to Die 2, but with a different gameplay. In this game, you have to draw lines and shapes to guide your character through various obstacles and scenarios. The game is also based on a train safety campaign, and has a similar message as Dumb Ways to Die 2.
-
Dumb Ways to Dash: This is another spin-off game that features the same characters and humor as Dumb Ways to Die 2, but with a different gameplay. In this game, you have to run and dodge your way through various levels and environments. The game is also based on a train safety campaign, and has a similar message as Dumb Ways to Die 2.
-
Dumb Ways JR: This is a series of games that are designed for younger audiences, but still have the same charm and humor as Dumb Ways to Die 2. The games feature simpler and more educational mini-games that teach kids about safety, creativity, and problem-solving. The games are also based on a train safety campaign, and have a similar message as Dumb Ways to Die 2.
-
-
-
\ No newline at end of file
diff --git a/spaces/fatiXbelha/sd/Extreme Car Driving Simulator 3D Free APK Download - Race Drift and Crash in this Amazing Game.md b/spaces/fatiXbelha/sd/Extreme Car Driving Simulator 3D Free APK Download - Race Drift and Crash in this Amazing Game.md
deleted file mode 100644
index 883d56b42a7aeb217426f28a253d3933f27f3353..0000000000000000000000000000000000000000
--- a/spaces/fatiXbelha/sd/Extreme Car Driving Simulator 3D Free APK Download - Race Drift and Crash in this Amazing Game.md
+++ /dev/null
@@ -1,212 +0,0 @@
-
-
Android 3D Car Racing Games APK Free Download
-
If you are a fan of car racing games, you might want to try some of the best Android 3D car racing games available on the Google Play Store. These games offer realistic graphics, thrilling gameplay, and a variety of cars and tracks to choose from. However, some of these games might not be compatible with your device, or you might want to access some premium features without paying. In that case, you can download the APK files of these games and install them manually on your Android device. In this article, we will show you how to do that, and we will also review the top 5 Android 3D car racing games APK free download.
-
Introduction
-
What are Android 3D car racing games?
-
Android 3D car racing games are games that simulate the experience of driving a car in a race against other opponents. These games use 3D graphics to create a realistic and immersive environment, where you can see the details of your car, the road, the scenery, and the other cars. You can also control your car using touch screen gestures, tilt sensors, or virtual buttons. Some of these games also have online multiplayer modes, where you can compete with other players around the world.
Why download APK files for Android 3D car racing games?
-
APK files are the installation files for Android applications. They contain all the necessary data and code to run the app on your device. You can download APK files from various sources on the internet, such as official websites, third-party app stores, or file-sharing platforms. There are several reasons why you might want to download APK files for Android 3D car racing games, such as:
-
-
You want to play a game that is not available in your region or country.
-
You want to play a game that is not compatible with your device or operating system.
-
You want to play a game that has been removed from the Google Play Store due to legal issues or policy violations.
-
You want to play a game that has been modified or hacked to unlock some premium features or remove ads.
-
You want to play a game that has been updated with new content or bug fixes before it is released on the Google Play Store.
-
-
However, downloading APK files also comes with some risks and challenges, such as:
-
-
You might download a fake or malicious APK file that contains viruses or malware that can harm your device or steal your personal information.
-
You might download an outdated or incompatible APK file that does not work properly or causes errors on your device.
-
You might download an illegal or pirated APK file that violates the intellectual property rights of the game developers or publishers.
-
You might need to enable unknown sources on your device settings to allow the installation of APK files from outside the Google Play Store, which can expose your device to security threats.
-
You might need to root your device or use a third-party app installer to install some APK files that require special permissions or access.
-
-
Therefore, you should always be careful and cautious when downloading APK files for Android 3D car racing games. You should always download APK files from trusted and reputable sources, and scan them with a reliable antivirus software before installing them on your device. You should also respect the rights and interests of the game developers and publishers, and support them by purchasing their games or in-app items if you enjoy their products.
-
Top 5 Android 3D Car Racing Games APK Free Download
-
Now that you know what are Android 3D car racing games and why download APK files for them, let's take a look at the top 5 Android 3D car racing games APK free download. These games are among the most popular and highly rated car racing games on the Google Play Store, and they offer amazing graphics, gameplay, and features. You can download the APK files of these games from the links provided below, and follow the instructions to install them on your device. However, please note that these links are for informational purposes only, and we do not endorse or guarantee the safety or legality of these APK files. Download and install them at your own risk.
-
Race Master 3D - Car Racing
-
Race Master 3D - Car Racing is a fast-paced and addictive car racing game that lets you drive various cars on different tracks and modes. You can customize your car with different colors, stickers, wheels, and upgrades, and compete with other players online or offline. You can also collect coins and gems to unlock new cars and tracks, and enjoy realistic physics, sound effects, and weather conditions.
-
Features
-
-
Over 30 cars to choose from, including sports cars, muscle cars, trucks, and more.
-
Over 10 tracks to race on, including city, desert, snow, forest, and more.
-
Multiple game modes, such as career, free ride, time trial, elimination, and multiplayer.
-
Stunning 3D graphics and animations, with dynamic lighting and shadows.
-
Smooth and responsive controls, with tilt, touch, or steering wheel options.
-
Realistic car physics and damage system, with skid marks, smoke, fire, and sparks.
-
Immersive sound effects and music, with engine noises, screeching tires, and horn sounds.
-
Challenging AI opponents and leaderboards, with achievements and rewards.
-
-
How to download and install Race Master 3D - Car Racing APK
-
-
Click on this link to download the Race Master 3D - Car Racing APK file: [Race Master 3D - Car Racing APK].
-
Once the download is complete, locate the APK file on your device storage or download folder.
-
Tap on the APK file to start the installation process. You might need to enable unknown sources on your device settings if prompted.
-
Follow the on-screen instructions to complete the installation process.
-
Launch the game from your app drawer or home screen and enjoy!
-
-
Asphalt 9: Legends - Epic Car Action Racing Game
-
Asphalt 9: Legends - Epic Car Action Racing Game is one of the most popular and acclaimed car racing games on Android. It features over 100 of the most prestigious cars from famous brands like Ferrari, Lamborghini, Porsche, and more. You can race across stunning locations around the world, from Paris to Tokyo, in various modes and events. You can also join a club or create your own to team up with other players and dominate the leaderboards.
-
Features
-
-
A console-quality car racing experience on your mobile device, with incredible graphics and effects.
-
A realistic driving feel with the innovative TouchDrive system that lets you focus on the fun of racing.
-
A customizable control scheme that lets you choose between tilt, tap-to-steer, or manual options.
-
A career mode that spans over 60 seasons and 800 events, with different challenges and rewards.
-
A multiplayer mode that lets you compete with up to 7 players in real time or asynchronous races.
-
A club feature that lets you join forces with other players to unlock exclusive rewards and events.
-
A garage feature that lets you collect and customize your dream cars with various colors, materials, parts, and decals.
-
A nitro feature that lets you boost your speed and perform amazing stunts and drifts.
-
An editor feature that lets you create your own custom races and share them with the community.
-
-
How to download and install Asphalt 9: Legends - Epic Car Action Racing Game APK
-
-
Click on this link to download the Asphalt 9: Legends - Epic Car Action Racing Game APK file: [Asphalt 9: Legends - Epic Car Action Racing Game APK].
-
Once the download is complete, locate the APK file on your device storage or download folder.
-
Tap on the APK file to start the installation process. You might need to enable unknown sources on your device settings if prompted.
-
Follow the on-screen instructions to complete the installation process.
-
Launch the game from your app drawer or home screen and enjoy!
-
-
Real Racing 3
-
Real Racing 3 is a realistic and immersive car racing game that features over 250 cars from top manufacturers like Ford, Ferrari, Aston Martin, and more. You can race on over 40 tracks from real-world locations, such as Silverstone, Le Mans, Dubai, and more. You can also compete with other players in various modes and events, such as Time Trials, Grand Prix, Formula 1, and more. You can also customize your car with different paints, vinyls, rims, and upgrades.
-
Features
-
-
A realistic car racing simulation with stunning graphics and physics.
-
A dynamic and adaptive AI that adjusts to your skill level and racing style.
-
A social and competitive multiplayer mode that lets you race with up to 8 players in real time or against their ghost recordings.
-
A Time Shifted Multiplayer (TSM) feature that lets you race with anyone at any time, even if they are offline.
-
A career mode that spans over 4,000 events, with different challenges and rewards.
-
A garage feature that lets you collect and upgrade your cars with various parts and tuning options.
-
A camera feature that lets you capture and share your best moments and achievements.
-
A live service feature that delivers new content and updates regularly, such as new cars, tracks, modes, and events.
-
-
How to download and install Real Racing 3 APK
-
-
Click on this link to download the Real Racing 3 APK file: [Real Racing 3 APK].
-
Once the download is complete, locate the APK file on your device storage or download folder.
-
Tap on the APK file to start the installation process. You might need to enable unknown sources on your device settings if prompted.
-
Follow the on-screen instructions to complete the installation process.
-
Launch the game from your app drawer or home screen and enjoy!
-
-
CSR Racing 2 - Free Car Racing Game
-
CSR Racing 2 - Free Car Racing Game is a drag racing game that lets you drive some of the most iconic cars from brands like Lamborghini, Bugatti, Koenigsegg, and more. You can race against other players in various modes and events, such as Street Races, Crew Battles, Ladder Races, and more. You can also customize your car with different paints, wraps, wheels, calipers, interiors, and more. You can also join a crew or create your own to team up with other players and compete for exclusive prizes.
-
-
Features
-
-
A next-gen drag racing experience with stunning graphics and animations.
-
A realistic car physics and performance system that lets you tune your car to suit your preferences.
-
A social and interactive multiplayer mode that lets you chat with other players, join or create crews, challenge or accept races, and view leaderboards.
-
A campaign mode that lets you rise from a rookie to a pro by defeating various bosses and crews.
-
A garage feature that lets you collect and customize over 200 cars with various options and accessories.
-
An AR mode that lets you view your cars in real life using your device's camera.
-
A live service feature that delivers new content and updates regularly, such as new cars, modes, events, and features.
-
-
How to download and install CSR Racing 2 - Free Car Racing Game APK
-
-
Click on this link to download the CSR Racing 2 - Free Car Racing Game APK file: [CSR Racing 2 - Free Car Racing Game APK].
-
Once the download is complete, locate the APK file on your device storage or download folder.
-
Tap on the APK file to start the installation process. You might need to enable unknown sources on your device settings if prompted.
-
Follow the on-screen instructions to complete the installation process.
-
Launch the game from your app drawer or home screen and enjoy!
-
-
Need for Speed™ No Limits - Car Racing Game
-
Need for Speed™ No Limits - Car Racing Game is a street racing game that lets you drive some of the most powerful and exotic cars from brands like McLaren, Pagani, Lamborghini, and more. You can race against other players in various modes and events, such as Underground Rivals, Blackridge Rivals, Special Events, and more. You can also customize your car with different parts, paints, wraps, decals, and more. You can also join a crew or create your own to team up with other players and compete for exclusive rewards.
-
Features
-
-
A thrilling street racing experience with stunning graphics and effects.
-
A fast and furious gameplay with nitro boosts, drifts, jumps, and crashes.
-
A competitive multiplayer mode that lets you race with up to 8 players in real time or against their ghost recordings.
-
A story mode that lets you challenge various characters and crews in over 1,000 races.
-
A garage feature that lets you collect and customize over 100 cars with various options and accessories.
-
A live service feature that delivers new content and updates regularly, such as new cars, modes, events, and features.
-
-
How to download and install Need for Speed™ No Limits - Car Racing Game APK
-
-
Click on this link to download the Need for Speed™ No Limits - Car Racing Game APK file: [Need for Speed™ No Limits - Car Racing Game APK].
-
Once the download is complete, locate the APK file on your device storage or download folder.
-
Tap on the APK file to start the installation process. You might need to enable unknown sources on your device settings if prompted.
-
Follow the on-screen instructions to complete the installation process.
-
Launch the game from your app drawer or home screen and enjoy!
-
-
Conclusion
-
In conclusion, Android 3D car racing games are fun and exciting games that let you drive various cars on different tracks and modes. You can download the APK files of these games from the links provided above, and install them manually on your device. However, you should always be careful and cautious when downloading APK files, as they might contain viruses, malware, or illegal content. You should also respect the rights and interests of the game developers and publishers, and support them by purchasing their games or in-app items if you enjoy their products. We hope you found this article helpful and informative, and we wish you a great time playing these games!
-
FAQs
-
Here are some of the frequently asked questions about Android 3D car racing games APK free download:
-
-
What are the benefits of downloading APK files for Android 3D car racing games?
-
The benefits of downloading APK files for Android 3D car racing games are that you can play games that are not available in your region or country, not compatible with your device or operating system, removed from the Google Play Store, modified or hacked to unlock some premium features or remove ads, or updated with new content or bug fixes before they are released on the Google Play Store.
-
What are the risks of downloading APK files for Android 3D car racing games?
-
The risks of downloading APK files for Android 3D car racing games are that you might download fake or malicious APK files that contain viruses or malware that can harm your device or steal your personal information, outdated or incompatible APK files that do not work properly or cause errors on your device, illegal or pirated APK files that violate the intellectual property rights of the game developers or publishers, or need to enable unknown sources on your device settings or root your device to install some APK files that require special permissions or access.
-
How can I download APK files for Android 3D car racing games safely?
-
You can download APK files for Android 3D car racing games safely by following these steps:
-
-
Always download APK files from trusted and reputable sources, such as official websites, third-party app stores, or file-sharing platforms that have positive reviews and ratings.
-
Always scan the APK files with a reliable antivirus software before installing them on your device, and delete any suspicious or infected files immediately.
-
Always check the permissions and access that the APK files require, and only grant them if they are necessary and reasonable for the game to function.
-
Always backup your device data and settings before installing any APK files, and restore them if anything goes wrong or you want to uninstall the game.
-
Always update your device software and security patches regularly, and avoid installing any APK files that are not compatible with your device or operating system.
-
-
What are some of the best Android 3D car racing games?
-
Some of the best Android 3D car racing games are Race Master 3D - Car Racing, Asphalt 9: Legends - Epic Car Action Racing Game, Real Racing 3, CSR Racing 2 - Free Car Racing Game, and Need for Speed™ No Limits - Car Racing Game. These games offer realistic graphics, thrilling gameplay, and a variety of cars and tracks to choose from. You can download the APK files of these games from the links provided in this article, and install them manually on your device.
-
How can I improve my skills and performance in Android 3D car racing games?
-
You can improve your skills and performance in Android 3D car racing games by following these tips:
-
-
Practice regularly and learn the controls, physics, and features of each game.
-
Choose the right car for each track and mode, and customize it with the best parts and upgrades.
-
Use the nitro boost wisely and strategically, and avoid wasting it or overheating it.
-
Master the art of drifting, jumping, and crashing, and use them to gain an advantage over your opponents.
-
Study the layout and characteristics of each track, and find the best routes, shortcuts, and tricks.
-
Challenge yourself with different difficulty levels and modes, and learn from your mistakes and feedback.
-
Compete with other players online or offline, and learn from their strategies and techniques.
-
-
-
\ No newline at end of file
diff --git a/spaces/fclong/summary/fengshen/models/roformer/__init__.py b/spaces/fclong/summary/fengshen/models/roformer/__init__.py
deleted file mode 100644
index c55c090f25446ec2cf60d632dacdb53a8928e25e..0000000000000000000000000000000000000000
--- a/spaces/fclong/summary/fengshen/models/roformer/__init__.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# coding=utf-8
-# Copyright 2021 The IDEA Authors. All rights reserved.
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from typing import TYPE_CHECKING
-
-from transformers.file_utils import _LazyModule, is_torch_available
-
-
-_import_structure = {
- "configuration_roformer": ["RoFormerConfig"],
- "tokenization_roformer": ["RoFormerTokenizer"],
-}
-
-if is_torch_available():
- _import_structure["modeling_roformer"] = [
- "RoFormerModel",
- "RoFormerForMaskedLM",
- "RoFormerForMultipleChoice",
- "RoFormerPreTrainedModel",
- "RoFormerForQuestionAnswering",
- "RoFormerForSequenceClassification",
- "RoFormerForTokenClassification",
- ]
-
-
-if TYPE_CHECKING:
- from .configuration_roformer import RoFormerConfig
- from .tokenization_roformer import RoFormerTokenizer
-
- if is_torch_available():
- from .modeling_roformer import (
- RoFormerModel,
- RoFormerForMaskedLM,
- RoFormerForMultipleChoice,
- RoFormerPreTrainedModel,
- RoFormerForQuestionAnswering,
- RoFormerForSequenceClassification,
- RoFormerForTokenClassification,
- )
-
-else:
- import sys
-
- sys.modules[__name__] = _LazyModule(
- __name__, globals()["__file__"], _import_structure)
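The `_LazyModule` indirection above defers the torch-dependent imports until a name is first accessed; a minimal usage sketch (assuming the `fengshen` package is installed and importable):

```python
# Importing the package is cheap: submodules are loaded lazily on first attribute access.
import fengshen.models.roformer as roformer

config_cls = roformer.RoFormerConfig  # resolves configuration_roformer on demand
model_cls = roformer.RoFormerModel    # first access triggers the torch-backed modeling import
print(config_cls.__name__, model_cls.__name__)
```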
diff --git a/spaces/felixz/open_llm_leaderboard/src/assets/text_content.py b/spaces/felixz/open_llm_leaderboard/src/assets/text_content.py
deleted file mode 100644
index 71d642419341ea8f2245df56c4d087e189617b1f..0000000000000000000000000000000000000000
--- a/spaces/felixz/open_llm_leaderboard/src/assets/text_content.py
+++ /dev/null
@@ -1,168 +0,0 @@
-from src.display_models.model_metadata_type import ModelType
-
-TITLE = """
-🤗 Open LLM Leaderboard
-"""
-
-INTRODUCTION_TEXT = """
-📐 The 🤗 Open LLM Leaderboard aims to track, rank and evaluate open LLMs and chatbots.
-
-🤗 Submit a model for automated evaluation on the 🤗 GPU cluster on the "Submit" page!
-The leaderboard's backend runs the great [Eleuther AI Language Model Evaluation Harness](https://github.com/EleutherAI/lm-evaluation-harness) - read more details in the "About" page!
-"""
-
-LLM_BENCHMARKS_TEXT = f"""
-# Context
-With the plethora of large language models (LLMs) and chatbots being released week upon week, often with grandiose claims of their performance, it can be hard to filter out the genuine progress that is being made by the open-source community and which model is the current state of the art.
-
-## Icons
-{ModelType.PT.to_str(" : ")} model
-{ModelType.FT.to_str(" : ")} model
-{ModelType.IFT.to_str(" : ")} model
-{ModelType.RL.to_str(" : ")} model
-If there is no icon, we have not uploaded the information on the model yet; feel free to open an issue with the model information!
-
-🏴‍☠️ indicates that this model has been flagged by the community, and should probably be ignored! Clicking the icon will redirect you to the discussion about the model.
-(For example, the model was trained on the evaluation data, and is therefore cheating on the leaderboard.)
-
-## How it works
-
-📈 We evaluate models on 4 key benchmarks using the Eleuther AI Language Model Evaluation Harness, a unified framework to test generative language models on a large number of different evaluation tasks.
-
-- AI2 Reasoning Challenge (25-shot) - a set of grade-school science questions.
-- HellaSwag (10-shot) - a test of commonsense inference, which is easy for humans (~95%) but challenging for SOTA models.
-- MMLU (5-shot) - a test to measure a text model's multitask accuracy. The test covers 57 tasks including elementary mathematics, US history, computer science, law, and more.
-- TruthfulQA (0-shot) - a test to measure a model’s propensity to reproduce falsehoods commonly found online. Note: TruthfulQA in the Harness is actually a minimum 6-shot task, as 6 examples are systematically prepended, even when launched with 0 few-shot examples.
-
-For all these evaluations, a higher score is a better score.
-We chose these benchmarks as they test a variety of reasoning and general knowledge across a wide variety of fields in 0-shot and few-shot settings.
-
-## Details and logs
-You can find:
-- detailed numerical results in the `results` Hugging Face dataset: https://huggingface.co/datasets/open-llm-leaderboard/results
-- details on the input/outputs for the models in the `details` Hugging Face dataset: https://huggingface.co/datasets/open-llm-leaderboard/details
-- community queries and running status in the `requests` Hugging Face dataset: https://huggingface.co/datasets/open-llm-leaderboard/requests
-
-## Reproducibility
-To reproduce our results, here are the commands you can run, using [this version](https://github.com/EleutherAI/lm-evaluation-harness/tree/b281b0921b636bc36ad05c0b0b0763bd6dd43463) of the Eleuther AI Harness:
-`python main.py --model=hf-causal --model_args="pretrained=<model_name>,use_accelerate=True,revision=<model_revision>"`
-` --tasks=<task_list> --num_fewshot=<n_few_shot> --batch_size=2 --output_path=<output_path>`
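For concreteness, a hypothetical filled-in invocation (the model name, task, and paths are illustrative placeholders, not from the source; exact task spelling may differ between harness versions):

```sh
python main.py --model=hf-causal \
  --model_args="pretrained=EleutherAI/gpt-j-6b,use_accelerate=True,revision=main" \
  --tasks=arc_challenge --num_fewshot=25 --batch_size=2 --output_path=./results/arc
```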
-
-The total batch size we get for models which fit on one A100 node is 16 (8 GPUs * 2). If you don't use parallelism, adapt your batch size to fit.
-*You can expect results to vary slightly for different batch sizes because of padding.*
-
-The tasks and few shots parameters are:
-- ARC: 25-shot, *arc-challenge* (`acc_norm`)
-- HellaSwag: 10-shot, *hellaswag* (`acc_norm`)
-- TruthfulQA: 0-shot, *truthfulqa-mc* (`mc2`)
-- MMLU: 5-shot, *hendrycksTest-abstract_algebra,hendrycksTest-anatomy,hendrycksTest-astronomy,hendrycksTest-business_ethics,hendrycksTest-clinical_knowledge,hendrycksTest-college_biology,hendrycksTest-college_chemistry,hendrycksTest-college_computer_science,hendrycksTest-college_mathematics,hendrycksTest-college_medicine,hendrycksTest-college_physics,hendrycksTest-computer_security,hendrycksTest-conceptual_physics,hendrycksTest-econometrics,hendrycksTest-electrical_engineering,hendrycksTest-elementary_mathematics,hendrycksTest-formal_logic,hendrycksTest-global_facts,hendrycksTest-high_school_biology,hendrycksTest-high_school_chemistry,hendrycksTest-high_school_computer_science,hendrycksTest-high_school_european_history,hendrycksTest-high_school_geography,hendrycksTest-high_school_government_and_politics,hendrycksTest-high_school_macroeconomics,hendrycksTest-high_school_mathematics,hendrycksTest-high_school_microeconomics,hendrycksTest-high_school_physics,hendrycksTest-high_school_psychology,hendrycksTest-high_school_statistics,hendrycksTest-high_school_us_history,hendrycksTest-high_school_world_history,hendrycksTest-human_aging,hendrycksTest-human_sexuality,hendrycksTest-international_law,hendrycksTest-jurisprudence,hendrycksTest-logical_fallacies,hendrycksTest-machine_learning,hendrycksTest-management,hendrycksTest-marketing,hendrycksTest-medical_genetics,hendrycksTest-miscellaneous,hendrycksTest-moral_disputes,hendrycksTest-moral_scenarios,hendrycksTest-nutrition,hendrycksTest-philosophy,hendrycksTest-prehistory,hendrycksTest-professional_accounting,hendrycksTest-professional_law,hendrycksTest-professional_medicine,hendrycksTest-professional_psychology,hendrycksTest-public_relations,hendrycksTest-security_studies,hendrycksTest-sociology,hendrycksTest-us_foreign_policy,hendrycksTest-virology,hendrycksTest-world_religions* (average of all the results `acc`)
-
-## Quantization
-To get more information about quantization, see:
-- 8 bits: [blog post](https://huggingface.co/blog/hf-bitsandbytes-integration), [paper](https://arxiv.org/abs/2208.07339)
-- 4 bits: [blog post](https://huggingface.co/blog/4bit-transformers-bitsandbytes), [paper](https://arxiv.org/abs/2305.14314)
-
-## More resources
-If you still have questions, you can check our FAQ [here](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/179)!
-We also gather cool resources from the community, other teams, and other labs [here](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/174)!
-"""
-
-EVALUATION_QUEUE_TEXT = """
-# Evaluation Queue for the 🤗 Open LLM Leaderboard
-
-Models added here will be automatically evaluated on the 🤗 cluster.
-
-## Some good practices before submitting a model
-
-### 1) Make sure you can load your model and tokenizer using AutoClasses:
-```python
-from transformers import AutoConfig, AutoModel, AutoTokenizer
-config = AutoConfig.from_pretrained("your model name", revision=revision)
-model = AutoModel.from_pretrained("your model name", revision=revision)
-tokenizer = AutoTokenizer.from_pretrained("your model name", revision=revision)
-```
-If this step fails, follow the error messages to debug your model before submitting it. It's likely your model has been improperly uploaded.
-
-Note: make sure your model is public!
-Note: if your model needs `trust_remote_code=True`, we do not support this option yet, but we are working on adding it. Stay posted!
-
-### 2) Convert your model weights to [safetensors](https://huggingface.co/docs/safetensors/index)
-It's a new format for storing weights which is safer and faster to load and use. It will also allow us to add the number of parameters of your model to the `Extended Viewer`!
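A minimal sketch of one way to do the conversion (assuming a standard `transformers` checkpoint; the model name and output path below are placeholders):

```python
from transformers import AutoModel

# Reload the checkpoint and re-save it with safetensors serialization.
model = AutoModel.from_pretrained("your-org/your-model")
model.save_pretrained("./your-model-safetensors", safe_serialization=True)
```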
-
-### 3) Make sure your model has an open license!
-This is a leaderboard for Open LLMs, and we'd love for as many people as possible to know they can use your model 🤗
-
-### 4) Fill up your model card
-When we add extra information about models to the leaderboard, it will be automatically taken from the model card.
-
-## In case of model failure
-If your model is displayed in the `FAILED` category, its execution stopped.
-Make sure you have followed the above steps first.
-If everything is done, check that you can launch the EleutherAI Harness on your model locally, using the above command without modifications (you can add `--limit` to limit the number of examples per task).
-"""
-
-CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
-CITATION_BUTTON_TEXT = r"""
-@misc{open-llm-leaderboard,
- author = {Edward Beeching, Clémentine Fourrier, Nathan Habib, Sheon Han, Nathan Lambert, Nazneen Rajani, Omar Sanseviero, Lewis Tunstall, Thomas Wolf},
- title = {Open LLM Leaderboard},
- year = {2023},
- publisher = {Hugging Face},
- howpublished = "\url{https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard}"
-}
-@software{eval-harness,
- author = {Gao, Leo and
- Tow, Jonathan and
- Biderman, Stella and
- Black, Sid and
- DiPofi, Anthony and
- Foster, Charles and
- Golding, Laurence and
- Hsu, Jeffrey and
- McDonell, Kyle and
- Muennighoff, Niklas and
- Phang, Jason and
- Reynolds, Laria and
- Tang, Eric and
- Thite, Anish and
- Wang, Ben and
- Wang, Kevin and
- Zou, Andy},
- title = {A framework for few-shot language model evaluation},
- month = sep,
- year = 2021,
- publisher = {Zenodo},
- version = {v0.0.1},
- doi = {10.5281/zenodo.5371628},
- url = {https://doi.org/10.5281/zenodo.5371628}
-}
-@misc{clark2018think,
- title={Think you have Solved Question Answering? Try ARC, the AI2 Reasoning Challenge},
- author={Peter Clark and Isaac Cowhey and Oren Etzioni and Tushar Khot and Ashish Sabharwal and Carissa Schoenick and Oyvind Tafjord},
- year={2018},
- eprint={1803.05457},
- archivePrefix={arXiv},
- primaryClass={cs.AI}
-}
-@misc{zellers2019hellaswag,
- title={HellaSwag: Can a Machine Really Finish Your Sentence?},
- author={Rowan Zellers and Ari Holtzman and Yonatan Bisk and Ali Farhadi and Yejin Choi},
- year={2019},
- eprint={1905.07830},
- archivePrefix={arXiv},
- primaryClass={cs.CL}
-}
-@misc{hendrycks2021measuring,
- title={Measuring Massive Multitask Language Understanding},
- author={Dan Hendrycks and Collin Burns and Steven Basart and Andy Zou and Mantas Mazeika and Dawn Song and Jacob Steinhardt},
- year={2021},
- eprint={2009.03300},
- archivePrefix={arXiv},
- primaryClass={cs.CY}
-}
-@misc{lin2022truthfulqa,
- title={TruthfulQA: Measuring How Models Mimic Human Falsehoods},
- author={Stephanie Lin and Jacob Hilton and Owain Evans},
- year={2022},
- eprint={2109.07958},
- archivePrefix={arXiv},
- primaryClass={cs.CL}
-}"""
diff --git a/spaces/fffiloni/controlnet-animation-doodle/node_modules/cookie/README.md b/spaces/fffiloni/controlnet-animation-doodle/node_modules/cookie/README.md
deleted file mode 100644
index 5449c3a2587996d44b242281692c01ad2d2a3cf3..0000000000000000000000000000000000000000
--- a/spaces/fffiloni/controlnet-animation-doodle/node_modules/cookie/README.md
+++ /dev/null
@@ -1,302 +0,0 @@
-# cookie
-
-[![NPM Version][npm-version-image]][npm-url]
-[![NPM Downloads][npm-downloads-image]][npm-url]
-[![Node.js Version][node-version-image]][node-version-url]
-[![Build Status][github-actions-ci-image]][github-actions-ci-url]
-[![Test Coverage][coveralls-image]][coveralls-url]
-
-Basic HTTP cookie parser and serializer for HTTP servers.
-
-## Installation
-
-This is a [Node.js](https://nodejs.org/en/) module available through the
-[npm registry](https://www.npmjs.com/). Installation is done using the
-[`npm install` command](https://docs.npmjs.com/getting-started/installing-npm-packages-locally):
-
-```sh
-$ npm install cookie
-```
-
-## API
-
-```js
-var cookie = require('cookie');
-```
-
-### cookie.parse(str, options)
-
-Parse an HTTP `Cookie` header string and return an object of all cookie name-value pairs.
-The `str` argument is the string representing a `Cookie` header value and `options` is an
-optional object containing additional parsing options.
-
-```js
-var cookies = cookie.parse('foo=bar; equation=E%3Dmc%5E2');
-// { foo: 'bar', equation: 'E=mc^2' }
-```
-
-#### Options
-
-`cookie.parse` accepts these properties in the options object.
-
-##### decode
-
-Specifies a function that will be used to decode a cookie's value. Since the value of a cookie
-has a limited character set (and must be a simple string), this function can be used to decode
-a previously-encoded cookie value into a JavaScript string or other object.
-
-The default function is the global `decodeURIComponent`, which will decode any URL-encoded
-sequences into their byte representations.
-
-**note** if an error is thrown from this function, the original, non-decoded cookie value will
-be returned as the cookie's value.
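For instance, a sketch of a custom decoder for values that were base64-encoded when the cookie was set (an assumption: this only round-trips if a matching `encode` was used at serialize time):

```js
var cookie = require('cookie');

// Values were base64-encoded by a matching encode function at serialize time.
var cookies = cookie.parse('token=aGVsbG8=', {
  decode: function (val) { return Buffer.from(val, 'base64').toString('utf8'); }
});
// { token: 'hello' }
```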
-
-### cookie.serialize(name, value, options)
-
-Serialize a cookie name-value pair into a `Set-Cookie` header string. The `name` argument is the
-name for the cookie, the `value` argument is the value to set the cookie to, and the `options`
-argument is an optional object containing additional serialization options.
-
-```js
-var setCookie = cookie.serialize('foo', 'bar');
-// foo=bar
-```
-
-#### Options
-
-`cookie.serialize` accepts these properties in the options object.
-
-##### domain
-
-Specifies the value for the [`Domain` `Set-Cookie` attribute][rfc-6265-5.2.3]. By default, no
-domain is set, and most clients will consider the cookie to apply to only the current domain.
-
-##### encode
-
-Specifies a function that will be used to encode a cookie's value. Since the value of a cookie
-has a limited character set (and must be a simple string), this function can be used to encode
-a value into a string suited for a cookie's value.
-
-The default function is the global `encodeURIComponent`, which will encode a JavaScript string
-into UTF-8 byte sequences and then URL-encode any that fall outside of the cookie range.
-
-##### expires
-
-Specifies the `Date` object to be the value for the [`Expires` `Set-Cookie` attribute][rfc-6265-5.2.1].
-By default, no expiration is set, and most clients will consider this a "non-persistent cookie" and
-will delete it on a condition like exiting a web browser application.
-
-**note** the [cookie storage model specification][rfc-6265-5.3] states that if both `expires` and
-`maxAge` are set, then `maxAge` takes precedence, but it is possible that not all clients will obey this,
-so if both are set, they should point to the same date and time.
-
-##### httpOnly
-
-Specifies the `boolean` value for the [`HttpOnly` `Set-Cookie` attribute][rfc-6265-5.2.6]. When truthy,
-the `HttpOnly` attribute is set, otherwise it is not. By default, the `HttpOnly` attribute is not set.
-
-**note** be careful when setting this to `true`, as compliant clients will not allow client-side
-JavaScript to see the cookie in `document.cookie`.
-
-##### maxAge
-
-Specifies the `number` (in seconds) to be the value for the [`Max-Age` `Set-Cookie` attribute][rfc-6265-5.2.2].
-The given number will be converted to an integer by rounding down. By default, no maximum age is set.
-
-**note** the [cookie storage model specification][rfc-6265-5.3] states that if both `expires` and
-`maxAge` are set, then `maxAge` takes precedence, but it is possible that not all clients will obey this,
-so if both are set, they should point to the same date and time.
-
-##### path
-
-Specifies the value for the [`Path` `Set-Cookie` attribute][rfc-6265-5.2.4]. By default, the path
-is considered the ["default path"][rfc-6265-5.1.4].
-
-##### priority
-
-Specifies the `string` to be the value for the [`Priority` `Set-Cookie` attribute][rfc-west-cookie-priority-00-4.1].
-
- - `'low'` will set the `Priority` attribute to `Low`.
- - `'medium'` will set the `Priority` attribute to `Medium`, the default priority when not set.
- - `'high'` will set the `Priority` attribute to `High`.
-
-More information about the different priority levels can be found in
-[the specification][rfc-west-cookie-priority-00-4.1].
-
-**note** This is an attribute that has not yet been fully standardized, and may change in the future.
-This also means many clients may ignore this attribute until they understand it.
-
-##### sameSite
-
-Specifies the `boolean` or `string` to be the value for the [`SameSite` `Set-Cookie` attribute][rfc-6265bis-09-5.4.7].
-
- - `true` will set the `SameSite` attribute to `Strict` for strict same site enforcement.
- - `false` will not set the `SameSite` attribute.
- - `'lax'` will set the `SameSite` attribute to `Lax` for lax same site enforcement.
- - `'none'` will set the `SameSite` attribute to `None` for an explicit cross-site cookie.
- - `'strict'` will set the `SameSite` attribute to `Strict` for strict same site enforcement.
-
-More information about the different enforcement levels can be found in
-[the specification][rfc-6265bis-09-5.4.7].
-
-**note** This is an attribute that has not yet been fully standardized, and may change in the future.
-This also means many clients may ignore this attribute until they understand it.
-
-##### secure
-
-Specifies the `boolean` value for the [`Secure` `Set-Cookie` attribute][rfc-6265-5.2.5]. When truthy,
-the `Secure` attribute is set, otherwise it is not. By default, the `Secure` attribute is not set.
-
-**note** be careful when setting this to `true`, as compliant clients will not send the cookie back to
-the server in the future if the browser does not have an HTTPS connection.
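Pulling several of the attributes above together, a sketch of a typical session cookie (the exact attribute order in the output may vary by version):

```js
var cookie = require('cookie');

var setCookie = cookie.serialize('session', 'abc123', {
  httpOnly: true,        // hidden from client-side document.cookie
  secure: true,          // only sent over HTTPS
  sameSite: 'lax',       // lax same-site enforcement
  maxAge: 60 * 60 * 24,  // one day, in seconds
  path: '/'
});
// session=abc123; Max-Age=86400; Path=/; HttpOnly; Secure; SameSite=Lax
```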
-
-## Example
-
-The following example uses this module in conjunction with the Node.js core HTTP server
-to prompt a user for their name and display it back on future visits.
-
-```js
-var cookie = require('cookie');
-var escapeHtml = require('escape-html');
-var http = require('http');
-var url = require('url');
-
-function onRequest(req, res) {
- // Parse the query string
- var query = url.parse(req.url, true, true).query;
-
- if (query && query.name) {
- // Set a new cookie with the name
- res.setHeader('Set-Cookie', cookie.serialize('name', String(query.name), {
- httpOnly: true,
- maxAge: 60 * 60 * 24 * 7 // 1 week
- }));
-
- // Redirect back after setting cookie
- res.statusCode = 302;
- res.setHeader('Location', req.headers.referer || '/');
- res.end();
- return;
- }
-
- // Parse the cookies on the request
- var cookies = cookie.parse(req.headers.cookie || '');
-
- // Get the visitor name set in the cookie
- var name = cookies.name;
-
- res.setHeader('Content-Type', 'text/html; charset=UTF-8');
-
- if (name) {
- res.write('
",
- unsafe_allow_html=True
-)
-expander = st.expander("Click here to close this intro", expanded=True)
-expander.write(
- """
- This web app allows you to perform common Natural Language Processing tasks, select a task below to get started.
- These tasks are intended to help you validate your intuition and build a proof of concept for your idea.
-
-    If a task you deem useful is not listed here, feel free to get in touch with the Founder's Studio team at francesco.capuano@bain.com.
- Happy hackathon!
- """
-)
-st.header("About this app")
-st.write("""
- Some wording on the app and the tasks it can perform :)
-""")
-st.subheader(":point_left: Select a task from the left to get started!")
-
-with st.sidebar:
- st.write("Welcome! :wave:")
- st.write("Select a task to supercharge your productivity from the ones below :point_down:")
-
- OPTION1="Chat with a file 💬📖 - *Upload a file and ask questions about it*"
- OPTION2="Text summarization 🔎 - *Upload a file and get it summarized*"
-    OPTION_N="Make a suggestion 🤔 - *Let the team know what task you would like to have at your disposal*"
-
- option = st.radio(
- "Please select a task 🤖",
- options=[OPTION1, OPTION2, OPTION_N],
- key="task_selection"
- )
-
- confirm = st.button("Confirm", key="task_selection_confirm")
- if confirm:
- st.session_state["task_confirmed"] = True
-
-if st.session_state.get("task_confirmed"):
- # only execute the actual app code when the user confirms the task selection
- if st.session_state.get("task_selection") == OPTION1:
- from qa import qa_main
- with st.container():
- qa_main()
-
- elif st.session_state.get("task_selection") == OPTION2:
- from summarization import summarization_main
- with st.container():
- summarization_main()
-
- elif st.session_state.get("task_selection") == OPTION_N:
- from mailing import mailing_main
- with st.container():
- user_suggestion = st.text_input(
- "What other task would you like to perform?",
- placeholder="Transform meeting transcripts into rainbow-colored unicorns"
- )
- if user_suggestion:
- st.write("""
- Thanks for contributing with your suggestion! We are carefully reviewing every suggestion.
- If you wish to further discuss your task suggestion, consider reaching out to francesco.capuano@bain.com.
- We will get back to you as soon as possible!
- """)
-
- mailing_main(
- subject="**NEW TASK SUGGESTION** - Automatic email.",
-            body=f"User suggestion:\n{user_suggestion}",
- to_address="francesco.capuano@bain.com"
- )
- st.stop()
-
diff --git a/spaces/fuckyoudeki/AutoGPT/autogpt/commands/git_operations.py b/spaces/fuckyoudeki/AutoGPT/autogpt/commands/git_operations.py
deleted file mode 100644
index 028f3b8da44c85e01d20ccc5d4a5fa72c759008b..0000000000000000000000000000000000000000
--- a/spaces/fuckyoudeki/AutoGPT/autogpt/commands/git_operations.py
+++ /dev/null
@@ -1,26 +0,0 @@
-"""Git operations for autogpt"""
-import git
-
-from autogpt.config import Config
-from autogpt.workspace import path_in_workspace
-
-CFG = Config()
-
-
-def clone_repository(repo_url: str, clone_path: str) -> str:
- """Clone a GitHub repository locally
-
- Args:
- repo_url (str): The URL of the repository to clone
- clone_path (str): The path to clone the repository to
-
- Returns:
- str: The result of the clone operation"""
- split_url = repo_url.split("//")
- auth_repo_url = f"//{CFG.github_username}:{CFG.github_api_key}@".join(split_url)
- safe_clone_path = path_in_workspace(clone_path)
- try:
- git.Repo.clone_from(auth_repo_url, safe_clone_path)
- return f"""Cloned {repo_url} to {safe_clone_path}"""
- except Exception as e:
- return f"Error: {str(e)}"
diff --git a/spaces/fun-research/FC-CLIP/fcclip/modeling/meta_arch/fcclip_head.py b/spaces/fun-research/FC-CLIP/fcclip/modeling/meta_arch/fcclip_head.py
deleted file mode 100644
index 86a1709457d40606a7ca1cf9cc13e27837cd116d..0000000000000000000000000000000000000000
--- a/spaces/fun-research/FC-CLIP/fcclip/modeling/meta_arch/fcclip_head.py
+++ /dev/null
@@ -1,95 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-import logging
-from copy import deepcopy
-from typing import Callable, Dict, List, Optional, Tuple, Union
-
-import fvcore.nn.weight_init as weight_init
-from torch import nn
-from torch.nn import functional as F
-
-from detectron2.config import configurable
-from detectron2.layers import Conv2d, ShapeSpec, get_norm
-from detectron2.modeling import SEM_SEG_HEADS_REGISTRY
-
-from ..transformer_decoder.fcclip_transformer_decoder import build_transformer_decoder
-from ..pixel_decoder.msdeformattn import build_pixel_decoder
-
-
-@SEM_SEG_HEADS_REGISTRY.register()
-class FCCLIPHead(nn.Module):
-
- @configurable
- def __init__(
- self,
- input_shape: Dict[str, ShapeSpec],
- *,
- num_classes: int,
- pixel_decoder: nn.Module,
- loss_weight: float = 1.0,
- ignore_value: int = -1,
- # extra parameters
- transformer_predictor: nn.Module,
- transformer_in_feature: str,
- ):
- """
- NOTE: this interface is experimental.
- Args:
- input_shape: shapes (channels and stride) of the input features
- num_classes: number of classes to predict
- pixel_decoder: the pixel decoder module
- loss_weight: loss weight
- ignore_value: category id to be ignored during training.
- transformer_predictor: the transformer decoder that makes prediction
- transformer_in_feature: input feature name to the transformer_predictor
- """
- super().__init__()
- input_shape = sorted(input_shape.items(), key=lambda x: x[1].stride)
- self.in_features = [k for k, v in input_shape]
- feature_strides = [v.stride for k, v in input_shape]
- feature_channels = [v.channels for k, v in input_shape]
-
- self.ignore_value = ignore_value
- self.common_stride = 4
- self.loss_weight = loss_weight
-
- self.pixel_decoder = pixel_decoder
- self.predictor = transformer_predictor
- self.transformer_in_feature = transformer_in_feature
-
- self.num_classes = num_classes
-
- @classmethod
- def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]):
- # figure out in_channels to transformer predictor
- if cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE == "multi_scale_pixel_decoder":
- transformer_predictor_in_channels = cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM
- else:
- raise NotImplementedError
-
- return {
- "input_shape": {
- k: v for k, v in input_shape.items() if k in cfg.MODEL.SEM_SEG_HEAD.IN_FEATURES
- },
- "ignore_value": cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,
- "num_classes": cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES,
- "pixel_decoder": build_pixel_decoder(cfg, input_shape),
- "loss_weight": cfg.MODEL.SEM_SEG_HEAD.LOSS_WEIGHT,
- "transformer_in_feature": cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE,
- "transformer_predictor": build_transformer_decoder(
- cfg,
- transformer_predictor_in_channels,
- mask_classification=True,
- ),
- }
-
- def forward(self, features, mask=None):
- return self.layers(features, mask)
-
- def layers(self, features, mask=None):
- mask_features, transformer_encoder_features, multi_scale_features = self.pixel_decoder.forward_features(features)
- if self.transformer_in_feature == "multi_scale_pixel_decoder":
- predictions = self.predictor(multi_scale_features, mask_features, mask,
- text_classifier=features["text_classifier"], num_templates=features["num_templates"])
- else:
- raise NotImplementedError
- return predictions
diff --git a/spaces/geloku/ai-academy/README.md b/spaces/geloku/ai-academy/README.md
deleted file mode 100644
index ec319ba4cd38eba65bb958b5ad384571af65b128..0000000000000000000000000000000000000000
--- a/spaces/geloku/ai-academy/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Ai Academy
-emoji: 🐢
-colorFrom: green
-colorTo: purple
-sdk: gradio
-sdk_version: 3.16.2
-app_file: app.py
-pinned: false
-license: apache-2.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmseg/apis/__init__.py b/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmseg/apis/__init__.py
deleted file mode 100644
index 170724be38de42daf2bc1a1910e181d68818f165..0000000000000000000000000000000000000000
--- a/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmseg/apis/__init__.py
+++ /dev/null
@@ -1,9 +0,0 @@
-from .inference import inference_segmentor, init_segmentor, show_result_pyplot
-from .test import multi_gpu_test, single_gpu_test
-from .train import get_root_logger, set_random_seed, train_segmentor
-
-__all__ = [
- 'get_root_logger', 'set_random_seed', 'train_segmentor', 'init_segmentor',
- 'inference_segmentor', 'multi_gpu_test', 'single_gpu_test',
- 'show_result_pyplot'
-]
diff --git a/spaces/giorgiolatour/aqiprediction/app.py b/spaces/giorgiolatour/aqiprediction/app.py
deleted file mode 100644
index bc7c384c82d286ad9a0893dd9d7bf2c438a2bfdf..0000000000000000000000000000000000000000
--- a/spaces/giorgiolatour/aqiprediction/app.py
+++ /dev/null
@@ -1,102 +0,0 @@
-import xgboost as xgb
-import pandas as pd
-import os
-import sys
-from my_functions import createFeatures
-import gradio as gr
-pd.options.plotting.backend = "plotly"
-import plotly.express as px
-
-"""
-Load data, XGBoost model, then predict and present using Gradio.
-"""
-def get_forecast():
- zip_code = '60603' # Chicago
- country_code = 'US'
- city = 'Chicago'
-
- aqi_table_name = f'aqi_{city}_{zip_code}'.lower()
-
- data_path = os.path.join('data', f'{aqi_table_name}.csv')
-
- if os.path.exists(data_path):
- df = pd.read_csv(data_path, index_col='datetime', parse_dates=True)
- else:
- sys.exit('Cannot find AQI data. Quitting now...')
-
- future_start = df.index.max() + pd.Timedelta('1 hours')
- future_end = future_start + pd.Timedelta('3 days') - pd.Timedelta('1 hours')
-
- future = pd.date_range(future_start, future_end, freq='1h')
- future_df = pd.DataFrame(index=future)
- future_df['isFuture'] = True
- df['isFuture'] = False
-
- df_and_future = pd.concat([df, future_df])
-
- df_and_future = createFeatures(df_and_future)
- df_and_future['aqi'] = df_and_future['aqi'] - 1
-
- df = df_and_future.query('isFuture==False').copy()
- future_w_features = df_and_future.query('isFuture').copy()
- future_w_features = future_w_features.drop(columns=['isFuture'])
-
- model_dir = 'aqi_model'
-    if not os.path.isdir(model_dir):
- os.mkdir(model_dir)
-
- model_name = 'xgboost_aqi_model.json'
- model_path = os.path.join(model_dir, model_name)
-
- xgb_clf = xgb.XGBClassifier()
- xgb_clf.load_model(model_path)
-
- features = ['hour', 'dayofweek', 'quarter', 'month', 'year', 'dayofyear',
- 'dayofmonth', 'weekofyear', 'co_lag3d', 'no_lag3d', 'no2_lag3d',
- 'o3_lag3d', 'so2_lag3d', 'pm2_5_lag3d', 'pm10_lag3d', 'nh3_lag3d',
- 'aqi_lag3d', 'aqi_max_lag_3d', 'aqi_mean_lag_3d', 'aqi_std_lag_3d']
- target = 'aqi'
-
- forecast = xgb_clf.predict(future_w_features[features])
- future_w_features['aqi'] = forecast
- future_w_features['aqi'] = future_w_features['aqi'] + 1
- df['aqi'] = df['aqi'] + 1
-
- future_w_features['isFuture'] = True
- df['isFuture'] = False
-
- history_start = future_start - pd.Timedelta('7 days')
- historical_plot = df.query('index > @history_start').copy()
-
- data = pd.concat([historical_plot, future_w_features])
-
- fig = px.line(data, x=data.index, y='aqi', color='isFuture',
- labels={'index': 'Date', 'aqi': 'Air Quality Index'})
- return fig
-
-
-with gr.Blocks() as demo:
- gr.Markdown(
- """
- **Air Quality Index (AQI) Prediction 📈 with XGBoost Forecasting**: See recent air quality in Chicago and a 3-day forecast!
- [GitHub Repo](https://github.com/achi113s/AQIPrediction)
- """)
-
- gr_plt = gr.Plot()
-
- gr.Markdown(
- """
- **Description**: The air quality index is based on the concentration of a number of pollutants such as ozone, ammonia, and particulates.
- I trained an XGBoostClassifier model using a little over two years' worth of historical data from OpenWeather. Then I predict the next three days
- of air quality indices at an hourly resolution. Because training an XGBoostClassifier has some degree of stochasticity, predictions for a particular
- time may change after the model is retrained. New data is downloaded roughly every three days, and the model is automatically retrained. This is my
- first machine learning project where I've gathered my own data and deployed a model. It is also the first time series forecasting project I've done.
- Unfortunately, my model doesn't have the greatest performance. The log-loss baseline for the training set is about 0.68 and I wasn't
- able to get below that in cross-validation. This means that the model is no better than randomly guessing the air quality index.
- However, I think with some better feature engineering the model could perform substantially better. Nevertheless, I've learned
- so much about data science, machine learning, GitHub, Gradio, and Hugging Face Spaces with this project and that's what counts.
- """)
-
- demo.load(fn=get_forecast, outputs=[gr_plt], queue=False)
-
-demo.launch()
\ No newline at end of file
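`createFeatures` is imported from `my_functions`, which is not included in this diff. Based on the feature names listed above, a plausible sketch of how the 3-day lag features might be built on an hourly index (column names and exact logic here are assumptions, not the author's code):

```python
import pandas as pd

def add_lag_features(df: pd.DataFrame) -> pd.DataFrame:
    """Sketch: lag pollutant/AQI columns by 3 days (72 hourly rows)."""
    hours = 72  # 3 days at hourly resolution
    for col in ['co', 'no', 'no2', 'o3', 'so2', 'pm2_5', 'pm10', 'nh3', 'aqi']:
        df[f'{col}_lag3d'] = df[col].shift(hours)
    lagged_aqi = df['aqi'].shift(hours)
    df['aqi_max_lag_3d'] = lagged_aqi.rolling(hours).max()
    df['aqi_mean_lag_3d'] = lagged_aqi.rolling(hours).mean()
    df['aqi_std_lag_3d'] = lagged_aqi.rolling(hours).std()
    return df
```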
diff --git "a/spaces/giswqs/Streamlit/pages/6_\360\237\227\272\357\270\217_Basemaps.py" "b/spaces/giswqs/Streamlit/pages/6_\360\237\227\272\357\270\217_Basemaps.py"
deleted file mode 100644
index 27373c0602e738ac57ca174aa61ffde9b39ca8f8..0000000000000000000000000000000000000000
--- "a/spaces/giswqs/Streamlit/pages/6_\360\237\227\272\357\270\217_Basemaps.py"
+++ /dev/null
@@ -1,63 +0,0 @@
-import streamlit as st
-import leafmap.foliumap as leafmap
-
-st.set_page_config(layout="wide")
-
-st.sidebar.info(
- """
- - Web App URL:
- - GitHub repository:
- """
-)
-
-st.sidebar.title("Contact")
-st.sidebar.info(
- """
- Qiusheng Wu at [wetlands.io](https://wetlands.io) | [GitHub](https://github.com/giswqs) | [Twitter](https://twitter.com/giswqs) | [YouTube](https://www.youtube.com/c/QiushengWu) | [LinkedIn](https://www.linkedin.com/in/qiushengwu)
- """
-)
-
-
-def app():
- st.title("Search Basemaps")
- st.markdown(
- """
-        This app is a demonstration of searching and loading basemaps from [xyzservices](https://github.com/geopandas/xyzservices) and [Quick Map Services (QMS)](https://github.com/nextgis/quickmapservices). Select from 1000+ basemaps with a few clicks.
- """
- )
-
- with st.expander("See demo"):
- st.image("https://i.imgur.com/0SkUhZh.gif")
-
- row1_col1, row1_col2 = st.columns([3, 1])
- width = 800
- height = 600
- tiles = None
-
- with row1_col2:
-
- checkbox = st.checkbox("Search Quick Map Services (QMS)")
- keyword = st.text_input("Enter a keyword to search and press Enter:")
- empty = st.empty()
-
- if keyword:
- options = leafmap.search_xyz_services(keyword=keyword)
- if checkbox:
- qms = leafmap.search_qms(keyword=keyword)
- if qms is not None:
- options = options + qms
-
- tiles = empty.multiselect(
- "Select XYZ tiles to add to the map:", options)
-
- with row1_col1:
- m = leafmap.Map()
-
- if tiles is not None:
- for tile in tiles:
- m.add_xyz_service(tile)
-
- m.to_streamlit(height=height)
-
-
-app()
diff --git a/spaces/gotiQspiryo/whisper-ui/examples/Aint Too Proud to Beg How to Win Back Your Ex with Confidence and Charm.md b/spaces/gotiQspiryo/whisper-ui/examples/Aint Too Proud to Beg How to Win Back Your Ex with Confidence and Charm.md
deleted file mode 100644
index 3630d51b40b8088589a80341426862ea8f33d83e..0000000000000000000000000000000000000000
--- a/spaces/gotiQspiryo/whisper-ui/examples/Aint Too Proud to Beg How to Win Back Your Ex with Confidence and Charm.md
+++ /dev/null
@@ -1,7 +0,0 @@
-
-
Shayla Brielle G. (Mama Rose / Florence Ballard / Tammi Terrell). Shayla Brielle G. is thrilled to be bringing this incredible story around the country. She is a proud native of Cincinnati, Ohio and studied Music Theatre at Baldwin Wallace University. Thank you to Mom, Dad, Tara Rubin Casting, Lakey Wolff & Co, and the entire support system. Glory to God!
The song's lyrics feature its narrator pleading for a second chance with his departing lover, opening with the determined statement "I know you wanna leave me/but I refuse to let you go." The narrator goes on to state that he "ain't too proud to beg" or "plead" his lover to stay. The Temptations were pleased with the composition, feeling that the song's blues-inspired melody and James Brown-esque horn stabs would help to update their sound. David Ruffin was tapped to sing lead on the song, and Whitfield submitted the mix to Motown's Quality Control department.
-
Melissa Rose Bernardo has been covering theater for more than 20 years, reviewing for Entertainment Weekly and contributing to such outlets as Broadway.com, Playbill, and the gone (but not forgotten) InTheater and TheaterWeek magazines. She is a proud graduate of the University of Michigan. Twitter: @mrbplus. Email: melissa@nystagereview.com.
-
-
\ No newline at end of file
diff --git a/spaces/gotiQspiryo/whisper-ui/examples/Experience the Magic of Fire And I.C.E. Full Movie In Italian Free Download Hd 720p A Stunning Visual Feast.md b/spaces/gotiQspiryo/whisper-ui/examples/Experience the Magic of Fire And I.C.E. Full Movie In Italian Free Download Hd 720p A Stunning Visual Feast.md
deleted file mode 100644
index a51d1d2b7acbace36427b05c44bd352cbb7eb1d1..0000000000000000000000000000000000000000
--- a/spaces/gotiQspiryo/whisper-ui/examples/Experience the Magic of Fire And I.C.E. Full Movie In Italian Free Download Hd 720p A Stunning Visual Feast.md
+++ /dev/null
@@ -1,5 +0,0 @@
-
-
Most Viewed, Most Favorite, Top Rating, Top IMDb movies online. Here we can download and watch 123movies movies offline. 123Movies website is the best alternative to The Whale's (2021) free online. We will recommend 123Movies as the best Solarmovie alternative There are a
-
Fire And I.C.E. Full Movie In Italian Free Download Hd 720p
-
-Sheet music for Gaetano Donizetti: Composizioni Da Camera - Volume 1: buy online. Donizetti: La conocchia; Donizetti: La lontananza; Donizetti: La ninna-. Notes for piano, sheet music by Gaetano Donizetti.
-La conocchia, Composizioni da camera, Op.
-Download sheet music for piano on all topics from sheet music.
-
-
-
diff --git a/spaces/gotiQspiryo/whisper-ui/examples/Lanfeust De Troy Download 11 A Must-Have for Fans of Fantasy Humor and Action Comics.md b/spaces/gotiQspiryo/whisper-ui/examples/Lanfeust De Troy Download 11 A Must-Have for Fans of Fantasy Humor and Action Comics.md
deleted file mode 100644
index 06bd07ac84fc3c6408acb734fb497e1f4c9a958a..0000000000000000000000000000000000000000
--- a/spaces/gotiQspiryo/whisper-ui/examples/Lanfeust De Troy Download 11 A Must-Have for Fans of Fantasy Humor and Action Comics.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
-ABBYY FineReader 12.0.101.483 Professional & Corporate Edition + Crack [SadeemPC] 775 ... ABBYY PDF Transformer+ 12.0.104.225 Incl Crack [SadeemPC].
-
-
-
diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Cabletech Usb To Serial Driver Download.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Cabletech Usb To Serial Driver Download.md
deleted file mode 100644
index 695d2a932cb5bbc6014acadb78c405297cbafe4c..0000000000000000000000000000000000000000
--- a/spaces/inplisQlawa/anything-midjourney-v4-1/Cabletech Usb To Serial Driver Download.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
-Cabletech Usb To Serial Driver Download ->>> http://cinurl.com/14qo5n.
-
-
-
diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Download LINK Windows 8 Activator K.j.121105.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Download LINK Windows 8 Activator K.j.121105.md
deleted file mode 100644
index b1143d3329e57f4fc42af1c78b81cb3cc9bf7c30..0000000000000000000000000000000000000000
--- a/spaces/inplisQlawa/anything-midjourney-v4-1/Download LINK Windows 8 Activator K.j.121105.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
-Oka Manasu 2016 1CD WEBRip x264-TeamTR Download Movies Games ... Deep Freeze Standard Full With Download windows 8 activator kj 121105 free ...
-
-
-
diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Ediabas 6.4.7 Inpa 5.02.rar.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Ediabas 6.4.7 Inpa 5.02.rar.md
deleted file mode 100644
index 2afdfa87e9096ff6b28f88ed4897d9dbccc6cde2..0000000000000000000000000000000000000000
--- a/spaces/inplisQlawa/anything-midjourney-v4-1/Ediabas 6.4.7 Inpa 5.02.rar.md
+++ /dev/null
@@ -1,54 +0,0 @@
-
-
How to Install and Use EDIABAS 6.4.7 and INPA 5.0.2 on BMW Cars
-
EDIABAS and INPA are two popular software tools that can help you diagnose and program your BMW car. EDIABAS stands for Electronic Diagnostic Base System, and INPA stands for Integrated Service Technical Application. They work together to communicate with various systems and modules on BMW cars, such as engine, transmission, airbag, ABS, etc.
-
In this article, we will show you how to install and use EDIABAS 6.4.7 and INPA 5.0.2 on your Windows computer with a BMW INPA K+DCAN USB cable. This cable allows you to connect your BMW car to your computer via the OBD2 port.
Download the EDIABAS 6.4.7 and INPA 5.0.2 software from the link below:
-http://blog.vxdas.com/inpa-software-download/
-This link contains several versions of EDIABAS and INPA software, but we recommend using the ediabas 6.4.7 inpa 5.02.rar file, which is tested and verified to work.
-
Extract the ediabas 6.4.7 inpa 5.02.rar file to a folder on your computer.
-
Run the setup.exe file in the folder and follow the installation wizard without changing any settings.
-
After the installation is complete, go to My Computer > Properties > Advanced > Environment Variables and add ";C:\ediabas\bin" to the end of the Path variable under System Variables.
-
Go to C:\EDIABAS\Hardware\ADS\ADS32 and run the ADSSETUP.EXE file. A DOS window will appear for a second.
-
Restart your computer.
-
Copy the files from the Crack (Bin files) folder to C:\INPA\BIN.
-
Connect your BMW INPA K+DCAN USB cable to your computer and install the drivers if needed.
-
Connect the other end of the cable to your BMW car's OBD2 port.
-
-
Usage Steps
-
-
Run the BMWINPA icon on your desktop.
-
Select your car model and chassis from the menu.
-
Select the system or module you want to diagnose or program from the submenu.
-
Follow the instructions on the screen to read or clear error codes, view live data, perform tests, or code variables.
-
-
Note: Before using EDIABAS and INPA software, make sure your car battery is fully charged and do not charge it while diagnosing. Also, make sure the diagnostic cable is well connected and do not cut the power or disconnect it during the test.
-
-
Benefits of EDIABAS and INPA Software
-
EDIABAS and INPA software are powerful tools that can help you diagnose and fix problems with your BMW car. They can save you time and money by avoiding unnecessary trips to the dealer or mechanic. They can also help you customize and optimize your car's performance and features by coding various parameters.
-
Some of the benefits of using EDIABAS and INPA software are:
-
-
You can access all the systems and modules on your BMW car, not just the engine like generic scanners.
-
You can read and clear all BMW trouble codes and view their descriptions.
-
You can view live data and graphs of various sensors and actuators.
-
You can perform tests and calibrations of single or multiple components.
-
You can code ECU variables to change settings such as language, units, lights, locks, etc.
-
-
Limitations and Precautions of EDIABAS and INPA Software
-
While EDIABAS and INPA software are very useful and convenient, they also have some limitations and risks that you should be aware of. They are not official BMW software and they may not work with all models and years of BMW cars. They may also cause damage to your car or computer if used incorrectly or carelessly.
-
Some of the limitations and precautions of using EDIABAS and INPA software are:
-
-
You need a compatible BMW INPA K+DCAN USB cable to connect your car to your computer. Other cables may not work or may damage your car or computer.
-
You need a Windows computer with a 32-bit operating system. Other operating systems may not work or may cause errors.
-
You need to follow the installation and usage steps carefully and correctly. Any mistake or deviation may cause errors or damage.
-
You need to back up your original ECU data before coding any variables. Any wrong coding may cause malfunctions or errors.
-
You need to use the software at your own risk. We are not responsible for any damage or loss caused by using the software.
-
-
-
\ No newline at end of file
diff --git a/spaces/inreVtussa/clothingai/Examples/25pp 1.0.5 Download HOT.md b/spaces/inreVtussa/clothingai/Examples/25pp 1.0.5 Download HOT.md
deleted file mode 100644
index 311ebf3521b89b42bac74993325fbc27c1d270cf..0000000000000000000000000000000000000000
--- a/spaces/inreVtussa/clothingai/Examples/25pp 1.0.5 Download HOT.md
+++ /dev/null
@@ -1,157 +0,0 @@
-
-
How to Download and Install 25PP 1.0.5 on Your iOS Device Without Jailbreak
-
-
If you are an iOS user, you might have faced the frustration of not being able to download some apps that are either restricted in your region or not verified by Apple. You might also want to get some paid apps for free without spending a dime. Well, there is a solution for you: 25PP 1.0.5.
-
-
25PP 1.0.5 is a Chinese app store that offers a huge collection of apps and games for iOS devices. You can find apps that are not available on the official App Store, as well as hacked versions of popular apps that provide extra features or remove ads. You can also download paid apps for free with 25PP 1.0.5.
The best part is that you don't need to jailbreak your device to use 25PP 1.0.5. You can install it on any iOS device running iOS 7 or later, including iPhone 13, iPhone 12, iPhone 11, iPhone X, and iPad. In this article, we will show you how to download and install 25PP 1.0.5 on your iOS device without jailbreak.
-
-
Method 1: Download and Install 25PP 1.0.5 on Your iOS Device Without a Computer
-
-
This method is the easiest and fastest way to get 25PP 1.0.5 on your iOS device. You just need to use your device's browser and follow these simple steps:
You will see a page with Chinese texts and a green button. Tap on the green button to start the download.
-
When iOS prompts you, tap on Install to confirm the installation of 25PP 1.0.5.
-
Wait for the installation to finish and then go to your home screen. You will see two icons of 25PP: one with a blue background and one with a white background.
-
Tap on the icon with a white background to launch 25PP 1.0.5.
-
You will see a Chinese prompt on the screen. Tap on the blue button to proceed.
-
This will lead to a certificate installation, where you should tap on Install again.
-
Let iOS finish installing the certificates and then go back to your home screen.
-
You will see a third icon of 25PP with a red background. Tap on it to open 25PP 1.0.5.
-
When the app launches, tap on the wide green button at the bottom.
-
iOS will prompt you to install another profile, allow the installation by tapping on Install again.
-
After installing the third profile, you are done! You can now use 25PP 1.0.5 to download and install apps and games on your iOS device without jailbreak.
-
-
-
Method 2: Download and Install 25PP 1.0.5 on Your iOS Device With a Computer
-
-
If the first method doesn't work for you or you prefer to use a computer, you can try this method instead. You will need a Windows PC and a USB cable for this method. Here are the steps:
Download the Windows version of 25PP from the page and install it on your PC.
-
Launch the program and connect your iOS device to your PC with a USB cable.
-
The program may download some extra files before detecting your device.
-
When your device is detected, click on the PP Assistant icon.
-
-
You will see a list of apps and games that you can download from 25PP.
-
Search for the app or game that you want to download and click on the blue button next to it.
-
The app or game will start downloading on your PC and then transfer to your iOS device automatically.
-
You may need to trust the developer of the app or game on your iOS device before running it.
-
To do that, go to Settings > General > Device Management and mark the developer as trusted.
-
Now you can enjoy using 25PP 1.0.5 on your iOS device with a computer.
-
-
-
-
-
How to Use 25PP 1.0.5 to Download and Install Apps and Games on Your iOS Device
-
-
Once you have successfully installed 25PP 1.0.5 on your iOS device, you can start using it to download and install apps and games that you want. Here are some tips on how to use 25PP 1.0.5 effectively:
-
-
-
The app is in Chinese, but you can use Google Translate or any other translation app to understand the texts and menus.
-
You can browse the app by categories, such as games, tools, entertainment, etc., or use the search bar to find a specific app or game.
-
When you find an app or game that you want to download, tap on the blue button next to it. The download will start automatically and you will see a progress bar on the top of the screen.
-
When the download is complete, tap on the green button to install the app or game on your device. You may need to enter your passcode or use Touch ID or Face ID to confirm the installation.
-
Some apps and games may require you to trust the developer before running them. To do that, go to Settings > General > Device Management and mark the developer as trusted.
-
You can also update your apps and games through 25PP 1.0.5 by tapping on the red button on the bottom of the screen. You will see a list of apps and games that have new versions available. Tap on the blue button next to each app or game to update it.
-
You can also uninstall apps and games that you don't want anymore through 25PP 1.0.5 by tapping on the yellow button on the bottom of the screen. You will see a list of apps and games that you have installed through 25PP 1.0.5. Tap on the red button next to each app or game to uninstall it.
-
-
-
Benefits and Risks of Using 25PP 1.0.5 on Your iOS Device
-
-
Using 25PP 1.0.5 on your iOS device can have some benefits and risks that you should be aware of before using it. Here are some of them:
-
-
Benefits
-
-
-
You can access apps and games that are not available on the official App Store, such as region-locked apps, hacked apps, etc.
-
You can get paid apps and games for free without spending any money.
-
You can customize your iOS device in different ways, such as changing icons, themes, wallpapers, etc.
-
You can enjoy more features and functions that are not provided by the official iOS system.
-
-
-
Risks
-
-
-
You may violate the terms and conditions of Apple and the developers of the apps and games that you download through 25PP 1.0.5.
-
You may expose your device and data to malware or viruses that could harm your device or compromise your privacy.
-
You may experience some issues or errors with your device or apps due to compatibility or stability problems.
-
You may lose your warranty or support from Apple or the developers if you encounter any problems with your device or apps.
-
-
-
Conclusion
-
-
25PP 1.0.5 is a powerful app store for iOS users who want to download and install apps and games that are not available on the official App Store, or get paid apps for free without jailbreak. You can use either of the two methods above to get 25PP 1.0.5 on your iOS device easily and quickly.
-
-
However, you should be careful when using 25PP 1.0.5 as some apps and games may contain malware or viruses that could harm your device or compromise your privacy. You should also respect the developers' rights and only download apps and games that are free or that you have paid for legally.
-
-
We hope this article has helped you learn how to download and install 25PP 1.0.5 on your iOS device without jailbreak using two different methods: with or without a computer.
-
-
If you have any questions or problems regarding this topic, feel free to leave a comment below and we will try our best to help you out.
-
How to Download and Install Apps and Games from 25PP 1.0.5 on Your iOS Device
-
-
After you have successfully installed 25PP 1.0.5 on your iOS device, you can start downloading and installing apps and games from it. Here are some tips on how to do that:
-
-
-
The app is in Chinese, but you can use Google Translate or any other translation app to understand the texts and menus.
-
You can browse the app by categories, such as games, tools, entertainment, etc., or use the search bar to find a specific app or game.
-
When you find an app or game that you want to download, tap on the blue button next to it. The download will start automatically and you will see a progress bar on the top of the screen.
-
When the download is complete, tap on the green button to install the app or game on your device. You may need to enter your passcode or use Touch ID or Face ID to confirm the installation.
-
Some apps and games may require you to trust the developer before running them. To do that, go to Settings > General > Device Management and mark the developer as trusted.
-
You can also update your apps and games through 25PP 1.0.5 by tapping on the red button on the bottom of the screen. You will see a list of apps and games that have new versions available. Tap on the blue button next to each app or game to update it.
-
You can also uninstall apps and games that you don't want anymore through 25PP 1.0.5 by tapping on the yellow button on the bottom of the screen. You will see a list of apps and games that you have installed through 25PP 1.0.5. Tap on the red button next to each app or game to uninstall it.
-
-
-
Benefits and Risks of Using 25PP 1.0.5 on Your iOS Device
-
-
Using 25PP 1.0.5 on your iOS device can have some benefits and risks that you should be aware of before using it. Here are some of them:
-
-
Benefits
-
-
-
You can access apps and games that are not available on the official App Store, such as region-locked apps, hacked apps, etc.
-
You can get paid apps and games for free without spending any money.
-
You can customize your iOS device in different ways, such as changing icons, themes, wallpapers, etc.
-
You can enjoy more features and functions that are not provided by the official iOS system.
-
-
-
Risks
-
-
-
You may violate the terms and conditions of Apple and the developers of the apps and games that you download through 25PP 1.0.5.
-
You may expose your device and data to malware or viruses that could harm your device or compromise your privacy.
-
You may experience some issues or errors with your device or apps due to compatibility or stability problems.
-
You may lose your warranty or support from Apple or the developers if you encounter any problems with your device or apps.
-
-
-
Conclusion
-
-
25PP 1.0.5 is a powerful app store for iOS users who want to download and install apps and games that are not available on the official App Store, or get paid apps for free without jailbreak. You can use either of the two methods above to get 25PP 1.0.5 on your iOS device easily and quickly.
-
-
However, you should be careful when using 25PP 1.0.5 as some apps and games may contain malware or viruses that could harm your device or compromise your privacy. You should also respect the developers' rights and only download apps and games that are free or that you have paid for legally.
-
-
We hope this article has helped you learn how to download and install 25PP 1.0.5 on your iOS device without jailbreak using two different methods: with or without a computer.
-
-
If you have any questions or problems regarding this topic, feel free to leave a comment below and we will try our best to help you out.
-
3cee63e6c2
-
-
\ No newline at end of file
diff --git a/spaces/inreVtussa/clothingai/Examples/BD Reauthor Pro 3.3.2 (3D).md b/spaces/inreVtussa/clothingai/Examples/BD Reauthor Pro 3.3.2 (3D).md
deleted file mode 100644
index 2b8054a5dda186f3b7535a10f8e6b4bb39ec3c2c..0000000000000000000000000000000000000000
--- a/spaces/inreVtussa/clothingai/Examples/BD Reauthor Pro 3.3.2 (3D).md
+++ /dev/null
@@ -1,115 +0,0 @@
-
-
BD Reauthor Pro 3.3.2 (3D): A Professional Solution for Blu-Ray Reauthoring
-
-
If you are looking for a powerful and efficient tool to reauthor and edit Blu-Ray discs, you might want to check out BD Reauthor Pro 3.3.2 (3D). This software is designed to help you create high-quality Blu-Ray projects that can be further modified in Sonic Scenarist BD.
-
-
What is BD Reauthor Pro 3.3.2 (3D)?
-
-
BD Reauthor Pro 3.3.2 (3D) is a software product developed by DVDLogic that allows you to analyze and demux Blu-Ray discs and create Scenarist BD projects from them. You can use this software to extract video, audio, subtitles, graphics and interactive menus from Blu-Ray discs and save them as separate files or as a single XML project file.
BD Reauthor Pro 3.3.2 (3D) supports both 2D and 3D Blu-Ray discs and can handle various formats and codecs, such as MPEG-2, AVC/H.264, HEVC/H.265, Dolby Digital, DTS, LPCM, TrueHD, DTS-HD and more. You can also edit the playlist, clip information, meta data and navigation commands of the Blu-Ray disc.
-
-
What are the benefits of using BD Reauthor Pro 3.3.2 (3D)?
-
-
BD Reauthor Pro 3.3.2 (3D) offers many advantages for Blu-Ray reauthoring, such as:
-
-
-
It is fast and reliable, as it can process Blu-Ray discs in minutes and handle large files without errors.
-
It is compatible with Sonic Scenarist BD, which is one of the most popular and professional Blu-Ray authoring tools on the market.
-
It is easy to use, as it has a user-friendly interface and provides detailed information about the Blu-Ray disc structure and content.
-
It is flexible and versatile, as it allows you to customize the output settings and parameters according to your needs and preferences.
-
It is affordable and cost-effective, as it has a reasonable price and offers free updates and technical support.
-
-
-
How to use BD Reauthor Pro 3.3.2 (3D)?
-
-
To use BD Reauthor Pro 3.3.2 (3D), you need to follow these simple steps:
-
-
-
Download and install the software from the official website: http://www.dvd-logic.com/bdreauthorpro.php
-
Launch the software and select the source Blu-Ray disc or folder that you want to reauthor.
-
Choose the destination folder where you want to save the output files or project.
-
Select the options and settings that you want to apply to the output, such as demux mode, file format, video resolution, audio channels, subtitles language, etc.
-
Click on the "Demux all data" button to start the reauthoring process.
-
Wait for the process to finish and check the output files or project in the destination folder.
-
Open the output project in Sonic Scenarist BD and edit it as you wish.
-
-
-
What are the features of BD Reauthor Pro 3.3.2 (3D)?
-
-
BD Reauthor Pro 3.3.2 (3D) has many features that make it a powerful and efficient tool for Blu-Ray reauthoring, such as:
-
-
-
It can open and edit any unencrypted Blu-Ray disc title, regardless of the authoring application used to create it.
-
It can demux all the streams from the Blu-Ray disc, such as video, audio, subtitles, graphics and interactive menus, and save them as separate files or as a single XML project file.
-
It can edit the playlist, clip information, meta data and navigation commands of the Blu-Ray disc, such as changing the playback order, adding or removing clips, modifying the chapter points, changing the audio or subtitle tracks, etc.
-
It can insert video files data into the destination XML project, such as resolution, frame rate, aspect ratio, codec, etc.
-
It can create Scenarist BD projects from the output files or project, which can be further modified in Sonic Scenarist BD.
-
It can handle both 2D and 3D Blu-Ray discs and support various formats and codecs.
-
It has a user-friendly interface and provides detailed information about the Blu-Ray disc structure and content.
-
It has a fast and reliable performance and can process Blu-Ray discs in minutes and handle large files without errors.
-
-
-
What are the reviews of BD Reauthor Pro 3.3.2 (3D)?
-
-
BD Reauthor Pro 3.3.2 (3D) has received positive reviews from users who have tried it for Blu-Ray reauthoring. Here are some of the comments that users have left on various websites:
-
-
-
"BD Reauthor Pro is a great solution for reauthoring Blu-Ray discs. It is fast, reliable and easy to use. It can open and edit any Blu-Ray disc title and create Scenarist BD projects from them. I highly recommend it to anyone who works with Blu-Ray discs." - SenyaRocK on RuTracker.org
-
-
"I have used BD Reauthor Pro for several Blu-Ray projects and I am very satisfied with the results. It can demux all the streams from the Blu-Ray disc and edit them as I wish. It can also handle 3D Blu-Ray discs and support various formats and codecs. It is a professional tool that can save me a lot of time and effort." - Larissa Oliver on Trello.com
-
-
"BD Reauthor Pro is a must-have utility for any studio or professional BD authoring facility. It can open and edit any unencrypted Blu-Ray disc title using Sonic Scenarist BD. It is compatible with Sonic Scenarist BD, which is one of the most popular and professional Blu-Ray authoring tools on the market. It is affordable and cost-effective, as it has a reasonable price and offers free updates and technical support." - SaevenMphasu on SoundCloud.com
-
-
How to get BD Reauthor Pro 3.3.2 (3D)?
-
-
If you are interested in getting BD Reauthor Pro 3.3.2 (3D), you can download it from the official website of DVDLogic Software: http://www.dvd-logic.com/bdreauthorpro.php
-
-
You can also find some links to download BD Reauthor Pro 3.3.2 (3D) from other websites, such as RuTracker.org, OpenSea.io, Trello.com and SoundCloud.com. However, you should be careful when downloading from these sources, as they may contain viruses, malware or other unwanted programs that can harm your computer or compromise your privacy.
-
-
Before downloading BD Reauthor Pro 3.3.2 (3D), you should check the system requirements and compatibility of the software with your operating system and Blu-Ray authoring tool. You should also read the user manual and watch some tutorials on how to use the software effectively and efficiently.
-
-
How to get support for BD Reauthor Pro 3.3.2 (3D)?
-
-
If you have any questions, problems or suggestions regarding BD Reauthor Pro 3.3.2 (3D), you can contact the DVDLogic Software team by email: support@dvd-logic.com
-
-
You can also visit the DVDLogic Software forum: http://www.dvd-logic.com/forum/ where you can find answers to frequently asked questions, tips and tricks, feedback from other users and updates on new versions and features of BD Reauthor Pro 3.3.2 (3D).
-
-
DVDLogic Software offers free updates and technical support for BD Reauthor Pro 3.3.2 (3D) to all registered users. You can register your copy of BD Reauthor Pro 3.3.2 (3D) by filling out a form on the DVDLogic Software website: http://www.dvd-logic.com/register.php
-
-
How much does BD Reauthor Pro 3.3.2 (3D) cost?
-
-
BD Reauthor Pro 3.3.2 (3D) is not a free software, but it has a reasonable price compared to other Blu-Ray reauthoring tools. You can buy BD Reauthor Pro 3.3.2 (3D) from the official website of DVDLogic Software: http://www.dvd-logic.com/bdreauthorpro.php
-
-
The price of BD Reauthor Pro 3.3.2 (3D) depends on the license type and the number of copies that you want to purchase. There are two types of licenses available: single user license and site license.
-
-
A single user license allows you to install and use BD Reauthor Pro 3.3.2 (3D) on one computer only. The price of a single user license is $1200 USD.
-
-
A site license allows you to install and use BD Reauthor Pro 3.3.2 (3D) on multiple computers within one organization or company. The price of a site license is $6000 USD.
-
-
You can also get a discount if you buy more than one copy of BD Reauthor Pro 3.3.2 (3D). For example, if you buy two copies of BD Reauthor Pro 3.3.2 (3D), you can get a 10% discount and pay only $2160 USD instead of $2400 USD.
-
-
Is BD Reauthor Pro 3.3.2 (3D) worth the price?
-
-
BD Reauthor Pro 3.3.2 (3D) is a professional solution for Blu-Ray reauthoring that can help you create high-quality Blu-Ray projects for further editing in Sonic Scenarist BD. It can open and edit any unencrypted Blu-Ray disc title, demux all of its streams, edit the playlist, clip information, metadata and navigation commands, insert video file data into the destination XML project, and handle both 2D and 3D discs in a wide range of formats and codecs.
-
-
BD Reauthor Pro 3.3.2 (3D) also offers free updates and technical support to all registered users. You can register your copy of BD Reauthor Pro 3.3.2 (3D) by filling out a form on the DVDLogic Software website: http://www.dvd-logic.com/register.php
-
-
BD Reauthor Pro 3.3.2 (3D) has received positive reviews from users who have tried it for Blu-Ray reauthoring. They have praised its fast and reliable performance, its compatibility with Sonic Scenarist BD, its user-friendly interface and its flexibility and versatility.
-
-
Therefore, BD Reauthor Pro 3.3.2 (3D) is worth the price if you are looking for a professional solution for Blu-Ray reauthoring that can help you create high-quality Blu-Ray projects that can be further modified in Sonic Scenarist BD.
-
Conclusion
-
-
In conclusion, BD Reauthor Pro 3.3.2 (3D) is a professional solution for Blu-Ray reauthoring that helps you create high-quality projects for further editing in Sonic Scenarist BD. It can open and edit any unencrypted Blu-Ray disc title, demux all of its streams, edit the playlist, clip information, metadata and navigation commands, insert video file data into the destination XML project, create Scenarist BD projects from the output, and handle both 2D and 3D discs across a wide range of formats and codecs. It is reasonably priced compared to other Blu-Ray reauthoring tools, offers free updates and technical support to registered users, and has received positive reviews from users. If you are looking for a professional Blu-Ray reauthoring solution, give BD Reauthor Pro 3.3.2 (3D) a try.
3cee63e6c2
-
-
\ No newline at end of file
diff --git a/spaces/inreVtussa/clothingai/Examples/CRACK VSO ConvertXtoDVD 7.0.0.58 Crack PATCHED.md b/spaces/inreVtussa/clothingai/Examples/CRACK VSO ConvertXtoDVD 7.0.0.58 Crack PATCHED.md
deleted file mode 100644
index a45a7348a39d5a5493aa02b9bdffb1f6665069d8..0000000000000000000000000000000000000000
--- a/spaces/inreVtussa/clothingai/Examples/CRACK VSO ConvertXtoDVD 7.0.0.58 Crack PATCHED.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
-FULL VSO ConvertXtoDVD 7.0.0.58 Crack. This section contains descriptions of, and instructions for, the programs on the supplied CD. ConvertXtoDVD converts video into a DVD Video–compatible format and uses CUDA technology to speed up processing, so you can convert videos to DVD Video quickly and easily. 8a78ff9644
-
-
-
diff --git a/spaces/inreVtussa/clothingai/Examples/Catalogo Monedas Argentinas Janson 2012 Descargar.md b/spaces/inreVtussa/clothingai/Examples/Catalogo Monedas Argentinas Janson 2012 Descargar.md
deleted file mode 100644
index fa70d40ef6b2265419ba39ab195b27d70ab49ef2..0000000000000000000000000000000000000000
--- a/spaces/inreVtussa/clothingai/Examples/Catalogo Monedas Argentinas Janson 2012 Descargar.md
+++ /dev/null
@@ -1,9 +0,0 @@
-
-
Doing this is free and easy for anybody. Can you tell me how to download Monedas Argentinas Janson 2012?
-
The Catalogo Monedas Argentinas Janson 2012 is also available as a virtual edition, and companion catalogs (such as the 2014 edition) likewise list Argentine coins for in-depth appraisal.
-
From 1868 onward the catalog covers Argentine coinage alongside issues from Europe, North America and Asia, with more than 5,000 Argentine coins listed.
899543212b
-
-
\ No newline at end of file
diff --git a/spaces/isabel/climate-change-project/app.css b/spaces/isabel/climate-change-project/app.css
deleted file mode 100644
index 3c4a6a5b8d215bf9f4fe016eb7ec2b4a91f30b14..0000000000000000000000000000000000000000
--- a/spaces/isabel/climate-change-project/app.css
+++ /dev/null
@@ -1,92 +0,0 @@
-:root {
- /* define any variables here */
-}
-
-ul {
- display: inline-block;
- text-align: left;
-}
-
-img {
- display: block;
- margin: auto;
-}
-
-p {
- text-align: center;
-}
-
-.gr-button {
- display: block !important;
- width: 100% !important;
- color: black !important;
- transition: all .2s ease-out 0s !important;
- box-shadow: 0 10px #DBFBFB !important;
- border: 1px solid black !important;
- border-radius: 10px !important;
- --tw-gradient-from: #C0FCDC !important;
- --tw-gradient-to: #DBFBFB !important;
-}
-
-.gr-button:hover {
- box-shadow: 0 5px #00AEAB;
- transform: translateY(5px);
-}
-
-.gr-button:focus {
- --tw-gradient-from: #CE2C97 !important;
- --tw-gradient-to: #F5501C !important;
- box-shadow: 0 10px #F5501C !important;
-}
-
-.gr-check-radio {
- background-color: #CE2C97 !important;
-}
-
-h1 {
- background-image: url(https://media.giphy.com/media/26BROrSHlmyzzHf3i/giphy.gif) !important;
- background-size: cover !important;
- color: transparent !important;
- -moz-background-clip: text !important;
- -webkit-background-clip: text !important;
- text-transform: uppercase;
- font-size: 60px !important;
- line-height: .75 !important;
- margin: 10px 0 !important;
-}
-
-.box {
- background-color: #454545 !important;
- text-align: center;
- margin: 10px;
- padding: 5%;
-}
-
-.acc, .feat, .data, .team {
- background-color: #CE2C97 !important;
-}
-
-.prj, .ethics {
- background-color: #F5501C !important;
-}
-
-.model-container {
- display: flex;
- flex-direction: column;
- justify-content: center;
-}
-
-.spacer {
- display: flex;
- justify-content: center;
-}
-
-.model-div {
- width: 45%;
-}
-
-@media screen and (max-width: 700px) {
- .model-container {
- flex-wrap: wrap;
- }
-}
\ No newline at end of file
diff --git a/spaces/ismot/1702t1/visualization/grad.py b/spaces/ismot/1702t1/visualization/grad.py
deleted file mode 100644
index fdc0a259baf55a8e1c4aa4d103ff0edeb4989531..0000000000000000000000000000000000000000
--- a/spaces/ismot/1702t1/visualization/grad.py
+++ /dev/null
@@ -1,117 +0,0 @@
-"""
-@Date: 2021/11/06
-@description:
-"""
-import cv2
-import numpy as np
-import torch
-import matplotlib.pyplot as plt
-
-from utils.conversion import depth2xyz
-
-
-def convert_img(value, h, need_nor=True, cmap=None):
- value = value.clone().detach().cpu().numpy()[None]
- if need_nor:
- value -= value.min()
- value /= value.max() - value.min()
- grad_img = value.repeat(int(h), axis=0)
-
- if cmap is None:
- grad_img = grad_img[..., np.newaxis].repeat(3, axis=-1)
- elif cmap == cv2.COLORMAP_PLASMA:
- grad_img = cv2.applyColorMap((grad_img * 255).astype(np.uint8), colormap=cmap)
- grad_img = grad_img[..., ::-1]
- grad_img = grad_img.astype(np.float32) / 255.0  # np.float was removed in NumPy 1.24
- elif cmap == 'HSV':
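- # encode each value as hue (OpenCV hue spans 0-180) at full saturation and brightness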
- grad_img = np.round(grad_img * 1000) / 1000.0
- grad_img = grad_img[..., np.newaxis].repeat(3, axis=-1)
- grad_img[..., 0] = grad_img[..., 0] * 180
- grad_img[..., 1] = 255
- grad_img[..., 2] = 255
- grad_img = grad_img.astype(np.uint8)
- grad_img = cv2.cvtColor(grad_img, cv2.COLOR_HSV2RGB)
- grad_img = grad_img.astype(np.float32) / 255.0
- return grad_img
-
-
-def show_grad(depth, grad_conv, h=5, show=False):
- """
- :param h:
- :param depth: [patch_num]
- :param grad_conv:
- :param show:
- :return:
- """
-
- direction, angle, grad = get_all(depth[None], grad_conv)
-
- # depth_img = convert_img(depth, h)
- # angle_img = convert_img(angle[0], h)
- # grad_img = convert_img(grad[0], depth.shape[-1] // 4 - h * 2)
- depth_img = convert_img(depth, h, cmap=cv2.COLORMAP_PLASMA)
- angle_img = convert_img(angle[0], h, cmap='HSV')
-
- # vis_grad = grad[0] / grad[0].max() / 2 + 0.5
- grad_img = convert_img(grad[0], h)
- img = np.concatenate([depth_img, angle_img, grad_img], axis=0)
- if show:
- plt.imshow(img)
- plt.show()
- return img
-
-
-def get_grad(direction):
- """
- :param direction: [b patch_num]
- :return:[b patch_num]
- """
- a = torch.roll(direction, -1, dims=1) # xz[i+1]
- b = torch.roll(direction, 1, dims=1) # xz[i-1]
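- # turning angle at each patch: arccos of the (clipped) dot product between the next and previous edge directions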
- grad = torch.acos(torch.clip(a[..., 0] * b[..., 0] + a[..., 1] * b[..., 1], -1+1e-6, 1-1e-6))
- return grad
-
-
-def get_grad2(angle, grad_conv):
- """
- :param angle: [b patch_num]
- :param grad_conv:
- :return:[b patch_num]
- """
- angle = torch.sin(angle)
- angle = angle + 1
-
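- # circular padding: prepend the last element and append the first so the conv wraps around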
- angle = torch.cat([angle[..., -1:], angle, angle[..., :1]], dim=-1)
- grad = grad_conv(angle[:, None]) # [b, patch_num] -> [b, 1, patch_num]
- # grad = torch.abs(grad)
- return grad.reshape(angle.shape[0], -1)
-
-
-def get_edge_angle(direction):
- """
- :param direction: [b patch_num 2]
- :return:
- """
- angle = torch.atan2(direction[..., 1], direction[..., 0])
- return angle
-
-
-def get_edge_direction(depth):
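- # depth2xyz returns (x, y, z); [..., ::2] keeps the horizontal-plane (x, z) coordinates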
- xz = depth2xyz(depth)[..., ::2]
- direction = torch.roll(xz, -1, dims=1) - xz # direct[i] = xz[i+1] - xz[i]
- direction = direction / direction.norm(p=2, dim=-1)[..., None]
- return direction
-
-
-def get_all(depth, grad_conv):
- """
-
- :param grad_conv:
- :param depth: [b patch_num]
- :return:
- """
- direction = get_edge_direction(depth)
- angle = get_edge_angle(direction)
- # angle_grad = get_grad(direction)
- angle_grad = get_grad2(angle, grad_conv) # signed gradient
- return direction, angle, angle_grad
diff --git a/spaces/jackli888/stable-diffusion-webui/scripts/postprocessing_upscale.py b/spaces/jackli888/stable-diffusion-webui/scripts/postprocessing_upscale.py
deleted file mode 100644
index ccec72fcbc72eeffbe24a659bf53ecba71162391..0000000000000000000000000000000000000000
--- a/spaces/jackli888/stable-diffusion-webui/scripts/postprocessing_upscale.py
+++ /dev/null
@@ -1,131 +0,0 @@
-from PIL import Image
-import numpy as np
-
-from modules import scripts_postprocessing, shared
-import gradio as gr
-
-from modules.ui_components import FormRow
-
-
-upscale_cache = {}
-
-
-class ScriptPostprocessingUpscale(scripts_postprocessing.ScriptPostprocessing):
- name = "Upscale"
- order = 1000
-
- def ui(self):
- selected_tab = gr.State(value=0)
-
- with gr.Tabs(elem_id="extras_resize_mode"):
- with gr.TabItem('Scale by', elem_id="extras_scale_by_tab") as tab_scale_by:
- upscaling_resize = gr.Slider(minimum=1.0, maximum=8.0, step=0.05, label="Resize", value=4, elem_id="extras_upscaling_resize")
-
- with gr.TabItem('Scale to', elem_id="extras_scale_to_tab") as tab_scale_to:
- with FormRow():
- upscaling_resize_w = gr.Number(label="Width", value=512, precision=0, elem_id="extras_upscaling_resize_w")
- upscaling_resize_h = gr.Number(label="Height", value=512, precision=0, elem_id="extras_upscaling_resize_h")
- upscaling_crop = gr.Checkbox(label='Crop to fit', value=True, elem_id="extras_upscaling_crop")
-
- with FormRow():
- extras_upscaler_1 = gr.Dropdown(label='Upscaler 1', elem_id="extras_upscaler_1", choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name)
-
- with FormRow():
- extras_upscaler_2 = gr.Dropdown(label='Upscaler 2', elem_id="extras_upscaler_2", choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name)
- extras_upscaler_2_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="Upscaler 2 visibility", value=0.0, elem_id="extras_upscaler_2_visibility")
-
- tab_scale_by.select(fn=lambda: 0, inputs=[], outputs=[selected_tab])
- tab_scale_to.select(fn=lambda: 1, inputs=[], outputs=[selected_tab])
-
- return {
- "upscale_mode": selected_tab,
- "upscale_by": upscaling_resize,
- "upscale_to_width": upscaling_resize_w,
- "upscale_to_height": upscaling_resize_h,
- "upscale_crop": upscaling_crop,
- "upscaler_1_name": extras_upscaler_1,
- "upscaler_2_name": extras_upscaler_2,
- "upscaler_2_visibility": extras_upscaler_2_visibility,
- }
-
- def upscale(self, image, info, upscaler, upscale_mode, upscale_by, upscale_to_width, upscale_to_height, upscale_crop):
- if upscale_mode == 1:
- upscale_by = max(upscale_to_width/image.width, upscale_to_height/image.height)
- info["Postprocess upscale to"] = f"{upscale_to_width}x{upscale_to_height}"
- else:
- info["Postprocess upscale by"] = upscale_by
-
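- # the cache key combines the raw pixel bytes with every upscale setting, so a
- # repeated request for the same image and settings can reuse the previous result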
- cache_key = (hash(np.array(image.getdata()).tobytes()), upscaler.name, upscale_mode, upscale_by, upscale_to_width, upscale_to_height, upscale_crop)
- cached_image = upscale_cache.pop(cache_key, None)
-
- if cached_image is not None:
- image = cached_image
- else:
- image = upscaler.scaler.upscale(image, upscale_by, upscaler.data_path)
-
- upscale_cache[cache_key] = image
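- # bound the cache: drop the oldest entry (dicts preserve insertion order)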
- if len(upscale_cache) > shared.opts.upscaling_max_images_in_cache:
- upscale_cache.pop(next(iter(upscale_cache), None), None)
-
- if upscale_mode == 1 and upscale_crop:
- cropped = Image.new("RGB", (upscale_to_width, upscale_to_height))
- cropped.paste(image, box=(upscale_to_width // 2 - image.width // 2, upscale_to_height // 2 - image.height // 2))
- image = cropped
- info["Postprocess crop to"] = f"{image.width}x{image.height}"
-
- return image
-
- def process(self, pp: scripts_postprocessing.PostprocessedImage, upscale_mode=1, upscale_by=2.0, upscale_to_width=None, upscale_to_height=None, upscale_crop=False, upscaler_1_name=None, upscaler_2_name=None, upscaler_2_visibility=0.0):
- if upscaler_1_name == "None":
- upscaler_1_name = None
-
- upscaler1 = next(iter([x for x in shared.sd_upscalers if x.name == upscaler_1_name]), None)
- assert upscaler1 or (upscaler_1_name is None), f'could not find upscaler named {upscaler_1_name}'
-
- if not upscaler1:
- return
-
- if upscaler_2_name == "None":
- upscaler_2_name = None
-
- upscaler2 = next(iter([x for x in shared.sd_upscalers if x.name == upscaler_2_name and x.name != "None"]), None)
- assert upscaler2 or (upscaler_2_name is None), f'could not find upscaler named {upscaler_2_name}'
-
- upscaled_image = self.upscale(pp.image, pp.info, upscaler1, upscale_mode, upscale_by, upscale_to_width, upscale_to_height, upscale_crop)
- pp.info[f"Postprocess upscaler"] = upscaler1.name
-
- if upscaler2 and upscaler_2_visibility > 0:
- second_upscale = self.upscale(pp.image, pp.info, upscaler2, upscale_mode, upscale_by, upscale_to_width, upscale_to_height, upscale_crop)
- upscaled_image = Image.blend(upscaled_image, second_upscale, upscaler_2_visibility)
-
- pp.info[f"Postprocess upscaler 2"] = upscaler2.name
-
- pp.image = upscaled_image
-
- def image_changed(self):
- upscale_cache.clear()
-
-
-class ScriptPostprocessingUpscaleSimple(ScriptPostprocessingUpscale):
- name = "Simple Upscale"
- order = 900
-
- def ui(self):
- with FormRow():
- upscaler_name = gr.Dropdown(label='Upscaler', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name)
- upscale_by = gr.Slider(minimum=0.05, maximum=8.0, step=0.05, label="Upscale by", value=2)
-
- return {
- "upscale_by": upscale_by,
- "upscaler_name": upscaler_name,
- }
-
- def process(self, pp: scripts_postprocessing.PostprocessedImage, upscale_by=2.0, upscaler_name=None):
- if upscaler_name is None or upscaler_name == "None":
- return
-
- upscaler1 = next(iter([x for x in shared.sd_upscalers if x.name == upscaler_name]), None)
- assert upscaler1, f'could not find upscaler named {upscaler_name}'
-
- pp.image = self.upscale(pp.image, pp.info, upscaler1, 0, upscale_by, 0, 0, False)
- pp.info[f"Postprocess upscaler"] = upscaler1.name
diff --git a/spaces/jackyccl/segment-anything/groundingdino/models/GroundingDINO/utils.py b/spaces/jackyccl/segment-anything/groundingdino/models/GroundingDINO/utils.py
deleted file mode 100644
index 5bd18f70225e12b2e27fdb4eabcde91d959f8e31..0000000000000000000000000000000000000000
--- a/spaces/jackyccl/segment-anything/groundingdino/models/GroundingDINO/utils.py
+++ /dev/null
@@ -1,268 +0,0 @@
-# ------------------------------------------------------------------------
-# Grounding DINO
-# url: https://github.com/IDEA-Research/GroundingDINO
-# Copyright (c) 2023 IDEA. All Rights Reserved.
-# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
-# ------------------------------------------------------------------------
-
-import copy
-import math
-
-import torch
-import torch.nn.functional as F
-from torch import Tensor, nn
-
-
-def _get_clones(module, N, layer_share=False):
- # import ipdb; ipdb.set_trace()
- if layer_share:
- return nn.ModuleList([module for i in range(N)])
- else:
- return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
-
-
-def get_sine_pos_embed(
- pos_tensor: torch.Tensor,
- num_pos_feats: int = 128,
- temperature: int = 10000,
- exchange_xy: bool = True,
-):
- """generate sine position embedding from a position tensor
- Args:
- pos_tensor (torch.Tensor): shape: [..., n].
- num_pos_feats (int): projected shape for each float in the tensor.
- temperature (int): temperature in the sine/cosine function.
- exchange_xy (bool, optional): exchange pos x and pos y. \
- For example, input tensor is [x,y], the results will be [pos(y), pos(x)]. Defaults to True.
- Returns:
- pos_embed (torch.Tensor): shape: [..., n*num_pos_feats].
- """
- scale = 2 * math.pi
- dim_t = torch.arange(num_pos_feats, dtype=torch.float32, device=pos_tensor.device)
- dim_t = temperature ** (2 * torch.div(dim_t, 2, rounding_mode="floor") / num_pos_feats)
-
- def sine_func(x: torch.Tensor):
- sin_x = x * scale / dim_t
- sin_x = torch.stack((sin_x[..., 0::2].sin(), sin_x[..., 1::2].cos()), dim=3).flatten(2)
- return sin_x
-
- pos_res = [sine_func(x) for x in pos_tensor.split([1] * pos_tensor.shape[-1], dim=-1)]
- if exchange_xy:
- pos_res[0], pos_res[1] = pos_res[1], pos_res[0]
- pos_res = torch.cat(pos_res, dim=-1)
- return pos_res
-
-
-def gen_encoder_output_proposals(
- memory: Tensor, memory_padding_mask: Tensor, spatial_shapes: Tensor, learnedwh=None
-):
- """
- Input:
- - memory: bs, \sum{hw}, d_model
- - memory_padding_mask: bs, \sum{hw}
- - spatial_shapes: nlevel, 2
- - learnedwh: 2
- Output:
- - output_memory: bs, \sum{hw}, d_model
- - output_proposals: bs, \sum{hw}, 4
- """
- N_, S_, C_ = memory.shape
- proposals = []
- _cur = 0
- for lvl, (H_, W_) in enumerate(spatial_shapes):
- mask_flatten_ = memory_padding_mask[:, _cur : (_cur + H_ * W_)].view(N_, H_, W_, 1)
- valid_H = torch.sum(~mask_flatten_[:, :, 0, 0], 1)
- valid_W = torch.sum(~mask_flatten_[:, 0, :, 0], 1)
-
- # import ipdb; ipdb.set_trace()
-
- grid_y, grid_x = torch.meshgrid(
- torch.linspace(0, H_ - 1, H_, dtype=torch.float32, device=memory.device),
- torch.linspace(0, W_ - 1, W_, dtype=torch.float32, device=memory.device),
- )
- grid = torch.cat([grid_x.unsqueeze(-1), grid_y.unsqueeze(-1)], -1) # H_, W_, 2
-
- scale = torch.cat([valid_W.unsqueeze(-1), valid_H.unsqueeze(-1)], 1).view(N_, 1, 1, 2)
- grid = (grid.unsqueeze(0).expand(N_, -1, -1, -1) + 0.5) / scale
-
- if learnedwh is not None:
- # import ipdb; ipdb.set_trace()
- wh = torch.ones_like(grid) * learnedwh.sigmoid() * (2.0**lvl)
- else:
- wh = torch.ones_like(grid) * 0.05 * (2.0**lvl)
-
- # scale = torch.cat([W_[None].unsqueeze(-1), H_[None].unsqueeze(-1)], 1).view(1, 1, 1, 2).repeat(N_, 1, 1, 1)
- # grid = (grid.unsqueeze(0).expand(N_, -1, -1, -1) + 0.5) / scale
- # wh = torch.ones_like(grid) / scale
- proposal = torch.cat((grid, wh), -1).view(N_, -1, 4)
- proposals.append(proposal)
- _cur += H_ * W_
- # import ipdb; ipdb.set_trace()
- output_proposals = torch.cat(proposals, 1)
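- # keep only proposals whose normalised coords lie safely inside (0.01, 0.99)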
- output_proposals_valid = ((output_proposals > 0.01) & (output_proposals < 0.99)).all(
- -1, keepdim=True
- )
- output_proposals = torch.log(output_proposals / (1 - output_proposals)) # unsigmoid
- output_proposals = output_proposals.masked_fill(memory_padding_mask.unsqueeze(-1), float("inf"))
- output_proposals = output_proposals.masked_fill(~output_proposals_valid, float("inf"))
-
- output_memory = memory
- output_memory = output_memory.masked_fill(memory_padding_mask.unsqueeze(-1), float(0))
- output_memory = output_memory.masked_fill(~output_proposals_valid, float(0))
-
- # output_memory = output_memory.masked_fill(memory_padding_mask.unsqueeze(-1), float('inf'))
- # output_memory = output_memory.masked_fill(~output_proposals_valid, float('inf'))
-
- return output_memory, output_proposals
-
-
-class RandomBoxPerturber:
- def __init__(
- self, x_noise_scale=0.2, y_noise_scale=0.2, w_noise_scale=0.2, h_noise_scale=0.2
- ) -> None:
- self.noise_scale = torch.Tensor(
- [x_noise_scale, y_noise_scale, w_noise_scale, h_noise_scale]
- )
-
- def __call__(self, refanchors: Tensor) -> Tensor:
- nq, bs, query_dim = refanchors.shape
- device = refanchors.device
-
- noise_raw = torch.rand_like(refanchors)
- noise_scale = self.noise_scale.to(device)[:query_dim]
-
- new_refanchors = refanchors * (1 + (noise_raw - 0.5) * noise_scale)
- return new_refanchors.clamp_(0, 1)
-
-
-def sigmoid_focal_loss(
- inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2, no_reduction=False
-):
- """
- Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.
- Args:
- inputs: A float tensor of arbitrary shape.
- The predictions for each example.
- targets: A float tensor with the same shape as inputs. Stores the binary
- classification label for each element in inputs
- (0 for the negative class and 1 for the positive class).
- alpha: (optional) Weighting factor in range (0,1) to balance
- positive vs negative examples. Default: 0.25 (pass alpha < 0 to disable weighting).
- gamma: Exponent of the modulating factor (1 - p_t) to
- balance easy vs hard examples.
- Returns:
- Loss tensor
- """
- prob = inputs.sigmoid()
- ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
- p_t = prob * targets + (1 - prob) * (1 - targets)
- loss = ce_loss * ((1 - p_t) ** gamma)
-
- if alpha >= 0:
- alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
- loss = alpha_t * loss
-
- if no_reduction:
- return loss
-
- return loss.mean(1).sum() / num_boxes
-
-
-class MLP(nn.Module):
- """Very simple multi-layer perceptron (also called FFN)"""
-
- def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
- super().__init__()
- self.num_layers = num_layers
- h = [hidden_dim] * (num_layers - 1)
- self.layers = nn.ModuleList(
- nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])
- )
-
- def forward(self, x):
- for i, layer in enumerate(self.layers):
- x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
- return x
-
-
-def _get_activation_fn(activation, d_model=256, batch_dim=0):
- """Return an activation function given a string"""
- if activation == "relu":
- return F.relu
- if activation == "gelu":
- return F.gelu
- if activation == "glu":
- return F.glu
- if activation == "prelu":
- return nn.PReLU()
- if activation == "selu":
- return F.selu
-
- raise RuntimeError(f"activation should be relu/gelu/glu/prelu/selu, not {activation}.")
-
-
-def gen_sineembed_for_position(pos_tensor):
- # n_query, bs, _ = pos_tensor.size()
- # sineembed_tensor = torch.zeros(n_query, bs, 256)
- scale = 2 * math.pi
- dim_t = torch.arange(128, dtype=torch.float32, device=pos_tensor.device)
- dim_t = 10000 ** (2 * (torch.div(dim_t, 2, rounding_mode='floor')) / 128)
- x_embed = pos_tensor[:, :, 0] * scale
- y_embed = pos_tensor[:, :, 1] * scale
- pos_x = x_embed[:, :, None] / dim_t
- pos_y = y_embed[:, :, None] / dim_t
- pos_x = torch.stack((pos_x[:, :, 0::2].sin(), pos_x[:, :, 1::2].cos()), dim=3).flatten(2)
- pos_y = torch.stack((pos_y[:, :, 0::2].sin(), pos_y[:, :, 1::2].cos()), dim=3).flatten(2)
- if pos_tensor.size(-1) == 2:
- pos = torch.cat((pos_y, pos_x), dim=2)
- elif pos_tensor.size(-1) == 4:
- w_embed = pos_tensor[:, :, 2] * scale
- pos_w = w_embed[:, :, None] / dim_t
- pos_w = torch.stack((pos_w[:, :, 0::2].sin(), pos_w[:, :, 1::2].cos()), dim=3).flatten(2)
-
- h_embed = pos_tensor[:, :, 3] * scale
- pos_h = h_embed[:, :, None] / dim_t
- pos_h = torch.stack((pos_h[:, :, 0::2].sin(), pos_h[:, :, 1::2].cos()), dim=3).flatten(2)
-
- pos = torch.cat((pos_y, pos_x, pos_w, pos_h), dim=2)
- else:
- raise ValueError("Unknown pos_tensor shape(-1):{}".format(pos_tensor.size(-1)))
- return pos
-
-
-class ContrastiveEmbed(nn.Module):
- def __init__(self, max_text_len=256):
- """
- Args:
- max_text_len: max length of text.
- """
- super().__init__()
- self.max_text_len = max_text_len
-
- def forward(self, x, text_dict):
- """_summary_
-
- Args:
- x (_type_): _description_
- text_dict (_type_): _description_
- {
- 'encoded_text': encoded_text, # bs, 195, d_model
- 'text_token_mask': text_token_mask, # bs, 195
- # True for used tokens. False for padding tokens
- }
- Returns:
- _type_: _description_
- """
- assert isinstance(text_dict, dict)
-
- y = text_dict["encoded_text"]
- text_token_mask = text_dict["text_token_mask"]
-
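- # dot-product similarity between every image query and every text token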
- res = x @ y.transpose(-1, -2)
- res.masked_fill_(~text_token_mask[:, None, :], float("-inf"))
-
- # padding to max_text_len
- new_res = torch.full((*res.shape[:-1], self.max_text_len), float("-inf"), device=res.device)
- new_res[..., : res.shape[-1]] = res
-
- return new_res
diff --git a/spaces/jbilcke-hf/ai-comic-factory/src/lib/replaceNonWhiteWithTransparent.ts b/spaces/jbilcke-hf/ai-comic-factory/src/lib/replaceNonWhiteWithTransparent.ts
deleted file mode 100644
index 6ffe6df050134290d39ee114e427741b26cfb419..0000000000000000000000000000000000000000
--- a/spaces/jbilcke-hf/ai-comic-factory/src/lib/replaceNonWhiteWithTransparent.ts
+++ /dev/null
@@ -1,46 +0,0 @@
-export function replaceNonWhiteWithTransparent(imageBase64: string): Promise<string> {
- return new Promise((resolve, reject) => {
- const img = new Image();
- img.onload = () => {
- const canvas = document.createElement('canvas');
- const ctx = canvas.getContext('2d');
- if (!ctx) {
- reject('Unable to get canvas context');
- return;
- }
-
- const ratio = window.devicePixelRatio || 1;
- canvas.width = img.width * ratio;
- canvas.height = img.height * ratio;
- ctx.scale(ratio, ratio);
-
- ctx.drawImage(img, 0, 0);
-
- const imageData = ctx.getImageData(0, 0, canvas.width, canvas.height); // read the full DPR-scaled canvas, not just the CSS-pixel region
- const data = imageData.data;
- console.log("ok")
-
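- // data is a flat RGBA buffer: 4 bytes per pixel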
- for (let i = 0; i < data.length; i += 4) {
- if (data[i] === 255 && data[i + 1] === 255 && data[i + 2] === 255) {
- // Turn pure-white pixels (255, 255, 255) black
- data[i] = 0;
- data[i + 1] = 0;
- data[i + 2] = 0;
- } else {
- // Change all other pixels to transparent
- data[i + 3] = 0;
- }
- }
-
- ctx.putImageData(imageData, 0, 0);
-
- resolve(canvas.toDataURL());
- };
-
- img.onerror = (err) => {
- reject(err);
- };
-
- img.src = imageBase64;
- });
-}
\ No newline at end of file
diff --git a/spaces/jennysun/jwsun-multisubject-render-model/gligen/trainer.py b/spaces/jennysun/jwsun-multisubject-render-model/gligen/trainer.py
deleted file mode 100644
index 0815b7b25579001be44674e6fa2afa2a7d9e79b0..0000000000000000000000000000000000000000
--- a/spaces/jennysun/jwsun-multisubject-render-model/gligen/trainer.py
+++ /dev/null
@@ -1,456 +0,0 @@
-import torch
-from ldm.models.diffusion.ddim import DDIMSampler
-from ldm.models.diffusion.plms import PLMSSampler
-from ldm.util import instantiate_from_config
-import numpy as np
-import random
-import time
-from dataset.concat_dataset import ConCatDataset #, collate_fn
-from torch.utils.data.distributed import DistributedSampler
-from torch.utils.data import DataLoader
-from torch.utils.tensorboard import SummaryWriter
-import os
-import shutil
-import torchvision
-import math
-from torch.nn.parallel import DistributedDataParallel as DDP
-from tqdm import tqdm
-from distributed import get_rank, synchronize, get_world_size
-from transformers import get_cosine_schedule_with_warmup, get_constant_schedule_with_warmup
-from copy import deepcopy
-try:
- from apex import amp
-except ImportError:
- # apex is optional; it is only needed when config.use_o2 is enabled
- pass
-# = = = = = = = = = = = = = = = = = = useful functions = = = = = = = = = = = = = = = = = #
-
-class ImageCaptionSaver:
- def __init__(self, base_path, nrow=8, normalize=True, scale_each=True, range=(-1,1) ):
- self.base_path = base_path
- self.nrow = nrow
- self.normalize = normalize
- self.scale_each = scale_each
- self.range = range
-
- def __call__(self, images, real, captions, seen):
-
- save_path = os.path.join(self.base_path, str(seen).zfill(8)+'.png')
- torchvision.utils.save_image( images, save_path, nrow=self.nrow, normalize=self.normalize, scale_each=self.scale_each, range=self.range )
-
- save_path = os.path.join(self.base_path, str(seen).zfill(8)+'_real.png')
- torchvision.utils.save_image( real, save_path, nrow=self.nrow)
-
- assert images.shape[0] == len(captions)
-
- save_path = os.path.join(self.base_path, 'captions.txt')
- with open(save_path, "a") as f:
- f.write( str(seen).zfill(8) + ':\n' )
- for cap in captions:
- f.write( cap + '\n' )
- f.write( '\n' )
-
-
-
-def read_official_ckpt(ckpt_path):
- "Read offical pretrained ckpt and convert into my style"
- state_dict = torch.load(ckpt_path, map_location="cpu")["state_dict"]
- out = {}
- out["model"] = {}
- out["text_encoder"] = {}
- out["autoencoder"] = {}
- out["unexpected"] = {}
- out["diffusion"] = {}
-
- for k,v in state_dict.items():
- if k.startswith('model.diffusion_model'):
- out["model"][k.replace("model.diffusion_model.", "")] = v
- elif k.startswith('cond_stage_model'):
- out["text_encoder"][k.replace("cond_stage_model.", "")] = v
- elif k.startswith('first_stage_model'):
- out["autoencoder"][k.replace("first_stage_model.", "")] = v
- elif k in ["model_ema.decay", "model_ema.num_updates"]:
- out["unexpected"][k] = v
- else:
- out["diffusion"][k] = v
- return out
-
-
-def batch_to_device(batch, device):
- for k in batch:
- if isinstance(batch[k], torch.Tensor):
- batch[k] = batch[k].to(device)
- return batch
-
-
-def sub_batch(batch, num=1):
- # choose first num in given batch
- num = num if num > 1 else 1
- for k in batch:
- batch[k] = batch[k][0:num]
- return batch
-
-
-def wrap_loader(loader):
- while True:
- for batch in loader: # TODO: check whether every epoch yields batches in the same order
- yield batch
-
-
-def disable_grads(model):
- for p in model.parameters():
- p.requires_grad = False
-
-
-def count_params(params):
- total_trainable_params_count = 0
- for p in params:
- total_trainable_params_count += p.numel()
- print("total_trainable_params_count is: ", total_trainable_params_count)
-
-
-def update_ema(target_params, source_params, rate=0.99):
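- # exponential moving average: target <- rate * target + (1 - rate) * source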
- for targ, src in zip(target_params, source_params):
- targ.detach().mul_(rate).add_(src, alpha=1 - rate)
-
-
-def create_expt_folder_with_auto_resuming(OUTPUT_ROOT, name):
- #curr_folder_name = os.getcwd().split("/")[-1]
- name = os.path.join( OUTPUT_ROOT, name )
- writer = None
- checkpoint = None
-
- if os.path.exists(name):
- all_tags = os.listdir(name)
- all_existing_tags = [ tag for tag in all_tags if tag.startswith('tag') ]
- all_existing_tags.sort()
- all_existing_tags = all_existing_tags[::-1]
- for previous_tag in all_existing_tags:
- potential_ckpt = os.path.join( name, previous_tag, 'checkpoint_latest.pth' )
- if os.path.exists(potential_ckpt):
- checkpoint = potential_ckpt
- if get_rank() == 0:
- print('ckpt found '+ potential_ckpt)
- break
- curr_tag = 'tag'+str(len(all_existing_tags)).zfill(2)
- name = os.path.join( name, curr_tag ) # output/name/tagxx
- else:
- name = os.path.join( name, 'tag00' ) # output/name/tag00
-
- if get_rank() == 0:
- os.makedirs(name)
- os.makedirs( os.path.join(name,'Log') )
- writer = SummaryWriter( os.path.join(name,'Log') )
-
- return name, writer, checkpoint
-
-
-
-# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = #
-# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = #
-# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = #
-
-
-
-
-
-
-class Trainer:
- def __init__(self, config):
-
- self.config = config
- self.device = torch.device("cuda")
-
- self.l_simple_weight = 1
- self.name, self.writer, checkpoint = create_expt_folder_with_auto_resuming(config.OUTPUT_ROOT, config.name)
- if get_rank() == 0:
- shutil.copyfile(config.yaml_file, os.path.join(self.name, "train_config_file.yaml") )
- torch.save( vars(config), os.path.join(self.name, "config_dict.pth") )
-
- # = = = = = = = = = = create model and diffusion = = = = = = = = = = #
- self.model = instantiate_from_config(config.model).to(self.device)
- self.autoencoder = instantiate_from_config(config.autoencoder).to(self.device)
- self.text_encoder = instantiate_from_config(config.text_encoder).to(self.device)
- self.diffusion = instantiate_from_config(config.diffusion).to(self.device)
-
-
- state_dict = read_official_ckpt( os.path.join(config.DATA_ROOT, config.official_ckpt_name) )
- missing_keys, unexpected_keys = self.model.load_state_dict( state_dict["model"], strict=False )
- assert unexpected_keys == []
- original_params_names = list( state_dict["model"].keys() )
- self.autoencoder.load_state_dict( state_dict["autoencoder"] )
- self.text_encoder.load_state_dict( state_dict["text_encoder"] )
- self.diffusion.load_state_dict( state_dict["diffusion"] )
-
- self.autoencoder.eval()
- self.text_encoder.eval()
- disable_grads(self.autoencoder)
- disable_grads(self.text_encoder)
-
-
-
- # = = load from ckpt: (usually second stage whole model finetune) = = #
- if self.config.ckpt is not None:
- first_stage_ckpt = torch.load(self.config.ckpt, map_location="cpu")
- self.model.load_state_dict(first_stage_ckpt["model"])
-
-
-
-
- # = = = = = = = = = = create opt = = = = = = = = = = #
- print(" ")
- print("IMPORTANT: following code decides which params trainable!")
- print(" ")
-
- if self.config.whole:
- print("Entire model is trainable")
- params = list(self.model.parameters())
- else:
- print("Only new added components will be updated")
- params = []
- trainable_names = []
- for name, p in self.model.named_parameters():
- if ("transformer_blocks" in name) and ("fuser" in name):
- params.append(p)
- trainable_names.append(name)
- elif "position_net" in name:
- params.append(p)
- trainable_names.append(name)
- else:
- # all newly added trainable params have to be handled above,
- # otherwise the following assert will fire
- assert name in original_params_names, name
-
- all_params_name = list( self.model.state_dict().keys() )
- assert set(all_params_name) == set(trainable_names + original_params_names)
-
- self.opt = torch.optim.AdamW(params, lr=config.base_learning_rate, weight_decay=config.weight_decay)
- count_params(params)
-
- self.master_params = list(self.model.parameters()) # note: 'params' above cannot serve as master_params since it holds only the trainable subset
-
- if config.enable_ema:
- self.ema = deepcopy(self.model)
- self.ema_params = list(self.ema.parameters())
- self.ema.eval()
-
- # = = = = = = = = = = create scheduler = = = = = = = = = = #
- if config.scheduler_type == "cosine":
- self.scheduler = get_cosine_schedule_with_warmup(self.opt, num_warmup_steps=config.warmup_steps, num_training_steps=config.total_iters)
- elif config.scheduler_type == "constant":
- self.scheduler = get_constant_schedule_with_warmup(self.opt, num_warmup_steps=config.warmup_steps)
- else:
- assert False
-
-
-
- # = = = = = = = = = = create data = = = = = = = = = = #
- train_dataset_repeats = config.train_dataset_repeats if 'train_dataset_repeats' in config else None
- dataset_train = ConCatDataset(config.train_dataset_names, config.DATA_ROOT, config.which_embedder, train=True, repeats=train_dataset_repeats)
- sampler = DistributedSampler(dataset_train) if config.distributed else None
- loader_train = DataLoader( dataset_train, batch_size=config.batch_size,
- shuffle=(sampler is None),
- num_workers=config.workers,
- pin_memory=True,
- sampler=sampler)
- self.dataset_train = dataset_train
- self.loader_train = wrap_loader(loader_train)
-
- if get_rank() == 0:
- total_image = dataset_train.total_images()
- print("Total training images: ", total_image)
-
-
- # = = = = = = = = = = load from autoresuming ckpt = = = = = = = = = = #
- self.starting_iter = 0
- if checkpoint is not None:
- checkpoint = torch.load(checkpoint, map_location="cpu")
- self.model.load_state_dict(checkpoint["model"])
- if config.enable_ema:
- self.ema.load_state_dict(checkpoint["ema"])
- self.opt.load_state_dict(checkpoint["opt"])
- self.scheduler.load_state_dict(checkpoint["scheduler"])
- self.starting_iter = checkpoint["iters"]
- if self.starting_iter >= config.total_iters:
- synchronize()
- print("Training finished. Start exiting")
- exit()
-
-
- # = = = = = misc = = = = = #
- if get_rank() == 0:
- print("Actual total need see images is: ", config.total_iters*config.total_batch_size)
- print("Equivalent training epoch is: ", (config.total_iters*config.total_batch_size) / len(dataset_train) )
- self.image_caption_saver = ImageCaptionSaver(self.name)
- # self.counter = Counter(config.total_batch_size, config.save_every_images)
-
- if config.use_o2:
- self.model, self.opt = amp.initialize(self.model, self.opt, opt_level="O2")
- self.model.use_o2 = True
-
-
- # = = = = = wrap into ddp = = = = = #
- if config.distributed:
- self.model = DDP( self.model, device_ids=[config.local_rank], output_device=config.local_rank, broadcast_buffers=False )
-
-
-
-
-
- @torch.no_grad()
- def get_input(self, batch):
-
- z = self.autoencoder.encode( batch["image"] )
-
- context = self.text_encoder.encode( batch["caption"] )
-
- _t = torch.rand(z.shape[0]).to(z.device)
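- # skew the timestep distribution with resample_step_gamma (gamma == 1 gives uniform sampling)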
- t = (torch.pow(_t, self.config.resample_step_gamma) * 1000).long()
- t = torch.where(t!=1000, t, 999) # if 1000, then replace it with 999
-
- return z, t, context
-
-
- def run_one_step(self, batch):
- x_start, t, context = self.get_input(batch)
- noise = torch.randn_like(x_start)
- x_noisy = self.diffusion.q_sample(x_start=x_start, t=t, noise=noise)
-
- input = dict(x = x_noisy,
- timesteps = t,
- context = context,
- boxes = batch['boxes'],
- masks = batch['masks'],
- text_masks = batch['text_masks'],
- image_masks = batch['image_masks'],
- text_embeddings = batch["text_embeddings"],
- image_embeddings = batch["image_embeddings"] )
- model_output = self.model(input)
-
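- # epsilon-prediction objective: regress the injected Gaussian noise with an MSE loss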
- loss = torch.nn.functional.mse_loss(model_output, noise) * self.l_simple_weight
-
- self.loss_dict = {"loss": loss.item()}
-
- return loss
-
-
-
- def start_training(self):
-
- if not self.config.use_o2:
- # use PyTorch native mixed-precision training, which is similar to apex O1 but faster
- scaler = torch.cuda.amp.GradScaler()
-
-
- iterator = tqdm(range(self.starting_iter, self.config.total_iters), desc='Training progress', disable=get_rank() != 0 )
- self.model.train()
- for iter_idx in iterator: # note: iter_idx is not from 0 if resume training
- self.iter_idx = iter_idx
-
- self.opt.zero_grad()
- batch = next(self.loader_train)
- batch_to_device(batch, self.device)
-
- if self.config.use_o2:
- loss = self.run_one_step(batch)
- with amp.scale_loss(loss, self.opt) as scaled_loss:
- scaled_loss.backward()
- self.opt.step()
- else:
- enabled = True if self.config.use_mixed else False
- with torch.cuda.amp.autocast(enabled=enabled): # with torch.autocast(enabled=True):
- loss = self.run_one_step(batch)
- scaler.scale(loss).backward()
- scaler.step(self.opt)
- scaler.update()
-
-
- self.scheduler.step()
-
- if self.config.enable_ema:
- update_ema(self.ema_params, self.master_params, self.config.ema_rate)
-
-
- if (get_rank() == 0):
- if (iter_idx % 10 == 0):
- self.log_loss()
- if (iter_idx == 0) or ( iter_idx % self.config.save_every_iters == 0 ) or (iter_idx == self.config.total_iters-1):
- self.save_ckpt_and_result()
- synchronize()
-
-
- synchronize()
- print("Training finished. Start exiting")
- exit()
-
-
- def log_loss(self):
- for k, v in self.loss_dict.items():
- self.writer.add_scalar( k, v, self.iter_idx+1 ) # we add 1 as the actual name
-
-
- @torch.no_grad()
- def save_ckpt_and_result(self):
-
- model_wo_wrapper = self.model.module if self.config.distributed else self.model
-
- iter_name = self.iter_idx + 1 # we add 1 as the actual name
-
- if not self.config.disable_inference_in_training:
- # Do a quick inference on one training batch
- batch_here = self.config.batch_size
- batch = sub_batch( next(self.loader_train), batch_here)
- batch_to_device(batch, self.device)
-
-
- real_images_with_box_drawing = [] # we save this during training for better visualization
- for i in range(batch_here):
- temp_data = {"image": batch["image"][i], "boxes":batch["boxes"][i]}
- im = self.dataset_train.datasets[0].vis_getitem_data(out=temp_data, return_tensor=True, print_caption=False)
- real_images_with_box_drawing.append(im)
- real_images_with_box_drawing = torch.stack(real_images_with_box_drawing)
-
-
- uc = self.text_encoder.encode( batch_here*[""] )
- context = self.text_encoder.encode( batch["caption"] )
-
- ddim_sampler = PLMSSampler(self.diffusion, model_wo_wrapper)
- shape = (batch_here, model_wo_wrapper.in_channels, model_wo_wrapper.image_size, model_wo_wrapper.image_size)
- input = dict( x = None,
- timesteps = None,
- context = context,
- boxes = batch['boxes'],
- masks = batch['masks'],
- text_masks = batch['text_masks'],
- image_masks = batch['image_masks'],
- text_embeddings = batch["text_embeddings"],
- image_embeddings = batch["image_embeddings"] )
- samples = ddim_sampler.sample(S=50, shape=shape, input=input, uc=uc, guidance_scale=5)
-
- # old
- # autoencoder_wo_wrapper = self.autoencoder # Note itself is without wrapper since we do not train that.
- # autoencoder_wo_wrapper = autoencoder_wo_wrapper.cpu() # To save GPU
- # samples = autoencoder_wo_wrapper.decode(samples.cpu())
- # autoencoder_wo_wrapper = autoencoder_wo_wrapper.to(self.device)
-
- # new
- autoencoder_wo_wrapper = self.autoencoder # Note: the autoencoder itself is unwrapped since we do not train it.
- samples = autoencoder_wo_wrapper.decode(samples).cpu()
-
- self.image_caption_saver(samples, real_images_with_box_drawing, batch["caption"], iter_name)
-
- ckpt = dict(model = model_wo_wrapper.state_dict(),
- opt = self.opt.state_dict(),
- scheduler= self.scheduler.state_dict(),
- iters = self.iter_idx+1 )
- if self.config.enable_ema:
- ckpt["ema"] = self.ema.state_dict()
- torch.save( ckpt, os.path.join(self.name, "checkpoint_"+str(iter_name).zfill(8)+".pth") )
- torch.save( ckpt, os.path.join(self.name, "checkpoint_latest.pth") )
-
-
-
-
-
-
-
-
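- # A minimal sketch (not the original entry point) of resuming from the
- # "checkpoint_latest.pth" saved above and running one mixed-precision step;
- # `trainer` is assumed to be the object whose start_training() loop appears
- # earlier, and batch_to_device is the helper used in that loop:
- def _example_resume_and_step(trainer):
-     ckpt = torch.load(os.path.join(trainer.name, "checkpoint_latest.pth"), map_location="cpu")
-     trainer.model.load_state_dict(ckpt["model"])
-     trainer.opt.load_state_dict(ckpt["opt"])
-     trainer.scheduler.load_state_dict(ckpt["scheduler"])
-     trainer.starting_iter = ckpt["iters"]  # the loop resumes from this index
-     scaler = torch.cuda.amp.GradScaler()
-     trainer.opt.zero_grad()
-     batch = next(trainer.loader_train)
-     batch_to_device(batch, trainer.device)
-     with torch.cuda.amp.autocast(enabled=bool(trainer.config.use_mixed)):
-         loss = trainer.run_one_step(batch)   # forward pass under autocast
-     scaler.scale(loss).backward()            # scaled backward, outside autocast
-     scaler.step(trainer.opt)
-     scaler.update()
-     trainer.scheduler.step()
-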
diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/PyPDF2/generic/_data_structures.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/PyPDF2/generic/_data_structures.py
deleted file mode 100644
index 19f5be9fbe3a237eb648bc1141f1c9cf6a763dc3..0000000000000000000000000000000000000000
--- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/PyPDF2/generic/_data_structures.py
+++ /dev/null
@@ -1,1382 +0,0 @@
-# Copyright (c) 2006, Mathieu Fenniak
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright notice,
-# this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-# * The name of the author may not be used to endorse or promote products
-# derived from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-# POSSIBILITY OF SUCH DAMAGE.
-
-
-__author__ = "Mathieu Fenniak"
-__author_email__ = "biziqe@mathieu.fenniak.net"
-
-import logging
-import re
-from io import BytesIO
-from typing import Any, Dict, Iterable, List, Optional, Tuple, Union, cast
-
-from .._protocols import PdfWriterProtocol
-from .._utils import (
- WHITESPACES,
- StreamType,
- b_,
- deprecate_with_replacement,
- deprecation_with_replacement,
- hex_str,
- logger_warning,
- read_non_whitespace,
- read_until_regex,
- skip_over_comment,
-)
-from ..constants import (
- CheckboxRadioButtonAttributes,
- FieldDictionaryAttributes,
-)
-from ..constants import FilterTypes as FT
-from ..constants import OutlineFontFlag
-from ..constants import StreamAttributes as SA
-from ..constants import TypArguments as TA
-from ..constants import TypFitArguments as TF
-from ..errors import STREAM_TRUNCATED_PREMATURELY, PdfReadError, PdfStreamError
-from ._base import (
- BooleanObject,
- FloatObject,
- IndirectObject,
- NameObject,
- NullObject,
- NumberObject,
- PdfObject,
- TextStringObject,
-)
-from ._fit import Fit
-from ._utils import read_hex_string_from_stream, read_string_from_stream
-
-logger = logging.getLogger(__name__)
-NumberSigns = b"+-"
-IndirectPattern = re.compile(rb"[+-]?(\d+)\s+(\d+)\s+R[^a-zA-Z]")
-
-
-class ArrayObject(list, PdfObject):
- def clone(
- self,
- pdf_dest: PdfWriterProtocol,
- force_duplicate: bool = False,
- ignore_fields: Union[Tuple[str, ...], List[str], None] = (),
- ) -> "ArrayObject":
- """clone object into pdf_dest"""
- try:
- if self.indirect_reference.pdf == pdf_dest and not force_duplicate: # type: ignore
- return self
- except Exception:
- pass
- arr = cast("ArrayObject", self._reference_clone(ArrayObject(), pdf_dest))
- for data in self:
- if isinstance(data, StreamObject):
- # if not hasattr(data, "indirect_reference"):
- # data.indirect_reference = None
- dup = data._reference_clone(
- data.clone(pdf_dest, force_duplicate, ignore_fields), pdf_dest
- )
- arr.append(dup.indirect_reference)
- elif hasattr(data, "clone"):
- arr.append(data.clone(pdf_dest, force_duplicate, ignore_fields))
- else:
- arr.append(data)
- return cast("ArrayObject", arr)
-
- def items(self) -> Iterable[Any]:
- """
- Emulate DictionaryObject.items for a list
- (index, object)
- """
- return enumerate(self)
-
- def write_to_stream(
- self, stream: StreamType, encryption_key: Union[None, str, bytes]
- ) -> None:
- stream.write(b"[")
- for data in self:
- stream.write(b" ")
- data.write_to_stream(stream, encryption_key)
- stream.write(b" ]")
-
- def writeToStream(
- self, stream: StreamType, encryption_key: Union[None, str, bytes]
- ) -> None: # pragma: no cover
- deprecation_with_replacement("writeToStream", "write_to_stream", "3.0.0")
- self.write_to_stream(stream, encryption_key)
-
- @staticmethod
- def read_from_stream(
- stream: StreamType,
- pdf: Any,
- forced_encoding: Union[None, str, List[str], Dict[int, str]] = None,
- ) -> "ArrayObject": # PdfReader
- arr = ArrayObject()
- tmp = stream.read(1)
- if tmp != b"[":
- raise PdfReadError("Could not read array")
- while True:
- # skip leading whitespace
- tok = stream.read(1)
- while tok.isspace():
- tok = stream.read(1)
- stream.seek(-1, 1)
- # check for array ending
- peekahead = stream.read(1)
- if peekahead == b"]":
- break
- stream.seek(-1, 1)
- # read and append obj
- arr.append(read_object(stream, pdf, forced_encoding))
- return arr
-
- @staticmethod
- def readFromStream(
- stream: StreamType, pdf: Any # PdfReader
- ) -> "ArrayObject": # pragma: no cover
- deprecation_with_replacement("readFromStream", "read_from_stream", "3.0.0")
- return ArrayObject.read_from_stream(stream, pdf)
-
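- # A minimal parsing sketch for read_from_stream above; pdf=None is assumed
- # to be acceptable for a purely literal array with no indirect references:
- def _example_parse_array() -> ArrayObject:
-     return ArrayObject.read_from_stream(BytesIO(b"[ 1 2 (abc) ]"), None)  # -> [1, 2, "abc"]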
-
-class DictionaryObject(dict, PdfObject):
- def clone(
- self,
- pdf_dest: PdfWriterProtocol,
- force_duplicate: bool = False,
- ignore_fields: Union[Tuple[str, ...], List[str], None] = (),
- ) -> "DictionaryObject":
- """clone object into pdf_dest"""
- try:
- if self.indirect_reference.pdf == pdf_dest and not force_duplicate: # type: ignore
- return self
- except Exception:
- pass
-
- d__ = cast(
- "DictionaryObject", self._reference_clone(self.__class__(), pdf_dest)
- )
- if ignore_fields is None:
- ignore_fields = []
- if len(d__.keys()) == 0:
- d__._clone(self, pdf_dest, force_duplicate, ignore_fields)
- return d__
-
- def _clone(
- self,
- src: "DictionaryObject",
- pdf_dest: PdfWriterProtocol,
- force_duplicate: bool,
- ignore_fields: Union[Tuple[str, ...], List[str]],
- ) -> None:
- """update the object from src"""
- # First check whether this is a chained list; we loop rather than recurse to avoid unbounded recursion
- if (
- ("/Next" not in ignore_fields and "/Next" in src)
- or ("/Prev" not in ignore_fields and "/Prev" in src)
- ) or (
- ("/N" not in ignore_fields and "/N" in src)
- or ("/V" not in ignore_fields and "/V" in src)
- ):
- ignore_fields = list(ignore_fields)
- for lst in (("/Next", "/Prev"), ("/N", "/V")):
- for k in lst:
- objs = []
- if (
- k in src
- and k not in self
- and isinstance(src.raw_get(k), IndirectObject)
- ):
- cur_obj: Optional["DictionaryObject"] = cast(
- "DictionaryObject", src[k]
- )
- prev_obj: Optional["DictionaryObject"] = self
- while cur_obj is not None:
- clon = cast(
- "DictionaryObject",
- cur_obj._reference_clone(cur_obj.__class__(), pdf_dest),
- )
- objs.append((cur_obj, clon))
- assert prev_obj is not None
- prev_obj[NameObject(k)] = clon.indirect_reference
- prev_obj = clon
- try:
- if cur_obj == src:
- cur_obj = None
- else:
- cur_obj = cast("DictionaryObject", cur_obj[k])
- except Exception:
- cur_obj = None
- for (s, c) in objs:
- c._clone(s, pdf_dest, force_duplicate, ignore_fields + [k])
-
- for k, v in src.items():
- if k not in ignore_fields:
- if isinstance(v, StreamObject):
- if not hasattr(v, "indirect_reference"):
- v.indirect_reference = None
- vv = v.clone(pdf_dest, force_duplicate, ignore_fields)
- assert vv.indirect_reference is not None
- self[k.clone(pdf_dest)] = vv.indirect_reference # type: ignore[attr-defined]
- else:
- if k not in self:
- self[NameObject(k)] = (
- v.clone(pdf_dest, force_duplicate, ignore_fields)
- if hasattr(v, "clone")
- else v
- )
-
- def raw_get(self, key: Any) -> Any:
- return dict.__getitem__(self, key)
-
- def __setitem__(self, key: Any, value: Any) -> Any:
- if not isinstance(key, PdfObject):
- raise ValueError("key must be PdfObject")
- if not isinstance(value, PdfObject):
- raise ValueError("value must be PdfObject")
- return dict.__setitem__(self, key, value)
-
- def setdefault(self, key: Any, value: Optional[Any] = None) -> Any:
- if not isinstance(key, PdfObject):
- raise ValueError("key must be PdfObject")
- if not isinstance(value, PdfObject):
- raise ValueError("value must be PdfObject")
- return dict.setdefault(self, key, value) # type: ignore
-
- def __getitem__(self, key: Any) -> PdfObject:
- return dict.__getitem__(self, key).get_object()
-
- @property
- def xmp_metadata(self) -> Optional[PdfObject]:
- """
- Retrieve XMP (Extensible Metadata Platform) data relevant to this
- object, if available.
-
- Stability: Added in v1.12, will exist for all future v1.x releases.
- :return: a :class:`~PyPDF2.xmp.XmpInformation` instance that can be
- used to access XMP metadata from the document, or None if no
- metadata was found on the document root.
- """
- from ..xmp import XmpInformation
-
- metadata = self.get("/Metadata", None)
- if metadata is None:
- return None
- metadata = metadata.get_object()
-
- if not isinstance(metadata, XmpInformation):
- metadata = XmpInformation(metadata)
- self[NameObject("/Metadata")] = metadata
- return metadata
-
- def getXmpMetadata(
- self,
- ) -> Optional[PdfObject]: # pragma: no cover
- """
- .. deprecated:: 1.28.3
-
- Use :meth:`xmp_metadata` instead.
- """
- deprecation_with_replacement("getXmpMetadata", "xmp_metadata", "3.0.0")
- return self.xmp_metadata
-
- @property
- def xmpMetadata(self) -> Optional[PdfObject]: # pragma: no cover
- """
- .. deprecated:: 1.28.3
-
- Use :meth:`xmp_metadata` instead.
- """
- deprecation_with_replacement("xmpMetadata", "xmp_metadata", "3.0.0")
- return self.xmp_metadata
-
- def write_to_stream(
- self, stream: StreamType, encryption_key: Union[None, str, bytes]
- ) -> None:
- stream.write(b"<<\n")
- for key, value in list(self.items()):
- key.write_to_stream(stream, encryption_key)
- stream.write(b" ")
- value.write_to_stream(stream, encryption_key)
- stream.write(b"\n")
- stream.write(b">>")
-
- def writeToStream(
- self, stream: StreamType, encryption_key: Union[None, str, bytes]
- ) -> None: # pragma: no cover
- deprecation_with_replacement("writeToStream", "write_to_stream", "3.0.0")
- self.write_to_stream(stream, encryption_key)
-
- @staticmethod
- def read_from_stream(
- stream: StreamType,
- pdf: Any, # PdfReader
- forced_encoding: Union[None, str, List[str], Dict[int, str]] = None,
- ) -> "DictionaryObject":
- def get_next_obj_pos(
- p: int, p1: int, rem_gens: List[int], pdf: Any
- ) -> int: # PdfReader
- gen_xref = pdf.xref[rem_gens[0]]
- for o in gen_xref:
- if p1 > gen_xref[o] and p < gen_xref[o]:
- p1 = gen_xref[o]
- if len(rem_gens) == 1:
- return p1
- else:
- return get_next_obj_pos(p, p1, rem_gens[1:], pdf)
-
- def read_unsized_from_stream(stream: StreamType, pdf: Any) -> bytes: # PdfReader
- # we are just pointing at beginning of the stream
- eon = get_next_obj_pos(stream.tell(), 2**32, list(pdf.xref), pdf) - 1
- curr = stream.tell()
- rw = stream.read(eon - stream.tell())
- p = rw.find(b"endstream")
- if p < 0:
- raise PdfReadError(
- f"Unable to find 'endstream' marker for obj starting at {curr}."
- )
- stream.seek(curr + p + 9)
- return rw[: p - 1]
-
- tmp = stream.read(2)
- if tmp != b"<<":
- raise PdfReadError(
- f"Dictionary read error at byte {hex_str(stream.tell())}: "
- "stream must begin with '<<'"
- )
- data: Dict[Any, Any] = {}
- while True:
- tok = read_non_whitespace(stream)
- if tok == b"\x00":
- continue
- elif tok == b"%":
- stream.seek(-1, 1)
- skip_over_comment(stream)
- continue
- if not tok:
- raise PdfStreamError(STREAM_TRUNCATED_PREMATURELY)
-
- if tok == b">":
- stream.read(1)
- break
- stream.seek(-1, 1)
- try:
- key = read_object(stream, pdf)
- tok = read_non_whitespace(stream)
- stream.seek(-1, 1)
- value = read_object(stream, pdf, forced_encoding)
- except Exception as exc:
- if pdf is not None and pdf.strict:
- raise PdfReadError(exc.__repr__())
- logger_warning(exc.__repr__(), __name__)
- retval = DictionaryObject()
- retval.update(data)
- return retval # return partial data
-
- if key not in data:
- data[key] = value
- else:
- # multiple definitions of key not permitted
- msg = (
- f"Multiple definitions in dictionary at byte "
- f"{hex_str(stream.tell())} for key {key}"
- )
- if pdf is not None and pdf.strict:
- raise PdfReadError(msg)
- logger_warning(msg, __name__)
-
- pos = stream.tell()
- s = read_non_whitespace(stream)
- if s == b"s" and stream.read(5) == b"tream":
- eol = stream.read(1)
- # odd PDF file output has spaces after 'stream' keyword but before EOL.
- # patch provided by Danial Sandler
- while eol == b" ":
- eol = stream.read(1)
- if eol not in (b"\n", b"\r"):
- raise PdfStreamError("Stream data must be followed by a newline")
- if eol == b"\r":
- # read \n after
- if stream.read(1) != b"\n":
- stream.seek(-1, 1)
- # this is a stream object, not a dictionary
- if SA.LENGTH not in data:
- raise PdfStreamError("Stream length not defined")
- length = data[SA.LENGTH]
- if isinstance(length, IndirectObject):
- t = stream.tell()
- length = pdf.get_object(length)
- stream.seek(t, 0)
- pstart = stream.tell()
- data["__streamdata__"] = stream.read(length)
- e = read_non_whitespace(stream)
- ndstream = stream.read(8)
- if (e + ndstream) != b"endstream":
- # (sigh) - the odd PDF file has a length that is too long, so
- # we need to read backwards to find the "endstream" ending.
- # ReportLab (unknown version) generates files with this bug,
- # and Python users working with PDF files tend to be our
- # audience. We need to do this to correct the stream data and
- # chop off an extra character.
- pos = stream.tell()
- stream.seek(-10, 1)
- end = stream.read(9)
- if end == b"endstream":
- # we found it by looking back one character further.
- data["__streamdata__"] = data["__streamdata__"][:-1]
- elif not pdf.strict:
- stream.seek(pstart, 0)
- data["__streamdata__"] = read_unsized_from_steam(stream, pdf)
- pos = stream.tell()
- else:
- stream.seek(pos, 0)
- raise PdfReadError(
- "Unable to find 'endstream' marker after stream at byte "
- f"{hex_str(stream.tell())} (nd='{ndstream!r}', end='{end!r}')."
- )
- else:
- stream.seek(pos, 0)
- if "__streamdata__" in data:
- return StreamObject.initialize_from_dictionary(data)
- else:
- retval = DictionaryObject()
- retval.update(data)
- return retval
-
- @staticmethod
- def readFromStream(
- stream: StreamType, pdf: Any # PdfReader
- ) -> "DictionaryObject": # pragma: no cover
- deprecation_with_replacement("readFromStream", "read_from_stream", "3.0.0")
- return DictionaryObject.read_from_stream(stream, pdf)
-
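- # A minimal sketch of reading document-level XMP metadata through the
- # xmp_metadata property above; `reader` is assumed to be a PyPDF2 PdfReader
- # (its "/Root" catalog is a DictionaryObject):
- def _example_xmp(reader: Any) -> Any:
-     catalog = reader.trailer["/Root"]
-     return catalog.xmp_metadata  # XmpInformation, or None if absent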
-
-class TreeObject(DictionaryObject):
- def __init__(self) -> None:
- DictionaryObject.__init__(self)
-
- def hasChildren(self) -> bool: # pragma: no cover
- deprecate_with_replacement("hasChildren", "has_children", "4.0.0")
- return self.has_children()
-
- def has_children(self) -> bool:
- return "/First" in self
-
- def __iter__(self) -> Any:
- return self.children()
-
- def children(self) -> Iterable[Any]:
- if not self.has_children():
- return
-
- child_ref = self[NameObject("/First")]
- child = child_ref.get_object()
- while True:
- yield child
- if child == self[NameObject("/Last")]:
- return
- child_ref = child.get(NameObject("/Next")) # type: ignore
- if child_ref is None:
- return
- child = child_ref.get_object()
-
- def addChild(self, child: Any, pdf: Any) -> None: # pragma: no cover
- deprecation_with_replacement("addChild", "add_child", "3.0.0")
- self.add_child(child, pdf)
-
- def add_child(self, child: Any, pdf: PdfWriterProtocol) -> None:
- self.insert_child(child, None, pdf)
-
- def insert_child(self, child: Any, before: Any, pdf: PdfWriterProtocol) -> None:
- def inc_parent_counter(
- parent: Union[None, IndirectObject, TreeObject], n: int
- ) -> None:
- if parent is None:
- return
- parent = cast("TreeObject", parent.get_object())
- if "/Count" in parent:
- parent[NameObject("/Count")] = NumberObject(
- cast(int, parent[NameObject("/Count")]) + n
- )
- inc_parent_counter(parent.get("/Parent", None), n)
-
- child_obj = child.get_object()
- child = child.indirect_reference # get_reference(child_obj)
- # assert isinstance(child, IndirectObject)
-
- prev: Optional[DictionaryObject]
- if "/First" not in self: # no child yet
- self[NameObject("/First")] = child
- self[NameObject("/Count")] = NumberObject(0)
- self[NameObject("/Last")] = child
- child_obj[NameObject("/Parent")] = self.indirect_reference
- inc_parent_counter(self, child_obj.get("/Count", 1))
- if "/Next" in child_obj:
- del child_obj["/Next"]
- if "/Prev" in child_obj:
- del child_obj["/Prev"]
- return
- else:
- prev = cast("DictionaryObject", self["/Last"])
-
- while prev.indirect_reference != before:
- if "/Next" in prev:
- prev = cast("TreeObject", prev["/Next"])
- else: # append at the end
- prev[NameObject("/Next")] = cast("TreeObject", child)
- child_obj[NameObject("/Prev")] = prev.indirect_reference
- child_obj[NameObject("/Parent")] = self.indirect_reference
- if "/Next" in child_obj:
- del child_obj["/Next"]
- self[NameObject("/Last")] = child
- inc_parent_counter(self, child_obj.get("/Count", 1))
- return
- try: # insert as first or in the middle
- assert isinstance(prev["/Prev"], DictionaryObject)
- prev["/Prev"][NameObject("/Next")] = child
- child_obj[NameObject("/Prev")] = prev["/Prev"]
- except Exception: # it means we are inserting in first position
- del child_obj["/Next"]
- child_obj[NameObject("/Next")] = prev
- prev[NameObject("/Prev")] = child
- child_obj[NameObject("/Parent")] = self.indirect_reference
- inc_parent_counter(self, child_obj.get("/Count", 1))
-
- def removeChild(self, child: Any) -> None: # pragma: no cover
- deprecation_with_replacement("removeChild", "remove_child", "3.0.0")
- self.remove_child(child)
-
- def _remove_node_from_tree(
- self, prev: Any, prev_ref: Any, cur: Any, last: Any
- ) -> None:
- """Adjust the pointers of the linked list and tree node count."""
- next_ref = cur.get(NameObject("/Next"), None)
- if prev is None:
- if next_ref:
- # Removing first tree node
- next_obj = next_ref.get_object()
- del next_obj[NameObject("/Prev")]
- self[NameObject("/First")] = next_ref
- self[NameObject("/Count")] = NumberObject(
- self[NameObject("/Count")] - 1 # type: ignore
- )
-
- else:
- # Removing only tree node
- assert self[NameObject("/Count")] == 1
- del self[NameObject("/Count")]
- del self[NameObject("/First")]
- if NameObject("/Last") in self:
- del self[NameObject("/Last")]
- else:
- if next_ref:
- # Removing middle tree node
- next_obj = next_ref.get_object()
- next_obj[NameObject("/Prev")] = prev_ref
- prev[NameObject("/Next")] = next_ref
- else:
- # Removing last tree node
- assert cur == last
- del prev[NameObject("/Next")]
- self[NameObject("/Last")] = prev_ref
- self[NameObject("/Count")] = NumberObject(self[NameObject("/Count")] - 1) # type: ignore
-
- def remove_child(self, child: Any) -> None:
- child_obj = child.get_object()
- child = child_obj.indirect_reference
-
- if NameObject("/Parent") not in child_obj:
- raise ValueError("Removed child does not appear to be a tree item")
- elif child_obj[NameObject("/Parent")] != self:
- raise ValueError("Removed child is not a member of this tree")
-
- found = False
- prev_ref = None
- prev = None
- cur_ref: Optional[Any] = self[NameObject("/First")]
- cur: Optional[Dict[str, Any]] = cur_ref.get_object() # type: ignore
- last_ref = self[NameObject("/Last")]
- last = last_ref.get_object()
- while cur is not None:
- if cur == child_obj:
- self._remove_node_from_tree(prev, prev_ref, cur, last)
- found = True
- break
-
- # Go to the next node
- prev_ref = cur_ref
- prev = cur
- if NameObject("/Next") in cur:
- cur_ref = cur[NameObject("/Next")]
- cur = cur_ref.get_object()
- else:
- cur_ref = None
- cur = None
-
- if not found:
- raise ValueError("Removal couldn't find item in tree")
-
- _reset_node_tree_relationship(child_obj)
-
- def remove_from_tree(self) -> None:
- """
- remove the object from the tree it is in
- """
- if NameObject("/Parent") not in self:
- raise ValueError("Removed child does not appear to be a tree item")
- else:
- cast("TreeObject", self["/Parent"]).remove_child(self)
-
- def emptyTree(self) -> None: # pragma: no cover
- deprecate_with_replacement("emptyTree", "empty_tree", "4.0.0")
- self.empty_tree()
-
- def empty_tree(self) -> None:
- for child in self:
- child_obj = child.get_object()
- _reset_node_tree_relationship(child_obj)
-
- if NameObject("/Count") in self:
- del self[NameObject("/Count")]
- if NameObject("/First") in self:
- del self[NameObject("/First")]
- if NameObject("/Last") in self:
- del self[NameObject("/Last")]
-
-
-def _reset_node_tree_relationship(child_obj: Any) -> None:
- """
- Call this after a node has been removed from a tree.
-
- This resets the node's attributes with respect to that tree.
- """
- del child_obj[NameObject("/Parent")]
- if NameObject("/Next") in child_obj:
- del child_obj[NameObject("/Next")]
- if NameObject("/Prev") in child_obj:
- del child_obj[NameObject("/Prev")]
-
-
-class StreamObject(DictionaryObject):
- def __init__(self) -> None:
- self.__data: Optional[bytes] = None  # the stream payload is bytes, not str
- self.decoded_self: Optional["DecodedStreamObject"] = None
-
- def _clone(
- self,
- src: DictionaryObject,
- pdf_dest: PdfWriterProtocol,
- force_duplicate: bool,
- ignore_fields: Union[Tuple[str, ...], List[str]],
- ) -> None:
- """update the object from src"""
- self._data = cast("StreamObject", src)._data
- try:
- decoded_self = cast("StreamObject", src).decoded_self
- if decoded_self is None:
- self.decoded_self = None
- else:
- self.decoded_self = decoded_self.clone(pdf_dest, True, ignore_fields) # type: ignore[assignment]
- except Exception:
- pass
- super()._clone(src, pdf_dest, force_duplicate, ignore_fields)
- return
-
- def hash_value_data(self) -> bytes:
- data = super().hash_value_data()
- data += b_(self._data)
- return data
-
- @property
- def decodedSelf(self) -> Optional["DecodedStreamObject"]: # pragma: no cover
- deprecation_with_replacement("decodedSelf", "decoded_self", "3.0.0")
- return self.decoded_self
-
- @decodedSelf.setter
- def decodedSelf(self, value: "DecodedStreamObject") -> None: # pragma: no cover
- deprecation_with_replacement("decodedSelf", "decoded_self", "3.0.0")
- self.decoded_self = value
-
- @property
- def _data(self) -> Any:
- return self.__data
-
- @_data.setter
- def _data(self, value: Any) -> None:
- self.__data = value
-
- def write_to_stream(
- self, stream: StreamType, encryption_key: Union[None, str, bytes]
- ) -> None:
- self[NameObject(SA.LENGTH)] = NumberObject(len(self._data))
- DictionaryObject.write_to_stream(self, stream, encryption_key)
- del self[SA.LENGTH]
- stream.write(b"\nstream\n")
- data = self._data
- if encryption_key:
- from .._security import RC4_encrypt
-
- data = RC4_encrypt(encryption_key, data)
- stream.write(data)
- stream.write(b"\nendstream")
-
- @staticmethod
- def initializeFromDictionary(
- data: Dict[str, Any]
- ) -> Union["EncodedStreamObject", "DecodedStreamObject"]: # pragma: no cover
- return StreamObject.initialize_from_dictionary(data)
-
- @staticmethod
- def initialize_from_dictionary(
- data: Dict[str, Any]
- ) -> Union["EncodedStreamObject", "DecodedStreamObject"]:
- retval: Union["EncodedStreamObject", "DecodedStreamObject"]
- if SA.FILTER in data:
- retval = EncodedStreamObject()
- else:
- retval = DecodedStreamObject()
- retval._data = data["__streamdata__"]
- del data["__streamdata__"]
- del data[SA.LENGTH]
- retval.update(data)
- return retval
-
- def flateEncode(self) -> "EncodedStreamObject": # pragma: no cover
- deprecation_with_replacement("flateEncode", "flate_encode", "3.0.0")
- return self.flate_encode()
-
- def flate_encode(self) -> "EncodedStreamObject":
- from ..filters import FlateDecode
-
- if SA.FILTER in self:
- f = self[SA.FILTER]
- if isinstance(f, ArrayObject):
- f.insert(0, NameObject(FT.FLATE_DECODE))
- else:
- newf = ArrayObject()
- newf.append(NameObject("/FlateDecode"))
- newf.append(f)
- f = newf
- else:
- f = NameObject("/FlateDecode")
- retval = EncodedStreamObject()
- retval[NameObject(SA.FILTER)] = f
- retval._data = FlateDecode.encode(self._data)
- return retval
-
-
-class DecodedStreamObject(StreamObject):
- def get_data(self) -> Any:
- return self._data
-
- def set_data(self, data: Any) -> Any:
- self._data = data
-
- def getData(self) -> Any: # pragma: no cover
- deprecation_with_replacement("getData", "get_data", "3.0.0")
- return self._data
-
- def setData(self, data: Any) -> None: # pragma: no cover
- deprecation_with_replacement("setData", "set_data", "3.0.0")
- self.set_data(data)
-
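- # A minimal round-trip sketch for flate_encode() defined above: the encoded
- # object carries a /FlateDecode filter, and get_data() transparently decodes
- # it again (assumes the FlateDecode codec from ..filters):
- def _example_flate_roundtrip() -> bytes:
-     raw = DecodedStreamObject()
-     raw.set_data(b"BT /F1 12 Tf (Hello) Tj ET")
-     encoded = raw.flate_encode()
-     assert encoded.get_data() == b"BT /F1 12 Tf (Hello) Tj ET"
-     return encoded._data  # the compressed bytes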
-
-class EncodedStreamObject(StreamObject):
- def __init__(self) -> None:
- self.decoded_self: Optional["DecodedStreamObject"] = None
-
- @property
- def decodedSelf(self) -> Optional["DecodedStreamObject"]: # pragma: no cover
- deprecation_with_replacement("decodedSelf", "decoded_self", "3.0.0")
- return self.decoded_self
-
- @decodedSelf.setter
- def decodedSelf(self, value: DecodedStreamObject) -> None: # pragma: no cover
- deprecation_with_replacement("decodedSelf", "decoded_self", "3.0.0")
- self.decoded_self = value
-
- def get_data(self) -> Union[None, str, bytes]:
- from ..filters import decode_stream_data
-
- if self.decoded_self is not None:
- # cached version of decoded object
- return self.decoded_self.get_data()
- else:
- # create decoded object
- decoded = DecodedStreamObject()
-
- decoded._data = decode_stream_data(self)
- for key, value in list(self.items()):
- if key not in (SA.LENGTH, SA.FILTER, SA.DECODE_PARMS):
- decoded[key] = value
- self.decoded_self = decoded
- return decoded._data
-
- def getData(self) -> Union[None, str, bytes]: # pragma: no cover
- deprecation_with_replacement("getData", "get_data", "3.0.0")
- return self.get_data()
-
- def set_data(self, data: Any) -> None: # pragma: no cover
- raise PdfReadError("Creating EncodedStreamObject is not currently supported")
-
- def setData(self, data: Any) -> None: # pragma: no cover
- deprecation_with_replacement("setData", "set_data", "3.0.0")
- return self.set_data(data)
-
-
-class ContentStream(DecodedStreamObject):
- def __init__(
- self,
- stream: Any,
- pdf: Any,
- forced_encoding: Union[None, str, List[str], Dict[int, str]] = None,
- ) -> None:
- self.pdf = pdf
-
- # Each operation is an (operands, operator) pair:
- # [0] : list of operand objects
- # [1] : the operator, as bytes
- self.operations: List[Tuple[Any, Any]] = []
-
- # stream may be a StreamObject or an ArrayObject containing
- # multiple StreamObjects to be cat'd together.
- if stream is not None:
- stream = stream.get_object()
- if isinstance(stream, ArrayObject):
- data = b""
- for s in stream:
- data += b_(s.get_object().get_data())
- if len(data) == 0 or data[-1:] != b"\n": # compare a bytes slice, not an int
- data += b"\n"
- stream_bytes = BytesIO(data)
- else:
- stream_data = stream.get_data()
- assert stream_data is not None
- stream_data_bytes = b_(stream_data)
- stream_bytes = BytesIO(stream_data_bytes)
- self.forced_encoding = forced_encoding
- self.__parse_content_stream(stream_bytes)
-
- def clone(
- self,
- pdf_dest: Any,
- force_duplicate: bool = False,
- ignore_fields: Union[Tuple[str, ...], List[str], None] = (),
- ) -> "ContentStream":
- """clone object into pdf_dest"""
- try:
- if self.indirect_reference.pdf == pdf_dest and not force_duplicate: # type: ignore
- return self
- except Exception:
- pass
-
- d__ = cast(
- "ContentStream", self._reference_clone(self.__class__(None, None), pdf_dest)
- )
- if ignore_fields is None:
- ignore_fields = []
- d__._clone(self, pdf_dest, force_duplicate, ignore_fields)
- return d__
-
- def _clone(
- self,
- src: DictionaryObject,
- pdf_dest: PdfWriterProtocol,
- force_duplicate: bool,
- ignore_fields: Union[Tuple[str, ...], List[str]],
- ) -> None:
- """update the object from src"""
- self.pdf = pdf_dest
- self.operations = list(cast("ContentStream", src).operations)
- self.forced_encoding = cast("ContentStream", src).forced_encoding
- # no need to call DictionaryObjection or any
- # super(DictionaryObject,self)._clone(src, pdf_dest, force_duplicate, ignore_fields)
- return
-
- def __parse_content_stream(self, stream: StreamType) -> None:
- stream.seek(0, 0)
- operands: List[Union[int, str, PdfObject]] = []
- while True:
- peek = read_non_whitespace(stream)
- if peek == b"" or peek == 0:
- break
- stream.seek(-1, 1)
- if peek.isalpha() or peek in (b"'", b'"'):
- operator = read_until_regex(stream, NameObject.delimiter_pattern, True)
- if operator == b"BI":
- # begin inline image - a completely different parsing
- # mechanism is required, of course... thanks buddy...
- assert operands == []
- ii = self._read_inline_image(stream)
- self.operations.append((ii, b"INLINE IMAGE"))
- else:
- self.operations.append((operands, operator))
- operands = []
- elif peek == b"%":
- # If we encounter a comment in the content stream, we have to
- # handle it here. Typically, read_object will handle
- # encountering a comment -- but read_object assumes that
- # following the comment must be the object we're trying to
- # read. In this case, it could be an operator instead.
- while peek not in (b"\r", b"\n"):
- peek = stream.read(1)
- else:
- operands.append(read_object(stream, None, self.forced_encoding))
-
- def _read_inline_image(self, stream: StreamType) -> Dict[str, Any]:
- # begin reading just after the "BI" - begin image
- # first read the dictionary of settings.
- settings = DictionaryObject()
- while True:
- tok = read_non_whitespace(stream)
- stream.seek(-1, 1)
- if tok == b"I":
- # "ID" - begin of image data
- break
- key = read_object(stream, self.pdf)
- tok = read_non_whitespace(stream)
- stream.seek(-1, 1)
- value = read_object(stream, self.pdf)
- settings[key] = value
- # left at beginning of ID
- tmp = stream.read(3)
- assert tmp[:2] == b"ID"
- data = BytesIO()
- # Read the inline image, while checking for EI (End Image) operator.
- while True:
- # Read 8 kB at a time and check if the chunk contains the E operator.
- buf = stream.read(8192)
- # We have reached the end of the stream, but haven't found the EI operator.
- if not buf:
- raise PdfReadError("Unexpected end of stream")
- loc = buf.find(b"E")
-
- if loc == -1:
- data.write(buf)
- else:
- # Write out everything before the E.
- data.write(buf[0:loc])
-
- # Seek back in the stream to read the E next.
- stream.seek(loc - len(buf), 1)
- tok = stream.read(1)
- # Check for End Image
- tok2 = stream.read(1)
- if tok2 == b"I" and buf[loc - 1 : loc] in WHITESPACES:
- # Data can contain [\s]EI, so check for the whitespace separator; four characters suffice, and a following Q operator is not required.
- tok3 = stream.read(1)
- info = tok + tok2
- # We need to find at least one whitespace after.
- has_q_whitespace = False
- while tok3 in WHITESPACES:
- has_q_whitespace = True
- info += tok3
- tok3 = stream.read(1)
- if has_q_whitespace:
- stream.seek(-1, 1)
- break
- else:
- stream.seek(-1, 1)
- data.write(info)
- else:
- stream.seek(-1, 1)
- data.write(tok)
- return {"settings": settings, "data": data.getvalue()}
-
- @property
- def _data(self) -> bytes:
- newdata = BytesIO()
- for operands, operator in self.operations:
- if operator == b"INLINE IMAGE":
- newdata.write(b"BI")
- dicttext = BytesIO()
- operands["settings"].write_to_stream(dicttext, None)
- newdata.write(dicttext.getvalue()[2:-2])
- newdata.write(b"ID ")
- newdata.write(operands["data"])
- newdata.write(b"EI")
- else:
- for op in operands:
- op.write_to_stream(newdata, None)
- newdata.write(b" ")
- newdata.write(b_(operator))
- newdata.write(b"\n")
- return newdata.getvalue()
-
- @_data.setter
- def _data(self, value: Union[str, bytes]) -> None:
- self.__parse_content_stream(BytesIO(b_(value)))
-
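- # A minimal sketch of walking a page's content-stream operators with the
- # class above; `page` and `reader` are assumed to come from a PyPDF2
- # PdfReader (the classic (operands, operator) inspection pattern):
- def _example_walk_operators(page: Any, reader: Any) -> List[bytes]:
-     content = ContentStream(page["/Contents"].get_object(), reader)
-     return [operator for _operands, operator in content.operations]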
-
-def read_object(
- stream: StreamType,
- pdf: Any, # PdfReader
- forced_encoding: Union[None, str, List[str], Dict[int, str]] = None,
-) -> Union[PdfObject, int, str, ContentStream]:
- tok = stream.read(1)
- stream.seek(-1, 1) # reset to start
- if tok == b"/":
- return NameObject.read_from_stream(stream, pdf)
- elif tok == b"<":
- # hexadecimal string OR dictionary
- peek = stream.read(2)
- stream.seek(-2, 1) # reset to start
-
- if peek == b"<<":
- return DictionaryObject.read_from_stream(stream, pdf, forced_encoding)
- else:
- return read_hex_string_from_stream(stream, forced_encoding)
- elif tok == b"[":
- return ArrayObject.read_from_stream(stream, pdf, forced_encoding)
- elif tok == b"t" or tok == b"f":
- return BooleanObject.read_from_stream(stream)
- elif tok == b"(":
- return read_string_from_stream(stream, forced_encoding)
- elif tok == b"e" and stream.read(6) == b"endobj":
- stream.seek(-6, 1)
- return NullObject()
- elif tok == b"n":
- return NullObject.read_from_stream(stream)
- elif tok == b"%":
- # comment
- while tok not in (b"\r", b"\n"):
- tok = stream.read(1)
- # Prevents an infinite loop by raising an error if the stream is at
- # the EOF
- if len(tok) <= 0:
- raise PdfStreamError("File ended unexpectedly.")
- tok = read_non_whitespace(stream)
- stream.seek(-1, 1)
- return read_object(stream, pdf, forced_encoding)
- elif tok in b"0123456789+-.":
- # number object OR indirect reference
- peek = stream.read(20)
- stream.seek(-len(peek), 1) # reset to start
- if IndirectPattern.match(peek) is not None:
- return IndirectObject.read_from_stream(stream, pdf)
- else:
- return NumberObject.read_from_stream(stream)
- else:
- stream.seek(-20, 1)
- raise PdfReadError(
- f"Invalid Elementary Object starting with {tok!r} @{stream.tell()}: {stream.read(80).__repr__()}"
- )
-
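- # A few literal-parsing sketches for read_object above; pdf=None is assumed
- # to be sufficient for direct objects that contain no indirect references:
- def _example_read_literals() -> Tuple[Any, Any, Any]:
-     num = read_object(BytesIO(b"3.14 "), None)    # FloatObject
-     flag = read_object(BytesIO(b"true "), None)   # BooleanObject(True)
-     name = read_object(BytesIO(b"/Type "), None)  # NameObject("/Type")
-     return num, flag, name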
-
-class Field(TreeObject):
- """
- A class representing a field dictionary.
-
- This class is accessed through
- :meth:`get_fields()`
- """
-
- def __init__(self, data: Dict[str, Any]) -> None:
- DictionaryObject.__init__(self)
- field_attributes = (
- FieldDictionaryAttributes.attributes()
- + CheckboxRadioButtonAttributes.attributes()
- )
- for attr in field_attributes:
- try:
- self[NameObject(attr)] = data[attr]
- except KeyError:
- pass
-
- # TABLE 8.69 Entries common to all field dictionaries
- @property
- def field_type(self) -> Optional[NameObject]:
- """Read-only property accessing the type of this field."""
- return self.get(FieldDictionaryAttributes.FT)
-
- @property
- def fieldType(self) -> Optional[NameObject]: # pragma: no cover
- """
- .. deprecated:: 1.28.3
-
- Use :py:attr:`field_type` instead.
- """
- deprecation_with_replacement("fieldType", "field_type", "3.0.0")
- return self.field_type
-
- @property
- def parent(self) -> Optional[DictionaryObject]:
- """Read-only property accessing the parent of this field."""
- return self.get(FieldDictionaryAttributes.Parent)
-
- @property
- def kids(self) -> Optional["ArrayObject"]:
- """Read-only property accessing the kids of this field."""
- return self.get(FieldDictionaryAttributes.Kids)
-
- @property
- def name(self) -> Optional[str]:
- """Read-only property accessing the name of this field."""
- return self.get(FieldDictionaryAttributes.T)
-
- @property
- def alternate_name(self) -> Optional[str]:
- """Read-only property accessing the alternate name of this field."""
- return self.get(FieldDictionaryAttributes.TU)
-
- @property
- def altName(self) -> Optional[str]: # pragma: no cover
- """
- .. deprecated:: 1.28.3
-
- Use :py:attr:`alternate_name` instead.
- """
- deprecation_with_replacement("altName", "alternate_name", "3.0.0")
- return self.alternate_name
-
- @property
- def mapping_name(self) -> Optional[str]:
- """
- Read-only property accessing the mapping name of this field. This
- name is used by PyPDF2 as a key in the dictionary returned by
- :meth:`get_fields()`
- """
- return self.get(FieldDictionaryAttributes.TM)
-
- @property
- def mappingName(self) -> Optional[str]: # pragma: no cover
- """
- .. deprecated:: 1.28.3
-
- Use :py:attr:`mapping_name` instead.
- """
- deprecation_with_replacement("mappingName", "mapping_name", "3.0.0")
- return self.mapping_name
-
- @property
- def flags(self) -> Optional[int]:
- """
- Read-only property accessing the field flags, specifying various
- characteristics of the field (see Table 8.70 of the PDF 1.7 reference).
- """
- return self.get(FieldDictionaryAttributes.Ff)
-
- @property
- def value(self) -> Optional[Any]:
- """
- Read-only property accessing the value of this field. Format
- varies based on field type.
- """
- return self.get(FieldDictionaryAttributes.V)
-
- @property
- def default_value(self) -> Optional[Any]:
- """Read-only property accessing the default value of this field."""
- return self.get(FieldDictionaryAttributes.DV)
-
- @property
- def defaultValue(self) -> Optional[Any]: # pragma: no cover
- """
- .. deprecated:: 1.28.3
-
- Use :py:attr:`default_value` instead.
- """
- deprecation_with_replacement("defaultValue", "default_value", "3.0.0")
- return self.default_value
-
- @property
- def additional_actions(self) -> Optional[DictionaryObject]:
- """
- Read-only property accessing the additional actions dictionary.
- This dictionary defines the field's behavior in response to trigger events.
- See Section 8.5.2 of the PDF 1.7 reference.
- """
- return self.get(FieldDictionaryAttributes.AA)
-
- @property
- def additionalActions(self) -> Optional[DictionaryObject]: # pragma: no cover
- """
- .. deprecated:: 1.28.3
-
- Use :py:attr:`additional_actions` instead.
- """
- deprecation_with_replacement("additionalActions", "additional_actions", "3.0.0")
- return self.additional_actions
-
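- # A minimal sketch of listing interactive form fields; `reader` is assumed
- # to be a PyPDF2 PdfReader, whose get_fields() returns a dict of Field
- # objects (or None when the document has no form):
- def _example_list_fields(reader: Any) -> None:
-     for name, field in (reader.get_fields() or {}).items():
-         print(name, field.field_type, field.value)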
-
-class Destination(TreeObject):
- """
- A class representing a destination within a PDF file.
- See section 8.2.1 of the PDF 1.6 reference.
-
- :param str title: Title of this destination.
- :param IndirectObject page: Reference to the page of this destination. Should
- be an instance of :class:`IndirectObject`.
- :param Fit fit: How the destination is displayed.
- :raises PdfReadError: If destination type is invalid.
-
-
- """
-
- node: Optional[
- DictionaryObject
- ] = None # node provides access to the original object
- childs: List[Any] = [] # used in PdfWriter
-
- def __init__(
- self,
- title: str,
- page: Union[NumberObject, IndirectObject, NullObject, DictionaryObject],
- fit: Fit,
- ) -> None:
- typ = fit.fit_type
- args = fit.fit_args
-
- DictionaryObject.__init__(self)
- self[NameObject("/Title")] = TextStringObject(title)
- self[NameObject("/Page")] = page
- self[NameObject("/Type")] = typ
-
- # from table 8.2 of the PDF 1.7 reference.
- if typ == "/XYZ":
- (
- self[NameObject(TA.LEFT)],
- self[NameObject(TA.TOP)],
- self[NameObject("/Zoom")],
- ) = args
- elif typ == TF.FIT_R:
- (
- self[NameObject(TA.LEFT)],
- self[NameObject(TA.BOTTOM)],
- self[NameObject(TA.RIGHT)],
- self[NameObject(TA.TOP)],
- ) = args
- elif typ in [TF.FIT_H, TF.FIT_BH]:
- try: # Preferred: be robust to missing as well as null parameters
- (self[NameObject(TA.TOP)],) = args
- except Exception:
- (self[NameObject(TA.TOP)],) = (NullObject(),)
- elif typ in [TF.FIT_V, TF.FIT_BV]:
- try: # Preferred: be robust to missing as well as null parameters
- (self[NameObject(TA.LEFT)],) = args
- except Exception:
- (self[NameObject(TA.LEFT)],) = (NullObject(),)
- elif typ in [TF.FIT, TF.FIT_B]:
- pass
- else:
- raise PdfReadError(f"Unknown Destination Type: {typ!r}")
-
- @property
- def dest_array(self) -> "ArrayObject":
- return ArrayObject(
- [self.raw_get("/Page"), self["/Type"]]
- + [
- self[x]
- for x in ["/Left", "/Bottom", "/Right", "/Top", "/Zoom"]
- if x in self
- ]
- )
-
- def getDestArray(self) -> "ArrayObject": # pragma: no cover
- """
- .. deprecated:: 1.28.3
-
- Use :py:attr:`dest_array` instead.
- """
- deprecation_with_replacement("getDestArray", "dest_array", "3.0.0")
- return self.dest_array
-
- def write_to_stream(
- self, stream: StreamType, encryption_key: Union[None, str, bytes]
- ) -> None:
- stream.write(b"<<\n")
- key = NameObject("/D")
- key.write_to_stream(stream, encryption_key)
- stream.write(b" ")
- value = self.dest_array
- value.write_to_stream(stream, encryption_key)
-
- key = NameObject("/S")
- key.write_to_stream(stream, encryption_key)
- stream.write(b" ")
- value_s = NameObject("/GoTo")
- value_s.write_to_stream(stream, encryption_key)
-
- stream.write(b"\n")
- stream.write(b">>")
-
- @property
- def title(self) -> Optional[str]:
- """Read-only property accessing the destination title."""
- return self.get("/Title")
-
- @property
- def page(self) -> Optional[int]:
- """Read-only property accessing the destination page number."""
- return self.get("/Page")
-
- @property
- def typ(self) -> Optional[str]:
- """Read-only property accessing the destination type."""
- return self.get("/Type")
-
- @property
- def zoom(self) -> Optional[int]:
- """Read-only property accessing the zoom factor."""
- return self.get("/Zoom", None)
-
- @property
- def left(self) -> Optional[FloatObject]:
- """Read-only property accessing the left horizontal coordinate."""
- return self.get("/Left", None)
-
- @property
- def right(self) -> Optional[FloatObject]:
- """Read-only property accessing the right horizontal coordinate."""
- return self.get("/Right", None)
-
- @property
- def top(self) -> Optional[FloatObject]:
- """Read-only property accessing the top vertical coordinate."""
- return self.get("/Top", None)
-
- @property
- def bottom(self) -> Optional[FloatObject]:
- """Read-only property accessing the bottom vertical coordinate."""
- return self.get("/Bottom", None)
-
- @property
- def color(self) -> Optional["ArrayObject"]:
- """Read-only property accessing the color in (R, G, B) with values 0.0-1.0"""
- return self.get(
- "/C", ArrayObject([FloatObject(0), FloatObject(0), FloatObject(0)])
- )
-
- @property
- def font_format(self) -> Optional[OutlineFontFlag]:
- """Read-only property accessing the font type. 1=italic, 2=bold, 3=both"""
- return self.get("/F", 0)
-
- @property
- def outline_count(self) -> Optional[int]:
- """
- Read-only property accessing the outline count.
- positive = expanded
- negative = collapsed
- absolute value = number of visible descendants at all levels
- """
- return self.get("/Count", None)
diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/PyPDF2/types.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/PyPDF2/types.py
deleted file mode 100644
index 92cba6fe9fb8e59500d35aec92a25814d402732d..0000000000000000000000000000000000000000
--- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/PyPDF2/types.py
+++ /dev/null
@@ -1,52 +0,0 @@
-"""Helpers for working with PDF types."""
-
-from typing import List, Union
-
-try:
- # Python 3.8+: https://peps.python.org/pep-0586
- from typing import Literal # type: ignore[attr-defined]
-except ImportError:
- from typing_extensions import Literal # type: ignore[misc]
-
-try:
- # Python 3.10+: https://peps.python.org/pep-0613/
- from typing import TypeAlias # type: ignore[attr-defined]
-except ImportError:
- from typing_extensions import TypeAlias
-
-from .generic._base import NameObject, NullObject, NumberObject
-from .generic._data_structures import ArrayObject, Destination
-from .generic._outline import OutlineItem
-
-BorderArrayType: TypeAlias = List[Union[NameObject, NumberObject, ArrayObject]]
-OutlineItemType: TypeAlias = Union[OutlineItem, Destination]
-FitType: TypeAlias = Literal[
- "/Fit", "/XYZ", "/FitH", "/FitV", "/FitR", "/FitB", "/FitBH", "/FitBV"
-]
-# Those go with the FitType: They specify values for the fit
-ZoomArgType: TypeAlias = Union[NumberObject, NullObject, float]
-ZoomArgsType: TypeAlias = List[ZoomArgType]
-
-# Recursive types like the following are not yet supported by mypy:
-# OutlineType = List[Union[Destination, "OutlineType"]]
-# See https://github.com/python/mypy/issues/731
-# Hence use this for the moment:
-OutlineType = List[Union[Destination, List[Union[Destination, List[Destination]]]]]
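-
- # A minimal sketch of the nesting this alias describes: a top-level item
- # followed by the list of its children (assumed Destination instances):
- def _outline_sketch(chapter: Destination, s1: Destination, s2: Destination) -> OutlineType:
-     return [chapter, [s1, s2]]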
-
-LayoutType: TypeAlias = Literal[
- "/NoLayout",
- "/SinglePage",
- "/OneColumn",
- "/TwoColumnLeft",
- "/TwoColumnRight",
- "/TwoPageLeft",
- "/TwoPageRight",
-]
-PagemodeType: TypeAlias = Literal[
- "/UseNone",
- "/UseOutlines",
- "/UseThumbs",
- "/FullScreen",
- "/UseOC",
- "/UseAttachments",
-]
diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/aiohttp/web_ws.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/aiohttp/web_ws.py
deleted file mode 100644
index 0d32a218b52b87ec04f36a6f95bfb303984b2e43..0000000000000000000000000000000000000000
--- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/aiohttp/web_ws.py
+++ /dev/null
@@ -1,487 +0,0 @@
-import asyncio
-import base64
-import binascii
-import hashlib
-import json
-from typing import Any, Iterable, Optional, Tuple, cast
-
-import async_timeout
-import attr
-from multidict import CIMultiDict
-
-from . import hdrs
-from .abc import AbstractStreamWriter
-from .helpers import call_later, set_result
-from .http import (
- WS_CLOSED_MESSAGE,
- WS_CLOSING_MESSAGE,
- WS_KEY,
- WebSocketError,
- WebSocketReader,
- WebSocketWriter,
- WSCloseCode,
- WSMessage,
- WSMsgType as WSMsgType,
- ws_ext_gen,
- ws_ext_parse,
-)
-from .log import ws_logger
-from .streams import EofStream, FlowControlDataQueue
-from .typedefs import Final, JSONDecoder, JSONEncoder
-from .web_exceptions import HTTPBadRequest, HTTPException
-from .web_request import BaseRequest
-from .web_response import StreamResponse
-
-__all__ = (
- "WebSocketResponse",
- "WebSocketReady",
- "WSMsgType",
-)
-
-THRESHOLD_CONNLOST_ACCESS: Final[int] = 5
-
-
-@attr.s(auto_attribs=True, frozen=True, slots=True)
-class WebSocketReady:
- ok: bool
- protocol: Optional[str]
-
- def __bool__(self) -> bool:
- return self.ok
-
-
-class WebSocketResponse(StreamResponse):
-
- _length_check = False
-
- def __init__(
- self,
- *,
- timeout: float = 10.0,
- receive_timeout: Optional[float] = None,
- autoclose: bool = True,
- autoping: bool = True,
- heartbeat: Optional[float] = None,
- protocols: Iterable[str] = (),
- compress: bool = True,
- max_msg_size: int = 4 * 1024 * 1024,
- ) -> None:
- super().__init__(status=101)
- self._protocols = protocols
- self._ws_protocol: Optional[str] = None
- self._writer: Optional[WebSocketWriter] = None
- self._reader: Optional[FlowControlDataQueue[WSMessage]] = None
- self._closed = False
- self._closing = False
- self._conn_lost = 0
- self._close_code: Optional[int] = None
- self._loop: Optional[asyncio.AbstractEventLoop] = None
- self._waiting: Optional[asyncio.Future[bool]] = None
- self._exception: Optional[BaseException] = None
- self._timeout = timeout
- self._receive_timeout = receive_timeout
- self._autoclose = autoclose
- self._autoping = autoping
- self._heartbeat = heartbeat
- self._heartbeat_cb: Optional[asyncio.TimerHandle] = None
- if heartbeat is not None:
- self._pong_heartbeat = heartbeat / 2.0
- self._pong_response_cb: Optional[asyncio.TimerHandle] = None
- self._compress = compress
- self._max_msg_size = max_msg_size
-
- def _cancel_heartbeat(self) -> None:
- if self._pong_response_cb is not None:
- self._pong_response_cb.cancel()
- self._pong_response_cb = None
-
- if self._heartbeat_cb is not None:
- self._heartbeat_cb.cancel()
- self._heartbeat_cb = None
-
- def _reset_heartbeat(self) -> None:
- self._cancel_heartbeat()
-
- if self._heartbeat is not None:
- assert self._loop is not None
- self._heartbeat_cb = call_later(
- self._send_heartbeat, self._heartbeat, self._loop
- )
-
- def _send_heartbeat(self) -> None:
- if self._heartbeat is not None and not self._closed:
- assert self._loop is not None
- # firing off a fire-and-forget task is not perfect, but may be
- # acceptable for sending a ping; otherwise we would need a
- # long-lived heartbeat task in the class.
- self._loop.create_task(self._writer.ping()) # type: ignore[union-attr]
-
- if self._pong_response_cb is not None:
- self._pong_response_cb.cancel()
- self._pong_response_cb = call_later(
- self._pong_not_received, self._pong_heartbeat, self._loop
- )
-
- def _pong_not_received(self) -> None:
- if self._req is not None and self._req.transport is not None:
- self._closed = True
- self._close_code = WSCloseCode.ABNORMAL_CLOSURE
- self._exception = asyncio.TimeoutError()
- self._req.transport.close()
-
- async def prepare(self, request: BaseRequest) -> AbstractStreamWriter:
- # pre-check here so the error is not hidden by do_handshake() exceptions
- if self._payload_writer is not None:
- return self._payload_writer
-
- protocol, writer = self._pre_start(request)
- payload_writer = await super().prepare(request)
- assert payload_writer is not None
- self._post_start(request, protocol, writer)
- await payload_writer.drain()
- return payload_writer
-
- def _handshake(
- self, request: BaseRequest
- ) -> Tuple["CIMultiDict[str]", str, bool, bool]:
- headers = request.headers
- if "websocket" != headers.get(hdrs.UPGRADE, "").lower().strip():
- raise HTTPBadRequest(
- text=(
- "No WebSocket UPGRADE hdr: {}\n Can "
- '"Upgrade" only to "WebSocket".'
- ).format(headers.get(hdrs.UPGRADE))
- )
-
- if "upgrade" not in headers.get(hdrs.CONNECTION, "").lower():
- raise HTTPBadRequest(
- text="No CONNECTION upgrade hdr: {}".format(
- headers.get(hdrs.CONNECTION)
- )
- )
-
- # find common sub-protocol between client and server
- protocol = None
- if hdrs.SEC_WEBSOCKET_PROTOCOL in headers:
- req_protocols = [
- str(proto.strip())
- for proto in headers[hdrs.SEC_WEBSOCKET_PROTOCOL].split(",")
- ]
-
- for proto in req_protocols:
- if proto in self._protocols:
- protocol = proto
- break
- else:
- # No overlap found: Return no protocol as per spec
- ws_logger.warning(
- "Client protocols %r don’t overlap server-known ones %r",
- req_protocols,
- self._protocols,
- )
-
- # check supported version
- version = headers.get(hdrs.SEC_WEBSOCKET_VERSION, "")
- if version not in ("13", "8", "7"):
- raise HTTPBadRequest(text=f"Unsupported version: {version}")
-
- # check client handshake for validity
- key = headers.get(hdrs.SEC_WEBSOCKET_KEY)
- try:
- if not key or len(base64.b64decode(key)) != 16:
- raise HTTPBadRequest(text=f"Handshake error: {key!r}")
- except binascii.Error:
- raise HTTPBadRequest(text=f"Handshake error: {key!r}") from None
-
- accept_val = base64.b64encode(
- hashlib.sha1(key.encode() + WS_KEY).digest()
- ).decode()
- response_headers = CIMultiDict(
- {
- hdrs.UPGRADE: "websocket",
- hdrs.CONNECTION: "upgrade",
- hdrs.SEC_WEBSOCKET_ACCEPT: accept_val,
- }
- )
-
- notakeover = False
- compress = 0
- if self._compress:
- extensions = headers.get(hdrs.SEC_WEBSOCKET_EXTENSIONS)
- # The server side always returns without raising an exception;
- # if anything goes wrong, the compress extension is simply dropped.
- compress, notakeover = ws_ext_parse(extensions, isserver=True)
- if compress:
- enabledext = ws_ext_gen(
- compress=compress, isserver=True, server_notakeover=notakeover
- )
- response_headers[hdrs.SEC_WEBSOCKET_EXTENSIONS] = enabledext
-
- if protocol:
- response_headers[hdrs.SEC_WEBSOCKET_PROTOCOL] = protocol
- return (
- response_headers,
- protocol,
- compress,
- notakeover,
- ) # type: ignore[return-value]
-
- def _pre_start(self, request: BaseRequest) -> Tuple[str, WebSocketWriter]:
- self._loop = request._loop
-
- headers, protocol, compress, notakeover = self._handshake(request)
-
- self.set_status(101)
- self.headers.update(headers)
- self.force_close()
- self._compress = compress
- transport = request._protocol.transport
- assert transport is not None
- writer = WebSocketWriter(
- request._protocol, transport, compress=compress, notakeover=notakeover
- )
-
- return protocol, writer
-
- def _post_start(
- self, request: BaseRequest, protocol: str, writer: WebSocketWriter
- ) -> None:
- self._ws_protocol = protocol
- self._writer = writer
-
- self._reset_heartbeat()
-
- loop = self._loop
- assert loop is not None
- self._reader = FlowControlDataQueue(request._protocol, 2**16, loop=loop)
- request.protocol.set_parser(
- WebSocketReader(self._reader, self._max_msg_size, compress=self._compress)
- )
- # disable HTTP keepalive for WebSocket
- request.protocol.keep_alive(False)
-
- def can_prepare(self, request: BaseRequest) -> WebSocketReady:
- if self._writer is not None:
- raise RuntimeError("Already started")
- try:
- _, protocol, _, _ = self._handshake(request)
- except HTTPException:
- return WebSocketReady(False, None)
- else:
- return WebSocketReady(True, protocol)
-
- @property
- def closed(self) -> bool:
- return self._closed
-
- @property
- def close_code(self) -> Optional[int]:
- return self._close_code
-
- @property
- def ws_protocol(self) -> Optional[str]:
- return self._ws_protocol
-
- @property
- def compress(self) -> bool:
- return self._compress
-
- def exception(self) -> Optional[BaseException]:
- return self._exception
-
- async def ping(self, message: bytes = b"") -> None:
- if self._writer is None:
- raise RuntimeError("Call .prepare() first")
- await self._writer.ping(message)
-
- async def pong(self, message: bytes = b"") -> None:
- # unsolicited pong
- if self._writer is None:
- raise RuntimeError("Call .prepare() first")
- await self._writer.pong(message)
-
- async def send_str(self, data: str, compress: Optional[bool] = None) -> None:
- if self._writer is None:
- raise RuntimeError("Call .prepare() first")
- if not isinstance(data, str):
- raise TypeError("data argument must be str (%r)" % type(data))
- await self._writer.send(data, binary=False, compress=compress)
-
- async def send_bytes(self, data: bytes, compress: Optional[bool] = None) -> None:
- if self._writer is None:
- raise RuntimeError("Call .prepare() first")
- if not isinstance(data, (bytes, bytearray, memoryview)):
- raise TypeError("data argument must be byte-ish (%r)" % type(data))
- await self._writer.send(data, binary=True, compress=compress)
-
- async def send_json(
- self,
- data: Any,
- compress: Optional[bool] = None,
- *,
- dumps: JSONEncoder = json.dumps,
- ) -> None:
- await self.send_str(dumps(data), compress=compress)
-
- async def write_eof(self) -> None: # type: ignore[override]
- if self._eof_sent:
- return
- if self._payload_writer is None:
- raise RuntimeError("Response has not been started")
-
- await self.close()
- self._eof_sent = True
-
- async def close(self, *, code: int = WSCloseCode.OK, message: bytes = b"") -> bool:
- if self._writer is None:
- raise RuntimeError("Call .prepare() first")
-
- self._cancel_heartbeat()
- reader = self._reader
- assert reader is not None
-
- # we need to break the receive() cycle first, since close() may
- # be called from a different task
- if self._waiting is not None and not self._closed:
- reader.feed_data(WS_CLOSING_MESSAGE, 0)
- await self._waiting
-
- if not self._closed:
- self._closed = True
- try:
- await self._writer.close(code, message)
- writer = self._payload_writer
- assert writer is not None
- await writer.drain()
- except (asyncio.CancelledError, asyncio.TimeoutError):
- self._close_code = WSCloseCode.ABNORMAL_CLOSURE
- raise
- except Exception as exc:
- self._close_code = WSCloseCode.ABNORMAL_CLOSURE
- self._exception = exc
- return True
-
- if self._closing:
- return True
-
- reader = self._reader
- assert reader is not None
- try:
- async with async_timeout.timeout(self._timeout):
- msg = await reader.read()
- except asyncio.CancelledError:
- self._close_code = WSCloseCode.ABNORMAL_CLOSURE
- raise
- except Exception as exc:
- self._close_code = WSCloseCode.ABNORMAL_CLOSURE
- self._exception = exc
- return True
-
- if msg.type == WSMsgType.CLOSE:
- self._close_code = msg.data
- return True
-
- self._close_code = WSCloseCode.ABNORMAL_CLOSURE
- self._exception = asyncio.TimeoutError()
- return True
- else:
- return False
-
- async def receive(self, timeout: Optional[float] = None) -> WSMessage:
- if self._reader is None:
- raise RuntimeError("Call .prepare() first")
-
- loop = self._loop
- assert loop is not None
- while True:
- if self._waiting is not None:
- raise RuntimeError("Concurrent call to receive() is not allowed")
-
- if self._closed:
- self._conn_lost += 1
- if self._conn_lost >= THRESHOLD_CONNLOST_ACCESS:
- raise RuntimeError("WebSocket connection is closed.")
- return WS_CLOSED_MESSAGE
- elif self._closing:
- return WS_CLOSING_MESSAGE
-
- try:
- self._waiting = loop.create_future()
- try:
- async with async_timeout.timeout(timeout or self._receive_timeout):
- msg = await self._reader.read()
- self._reset_heartbeat()
- finally:
- waiter = self._waiting
- set_result(waiter, True)
- self._waiting = None
- except (asyncio.CancelledError, asyncio.TimeoutError):
- self._close_code = WSCloseCode.ABNORMAL_CLOSURE
- raise
- except EofStream:
- self._close_code = WSCloseCode.OK
- await self.close()
- return WSMessage(WSMsgType.CLOSED, None, None)
- except WebSocketError as exc:
- self._close_code = exc.code
- await self.close(code=exc.code)
- return WSMessage(WSMsgType.ERROR, exc, None)
- except Exception as exc:
- self._exception = exc
- self._closing = True
- self._close_code = WSCloseCode.ABNORMAL_CLOSURE
- await self.close()
- return WSMessage(WSMsgType.ERROR, exc, None)
-
- if msg.type == WSMsgType.CLOSE:
- self._closing = True
- self._close_code = msg.data
- if not self._closed and self._autoclose:
- await self.close()
- elif msg.type == WSMsgType.CLOSING:
- self._closing = True
- elif msg.type == WSMsgType.PING and self._autoping:
- await self.pong(msg.data)
- continue
- elif msg.type == WSMsgType.PONG and self._autoping:
- continue
-
- return msg
-
- async def receive_str(self, *, timeout: Optional[float] = None) -> str:
- msg = await self.receive(timeout)
- if msg.type != WSMsgType.TEXT:
- raise TypeError(
- "Received message {}:{!r} is not WSMsgType.TEXT".format(
- msg.type, msg.data
- )
- )
- return cast(str, msg.data)
-
- async def receive_bytes(self, *, timeout: Optional[float] = None) -> bytes:
- msg = await self.receive(timeout)
- if msg.type != WSMsgType.BINARY:
- raise TypeError(f"Received message {msg.type}:{msg.data!r} is not bytes")
- return cast(bytes, msg.data)
-
- async def receive_json(
- self, *, loads: JSONDecoder = json.loads, timeout: Optional[float] = None
- ) -> Any:
- data = await self.receive_str(timeout=timeout)
- return loads(data)
-
- async def write(self, data: bytes) -> None:
- raise RuntimeError("Cannot call .write() for websocket")
-
- def __aiter__(self) -> "WebSocketResponse":
- return self
-
- async def __anext__(self) -> WSMessage:
- msg = await self.receive()
- if msg.type in (WSMsgType.CLOSE, WSMsgType.CLOSING, WSMsgType.CLOSED):
- raise StopAsyncIteration
- return msg
-
- def _cancel(self, exc: BaseException) -> None:
- if self._reader is not None:
- self._reader.set_exception(exc)
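-
-# Illustrative handler sketch (standard aiohttp usage, shown for context;
-# the name `handler` is ours, not part of this module):
-#
-#     async def handler(request: web.Request) -> web.WebSocketResponse:
-#         ws = web.WebSocketResponse()
-#         await ws.prepare(request)      # required before any send/receive call
-#         async for msg in ws:           # __anext__ stops on CLOSE/CLOSING/CLOSED
-#             if msg.type == WSMsgType.TEXT:
-#                 await ws.send_str(msg.data)
-#         return ws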
diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/dns/rdtypes/txtbase.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/dns/rdtypes/txtbase.py
deleted file mode 100644
index fdbfb6465f81188512b47b7ed9a58a9ddca89e74..0000000000000000000000000000000000000000
--- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/dns/rdtypes/txtbase.py
+++ /dev/null
@@ -1,107 +0,0 @@
-# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
-
-# Copyright (C) 2006-2017 Nominum, Inc.
-#
-# Permission to use, copy, modify, and distribute this software and its
-# documentation for any purpose with or without fee is hereby granted,
-# provided that the above copyright notice and this permission notice
-# appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
-# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
-# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
-# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-"""TXT-like base class."""
-
-import struct
-from typing import Any, Dict, Iterable, Optional, Tuple, Union
-
-import dns.exception
-import dns.immutable
-import dns.name
-import dns.rdata
-import dns.rdataclass
-import dns.rdatatype
-import dns.tokenizer
-
-
-@dns.immutable.immutable
-class TXTBase(dns.rdata.Rdata):
-
- """Base class for rdata that is like a TXT record (see RFC 1035)."""
-
- __slots__ = ["strings"]
-
- def __init__(
- self,
- rdclass: dns.rdataclass.RdataClass,
- rdtype: dns.rdatatype.RdataType,
- strings: Iterable[Union[bytes, str]],
- ):
- """Initialize a TXT-like rdata.
-
- *rdclass*, an ``int`` is the rdataclass of the Rdata.
-
- *rdtype*, an ``int`` is the rdatatype of the Rdata.
-
- *strings*, an iterable of ``bytes`` or ``str``; each item is stored as ``bytes``.
- """
- super().__init__(rdclass, rdtype)
- self.strings: Tuple[bytes, ...] = self._as_tuple(
- strings, lambda x: self._as_bytes(x, True, 255)
- )
-
- def to_text(
- self,
- origin: Optional[dns.name.Name] = None,
- relativize: bool = True,
- **kw: Dict[str, Any]
- ) -> str:
- txt = ""
- prefix = ""
- for s in self.strings:
- txt += '{}"{}"'.format(prefix, dns.rdata._escapify(s))
- prefix = " "
- return txt
-
- @classmethod
- def from_text(
- cls,
- rdclass: dns.rdataclass.RdataClass,
- rdtype: dns.rdatatype.RdataType,
- tok: dns.tokenizer.Tokenizer,
- origin: Optional[dns.name.Name] = None,
- relativize: bool = True,
- relativize_to: Optional[dns.name.Name] = None,
- ) -> dns.rdata.Rdata:
- strings = []
- for token in tok.get_remaining():
- token = token.unescape_to_bytes()
- # The 'if' below is always true in the current code, but we
- # are leaving this check in in case things change some day.
- if not (
- token.is_quoted_string() or token.is_identifier()
- ): # pragma: no cover
- raise dns.exception.SyntaxError("expected a string")
- if len(token.value) > 255:
- raise dns.exception.SyntaxError("string too long")
- strings.append(token.value)
- if len(strings) == 0:
- raise dns.exception.UnexpectedEnd
- return cls(rdclass, rdtype, strings)
-
- def _to_wire(self, file, compress=None, origin=None, canonicalize=False):
- for s in self.strings:
- length = len(s)
- assert length < 256
- file.write(struct.pack("!B", length))
- file.write(s)
-
- @classmethod
- def from_wire_parser(cls, rdclass, rdtype, parser, origin=None):
- strings = []
- while parser.remaining() > 0:
- s = parser.get_counted_bytes()
- strings.append(s)
- return cls(rdclass, rdtype, strings)
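-
-# Wire-format note (RFC 1035 character-strings): _to_wire emits each string as
-# a one-byte length prefix followed by the raw bytes, so ("hi", "yo") packs to
-# b"\x02hi\x02yo"; from_wire_parser reverses this via get_counted_bytes().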
diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/gpt_index/readers/file/epub_parser.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/gpt_index/readers/file/epub_parser.py
deleted file mode 100644
index cbf15b09dd92db278c2d307c0429f4aae62da4fb..0000000000000000000000000000000000000000
--- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/gpt_index/readers/file/epub_parser.py
+++ /dev/null
@@ -1,47 +0,0 @@
-"""Epub parser.
-
-Contains parsers for epub files.
-"""
-
-from pathlib import Path
-from typing import Dict
-
-from gpt_index.readers.file.base_parser import BaseParser
-
-
-class EpubParser(BaseParser):
- """Epub Parser."""
-
- def _init_parser(self) -> Dict:
- """Init parser."""
- return {}
-
- def parse_file(self, file: Path, errors: str = "ignore") -> str:
- """Parse file."""
- try:
- import ebooklib
- from ebooklib import epub
- except ImportError:
- raise ImportError(
- "`EbookLib` is required to read Epub files: `pip install EbookLib`"
- )
- try:
- import html2text
- except ImportError:
- raise ImportError(
- "`html2text` is required to parse Epub files: `pip install html2text`"
- )
-
- text_list = []
- book = epub.read_epub(file, options={"ignore_ncx": True})
-
- # Iterate through all chapters.
- for item in book.get_items():
- # Chapters are typically stored as ITEM_DOCUMENT entries in the EPUB archive.
- if item.get_type() == ebooklib.ITEM_DOCUMENT:
- text_list.append(
- html2text.html2text(item.get_content().decode("utf-8"))
- )
-
- text = "\n".join(text_list)
- return text
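-
-# Illustrative usage (the file name is hypothetical):
-#
-#     parser = EpubParser()
-#     text = parser.parse_file(Path("book.epub"))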
diff --git a/spaces/katanaml-org/sparrow-ui/toolbar_main/__init__.py b/spaces/katanaml-org/sparrow-ui/toolbar_main/__init__.py
deleted file mode 100644
index f535aaea55bf3083ffe0eef9f53de718b5632a74..0000000000000000000000000000000000000000
--- a/spaces/katanaml-org/sparrow-ui/toolbar_main/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-import streamlit.components.v1 as components
-component_toolbar_main = components.declare_component(
- name='component_toolbar_main',
- path='./toolbar_main'
-)
\ No newline at end of file
diff --git a/spaces/kevinwang676/VITS2-Mandarin/stft.py b/spaces/kevinwang676/VITS2-Mandarin/stft.py
deleted file mode 100644
index ef754544a88a1a4ff2e39760000d707c6b160b4b..0000000000000000000000000000000000000000
--- a/spaces/kevinwang676/VITS2-Mandarin/stft.py
+++ /dev/null
@@ -1,209 +0,0 @@
-"""
-BSD 3-Clause License
-Copyright (c) 2017, Prem Seetharaman
-All rights reserved.
-* Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are met:
-* Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-* Redistributions in binary form must reproduce the above copyright notice, this
- list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-* Neither the name of the copyright holder nor the names of its
- contributors may be used to endorse or promote products derived from this
- software without specific prior written permission.
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
-ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
-ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-"""
-
-import torch
-import numpy as np
-import torch.nn.functional as F
-from torch.autograd import Variable
-from scipy.signal import get_window
-from librosa.util import pad_center, tiny
-import librosa.util as librosa_util
-
-def window_sumsquare(window, n_frames, hop_length=200, win_length=800,
- n_fft=800, dtype=np.float32, norm=None):
- """
- # from librosa 0.6
- Compute the sum-square envelope of a window function at a given hop length.
- This is used to estimate modulation effects induced by windowing
- observations in short-time Fourier transforms.
- Parameters
- ----------
- window : string, tuple, number, callable, or list-like
- Window specification, as in `get_window`
- n_frames : int > 0
- The number of analysis frames
- hop_length : int > 0
- The number of samples to advance between frames
- win_length : [optional]
- The length of the window function. By default, this matches `n_fft`.
- n_fft : int > 0
- The length of each analysis frame.
- dtype : np.dtype
- The data type of the output
- Returns
- -------
- wss : np.ndarray, shape=`(n_fft + hop_length * (n_frames - 1))`
- The sum-squared envelope of the window function
- """
- if win_length is None:
- win_length = n_fft
-
- n = n_fft + hop_length * (n_frames - 1)
- x = np.zeros(n, dtype=dtype)
-
- # Compute the squared window at the desired length
- win_sq = get_window(window, win_length, fftbins=True)
- win_sq = librosa_util.normalize(win_sq, norm=norm)**2
- win_sq = librosa_util.pad_center(win_sq, n_fft)
-
- # Fill the envelope
- for i in range(n_frames):
- sample = i * hop_length
- x[sample:min(n, sample + n_fft)] += win_sq[:max(0, min(n_fft, n - sample))]
- return x
-
-
-class STFT(torch.nn.Module):
- """adapted from Prem Seetharaman's https://github.com/pseeth/pytorch-stft"""
- def __init__(self, filter_length=800, hop_length=200, win_length=800,
- window='hann'):
- super(STFT, self).__init__()
- self.filter_length = filter_length
- self.hop_length = hop_length
- self.win_length = win_length
- self.window = window
- self.forward_transform = None
- scale = self.filter_length / self.hop_length
- fourier_basis = np.fft.fft(np.eye(self.filter_length))
-
- cutoff = int((self.filter_length / 2 + 1))
- fourier_basis = np.vstack([np.real(fourier_basis[:cutoff, :]),
- np.imag(fourier_basis[:cutoff, :])])
-
- forward_basis = torch.FloatTensor(fourier_basis[:, None, :])
- inverse_basis = torch.FloatTensor(
- np.linalg.pinv(scale * fourier_basis).T[:, None, :])
-
- if window is not None:
- assert(filter_length >= win_length)
- # get window and zero center pad it to filter_length
- fft_window = get_window(window, win_length, fftbins=True)
- fft_window = pad_center(fft_window, filter_length)
- fft_window = torch.from_numpy(fft_window).float()
-
- # window the bases
- forward_basis *= fft_window
- inverse_basis *= fft_window
-
- self.register_buffer('forward_basis', forward_basis.float())
- self.register_buffer('inverse_basis', inverse_basis.float())
-
- def transform(self, input_data):
- num_batches = input_data.size(0)
- num_samples = input_data.size(1)
-
- self.num_samples = num_samples
-
- # similar to librosa, reflect-pad the input
- input_data = input_data.view(num_batches, 1, num_samples)
- input_data = F.pad(
- input_data.unsqueeze(1),
- (int(self.filter_length / 2), int(self.filter_length / 2), 0, 0),
- mode='reflect')
- input_data = input_data.squeeze(1)
-
- forward_transform = F.conv1d(
- input_data,
- Variable(self.forward_basis, requires_grad=False),
- stride=self.hop_length,
- padding=0)
-
- cutoff = int((self.filter_length / 2) + 1)
- real_part = forward_transform[:, :cutoff, :]
- imag_part = forward_transform[:, cutoff:, :]
-
- magnitude = torch.sqrt(real_part**2 + imag_part**2)
- phase = torch.autograd.Variable(
- torch.atan2(imag_part.data, real_part.data))
-
- return magnitude, phase
-
- def inverse(self, magnitude, phase):
- recombine_magnitude_phase = torch.cat(
- [magnitude*torch.cos(phase), magnitude*torch.sin(phase)], dim=1)
-
- inverse_transform = F.conv_transpose1d(
- recombine_magnitude_phase,
- Variable(self.inverse_basis, requires_grad=False),
- stride=self.hop_length,
- padding=0)
-
- if self.window is not None:
- window_sum = window_sumsquare(
- self.window, magnitude.size(-1), hop_length=self.hop_length,
- win_length=self.win_length, n_fft=self.filter_length,
- dtype=np.float32)
- # remove modulation effects
- approx_nonzero_indices = torch.from_numpy(
- np.where(window_sum > tiny(window_sum))[0])
- window_sum = torch.autograd.Variable(
- torch.from_numpy(window_sum), requires_grad=False)
- window_sum = window_sum.to(inverse_transform.device) if magnitude.is_cuda else window_sum  # .device is an attribute, not a method
- inverse_transform[:, :, approx_nonzero_indices] /= window_sum[approx_nonzero_indices]
-
- # scale by hop ratio
- inverse_transform *= float(self.filter_length) / self.hop_length
-
- inverse_transform = inverse_transform[:, :, int(self.filter_length/2):]
- inverse_transform = inverse_transform[:, :, :-int(self.filter_length/2)]
-
- return inverse_transform
-
- def forward(self, input_data):
- self.magnitude, self.phase = self.transform(input_data)
- reconstruction = self.inverse(self.magnitude, self.phase)
- return reconstruction
-
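-# Round-trip sketch (shapes are illustrative; input is (batch, num_samples)):
-#
-#     stft = STFT(filter_length=800, hop_length=200, win_length=800)
-#     audio = torch.randn(1, 16000)
-#     magnitude, phase = stft.transform(audio)
-#     reconstruction = stft.inverse(magnitude, phase)  # approx. audio, edges trimmed
-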
-
-class TorchSTFT(torch.nn.Module):
- def __init__(self, filter_length=800, hop_length=200, win_length=800, window='hann'):
- super().__init__()
- self.filter_length = filter_length
- self.hop_length = hop_length
- self.win_length = win_length
- self.window = torch.from_numpy(get_window(window, win_length, fftbins=True).astype(np.float32))
-
- def transform(self, input_data):
- forward_transform = torch.stft(
- input_data,
- self.filter_length, self.hop_length, self.win_length, window=self.window,
- return_complex=True)
-
- return torch.abs(forward_transform), torch.angle(forward_transform)
-
- def inverse(self, magnitude, phase):
- inverse_transform = torch.istft(
- magnitude * torch.exp(phase * 1j),
- self.filter_length, self.hop_length, self.win_length, window=self.window.to(magnitude.device))
-
- return inverse_transform.unsqueeze(-2) # unsqueeze to stay consistent with conv_transpose1d implementation
-
- def forward(self, input_data):
- self.magnitude, self.phase = self.transform(input_data)
- reconstruction = self.inverse(self.magnitude, self.phase)
- return reconstruction
-
-
diff --git a/spaces/king007/invoices/app.py b/spaces/king007/invoices/app.py
deleted file mode 100644
index 7ba04dcdc52b6b5aa3079db4061dd9bc0e9ec412..0000000000000000000000000000000000000000
--- a/spaces/king007/invoices/app.py
+++ /dev/null
@@ -1,487 +0,0 @@
-import os
-
-os.environ["TOKENIZERS_PARALLELISM"] = "false"
-
-from PIL import Image, ImageDraw
-import traceback
-
-import gradio as gr
-from gradio import processing_utils
-
-import torch
-from docquery import pipeline
-from docquery.document import load_bytes, load_document, ImageDocument
-from docquery.ocr_reader import get_ocr_reader
-
-
-def ensure_list(x):
- if isinstance(x, list):
- return x
- else:
- return [x]
-
-
-CHECKPOINTS = {
- "LayoutLMv1 for Invoices 🧾": "impira/layoutlm-invoices",
-}
-
-PIPELINES = {}
-
-
-def construct_pipeline(task, model):
- global PIPELINES
- if model in PIPELINES:
- return PIPELINES[model]
-
- device = "cuda" if torch.cuda.is_available() else "cpu"
- ret = pipeline(task=task, model=CHECKPOINTS[model], device=device)
- PIPELINES[model] = ret
- return ret
-
-
-def run_pipeline(model, question, document, top_k):
- pipeline = construct_pipeline("document-question-answering", model)
- return pipeline(question=question, **document.context, top_k=top_k)
-
-
-# TODO: Move into docquery
-# TODO: Support words past the first page (or window?)
-def lift_word_boxes(document, page):
- return document.context["image"][page][1]
-
-
-def expand_bbox(word_boxes):
- if len(word_boxes) == 0:
- return None
-
- min_x, min_y, max_x, max_y = zip(*[x[1] for x in word_boxes])
- min_x, min_y, max_x, max_y = [min(min_x), min(min_y), max(max_x), max(max_y)]
- return [min_x, min_y, max_x, max_y]
-
-
-# LayoutLM boxes are normalized to 0, 1000
-def normalize_bbox(box, width, height, padding=0.005):
- min_x, min_y, max_x, max_y = [c / 1000 for c in box]
- if padding != 0:
- min_x = max(0, min_x - padding)
- min_y = max(0, min_y - padding)
- max_x = min(max_x + padding, 1)
- max_y = min(max_y + padding, 1)
- return [min_x * width, min_y * height, max_x * width, max_y * height]
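-
-# Example: box [100, 200, 300, 400] on a 1000x800 image maps, with the default
-# 0.005 padding, to [95.0, 156.0, 305.0, 324.0] in pixels.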
-
-
-EXAMPLES = [
- [
- "acze_tech.png",
- "Tech Invoice",
- ],
- [
- "acze.png",
- "Commercial Goods Invoice",
- ],
- [
- "north_sea.png",
- "Energy Invoice",
- ],
-]
-
-QUESTION_FILES = {
- "Tech Invoice": "acze_tech.pdf",
- "Energy Invoice": "north_sea.pdf",
-}
-
-for q in QUESTION_FILES.keys():
- assert any(x[1] == q for x in EXAMPLES)
-
-FIELDS = {
- "Vendor Name": ["Vendor Name - Logo?", "Vendor Name - Address?"],
- "Vendor Address": ["Vendor Address?"],
- "Customer Name": ["Customer Name?"],
- "Customer Address": ["Customer Address?"],
- "Invoice Number": ["Invoice Number?"],
- "Invoice Date": ["Invoice Date?"],
- "Due Date": ["Due Date?"],
- "Subtotal": ["Subtotal?"],
- "Total Tax": ["Total Tax?"],
- "Invoice Total": ["Invoice Total?"],
- "Amount Due": ["Amount Due?"],
- "Payment Terms": ["Payment Terms?"],
- "Remit To Name": ["Remit To Name?"],
- "Remit To Address": ["Remit To Address?"],
-}
-
-
-def empty_table(fields):
- return {"value": [[name, None] for name in fields.keys()], "interactive": False}
-
-
-def process_document(document, fields, model, error=None):
- if document is not None and error is None:
- preview, json_output, table = process_fields(document, fields, model)
- return (
- document,
- fields,
- preview,
- gr.update(visible=True),
- gr.update(visible=False, value=None),
- json_output,
- table,
- )
- else:
- return (
- None,
- fields,
- None,
- gr.update(visible=False),
- gr.update(visible=True, value=error) if error is not None else None,
- None,
- gr.update(**empty_table(fields)),
- )
-
-
-def process_path(path, fields, model):
- error = None
- document = None
- if path:
- try:
- document = load_document(path)
- except Exception as e:
- traceback.print_exc()
- error = str(e)
-
- return process_document(document, fields, model, error)
-
-
-def process_upload(file, fields, model):
- return process_path(file.name if file else None, fields, model)
-
-
-colors = ["#64A087", "green", "black"]
-
-
-def annotate_page(prediction, pages, document):
- if prediction is not None and "word_ids" in prediction:
- image = pages[prediction["page"]]
- draw = ImageDraw.Draw(image, "RGBA")
- word_boxes = lift_word_boxes(document, prediction["page"])
- x1, y1, x2, y2 = normalize_bbox(
- expand_bbox([word_boxes[i] for i in prediction["word_ids"]]),
- image.width,
- image.height,
- )
- draw.rectangle(((x1, y1), (x2, y2)), fill=(0, 255, 0, int(0.4 * 255)))
-
-
-def process_question(
- question, document, img_gallery, model, fields, output, output_table
-):
- field_name = question
- if field_name is not None:
- fields = {field_name: [question], **fields}
-
- if not question or document is None:
- return None, document, fields, output, gr.update(value=output_table)
-
- text_value = None
- pages = [processing_utils.decode_base64_to_image(p) for p in img_gallery]
- prediction = run_pipeline(model, question, document, 1)
- annotate_page(prediction, pages, document)
-
- output = {field_name: prediction, **output}
- table = [[field_name, prediction.get("answer")]] + output_table.values.tolist()
- return (
- None,
- gr.update(visible=True, value=pages),
- fields,
- output,
- gr.update(value=table, interactive=False),
- )
-
-
-def process_fields(document, fields, model=list(CHECKPOINTS.keys())[0]):
- pages = [x.copy().convert("RGB") for x in document.preview]
-
- ret = {}
- table = []
-
- for (field_name, questions) in fields.items():
- answers = [
- a
- for q in questions
- for a in ensure_list(run_pipeline(model, q, document, top_k=1))
- if a.get("score", 1) > 0.5
- ]
- answers.sort(key=lambda x: -x.get("score", 0) if x else 0)
- top = answers[0] if len(answers) > 0 else None
- annotate_page(top, pages, document)
- ret[field_name] = top
- table.append([field_name, top.get("answer") if top is not None else None])
-
- return (
- gr.update(visible=True, value=pages),
- gr.update(visible=True, value=ret),
- table
- )
-
-
-def load_example_document(img, title, fields, model):
- document = None
- if img is not None:
- if title in QUESTION_FILES:
- document = load_document(QUESTION_FILES[title])
- else:
- document = ImageDocument(Image.fromarray(img), ocr_reader=get_ocr_reader())
-
- return process_document(document, fields, model)
-
-
-CSS = """
-#question input {
- font-size: 16px;
-}
-#url-textbox, #question-textbox {
- padding: 0 !important;
-}
-#short-upload-box .w-full {
- min-height: 10rem !important;
-}
-/* I think something like this can be used to re-shape
- * the table
- */
-/*
-.gr-samples-table tr {
- display: inline;
-}
-.gr-samples-table .p-2 {
- width: 100px;
-}
-*/
-#select-a-file {
- width: 100%;
-}
-#file-clear {
- padding-top: 2px !important;
- padding-bottom: 2px !important;
- padding-left: 8px !important;
- padding-right: 8px !important;
- margin-top: 10px;
-}
-.gradio-container .gr-button-primary {
- background: linear-gradient(180deg, #CDF9BE 0%, #AFF497 100%);
- border: 1px solid #B0DCCC;
- border-radius: 8px;
- color: #1B8700;
-}
-.gradio-container.dark button#submit-button {
- background: linear-gradient(180deg, #CDF9BE 0%, #AFF497 100%);
- border: 1px solid #B0DCCC;
- border-radius: 8px;
- color: #1B8700
-}
-
-table.gr-samples-table tr td {
- border: none;
- outline: none;
-}
-
-table.gr-samples-table tr td:first-of-type {
- width: 0%;
-}
-
-div#short-upload-box div.absolute {
- display: none !important;
-}
-
-gradio-app > div > div > div > div.w-full > div, .gradio-app > div > div > div > div.w-full > div {
- gap: 0px 2%;
-}
-
-gradio-app div div div div.w-full, .gradio-app div div div div.w-full {
- gap: 0px;
-}
-
-gradio-app h2, .gradio-app h2 {
- padding-top: 10px;
-}
-
-#answer {
- overflow-y: scroll;
- color: white;
- background: #666;
- border-color: #666;
- font-size: 20px;
- font-weight: bold;
-}
-
-#answer span {
- color: white;
-}
-
-#answer textarea {
- color:white;
- background: #777;
- border-color: #777;
- font-size: 18px;
-}
-
-#url-error input {
- color: red;
-}
-
-#results-table {
- max-height: 600px;
- overflow-y: scroll;
-}
-
-"""
-
-with gr.Blocks(css=CSS) as demo:
- gr.Markdown("# Invoice2Table")
-
- document = gr.Variable()
- fields = gr.Variable(value={**FIELDS})
- example_question = gr.Textbox(visible=False)
- example_image = gr.Image(visible=False)
-
- with gr.Row(equal_height=True):
- with gr.Column():
- with gr.Row():
- gr.Markdown("## Select an invoice", elem_id="select-a-file")
- img_clear_button = gr.Button(
- "Clear", variant="secondary", elem_id="file-clear", visible=False
- )
- image = gr.Gallery(visible=False)
- with gr.Row(equal_height=True):
- with gr.Column():
- with gr.Row():
- url = gr.Textbox(
- show_label=False,
- placeholder="URL",
- lines=1,
- max_lines=1,
- elem_id="url-textbox",
- )
- submit = gr.Button("Get")
- url_error = gr.Textbox(
- visible=False,
- elem_id="url-error",
- max_lines=1,
- interactive=False,
- label="Error",
- )
- gr.Markdown("— or —")
- upload = gr.File(label=None, interactive=True, elem_id="short-upload-box")
- gr.Examples(
- examples=EXAMPLES,
- inputs=[example_image, example_question],
- )
-
- with gr.Column() as col:
- gr.Markdown("## Results")
- with gr.Tabs():
- with gr.TabItem("Table"):
- output_table = gr.Dataframe(
- headers=["Field", "Value"],
- **empty_table(fields.value),
- elem_id="results-table"
- )
-
- with gr.TabItem("JSON"):
- output = gr.JSON(label="Output", visible=True)
-
- model = gr.Radio(
- choices=list(CHECKPOINTS.keys()),
- value=list(CHECKPOINTS.keys())[0],
- label="Model",
- visible=False,
- )
-
- gr.Markdown("### Ask a question")
- with gr.Row():
- question = gr.Textbox(
- label="Question",
- show_label=False,
- placeholder="e.g. What is the invoice number?",
- lines=1,
- max_lines=1,
- elem_id="question-textbox",
- )
- clear_button = gr.Button("Clear", variant="secondary", visible=False)
- submit_button = gr.Button(
- "Add", variant="primary", elem_id="submit-button"
- )
-
- for cb in [img_clear_button, clear_button]:
- cb.click(
- lambda _: (
- gr.update(visible=False, value=None), # image
- None, # document
- # {**FIELDS}, # fields
- gr.update(value=None), # output
- gr.update(**empty_table(fields.value)), # output_table
- gr.update(visible=False),
- None,
- None,
- None,
- gr.update(visible=False, value=None),
- None,
- ),
- inputs=clear_button,
- outputs=[
- image,
- document,
- # fields,
- output,
- output_table,
- img_clear_button,
- example_image,
- upload,
- url,
- url_error,
- question,
- ],
- )
-
- submit_outputs = [
- document,
- fields,
- image,
- img_clear_button,
- url_error,
- output,
- output_table,
- ]
-
- upload.change(
- fn=process_upload,
- inputs=[upload, fields, model],
- outputs=submit_outputs,
- )
-
- submit.click(
- fn=process_path,
- inputs=[url, fields, model],
- outputs=submit_outputs,
- )
-
- for action in [question.submit, submit_button.click]:
- action(
- fn=process_question,
- inputs=[question, document, image, model, fields, output, output_table],
- outputs=[question, image, fields, output, output_table],
- )
-
-# model.change(
-# process_question,
-# inputs=[question, document, model],
-# outputs=[image, output, output_table],
-# )
-
- example_image.change(
- fn=load_example_document,
- inputs=[example_image, example_question, fields, model],
- outputs=submit_outputs,
- )
-
-if __name__ == "__main__":
- demo.launch(enable_queue=False)
diff --git a/spaces/kingtest/BingAI/README.md b/spaces/kingtest/BingAI/README.md
deleted file mode 100644
index c11f57d3899eb68077fdb93bdf81056df23f2b75..0000000000000000000000000000000000000000
--- a/spaces/kingtest/BingAI/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: BingAI
-emoji: 🚀
-colorFrom: purple
-colorTo: red
-sdk: docker
-pinned: false
-license: mit
-app_port: 8080
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/kkawamu1/huggingface_multi_inference_rank_eval/app/evaluation_scripts/__init__.py b/spaces/kkawamu1/huggingface_multi_inference_rank_eval/app/evaluation_scripts/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/koajoel/PolyFormer/fairseq/examples/multilingual/data_scripts/download_iwslt_and_extract.sh b/spaces/koajoel/PolyFormer/fairseq/examples/multilingual/data_scripts/download_iwslt_and_extract.sh
deleted file mode 100644
index ca3591b3db1715f136773d62e4b9b9ede97d436c..0000000000000000000000000000000000000000
--- a/spaces/koajoel/PolyFormer/fairseq/examples/multilingual/data_scripts/download_iwslt_and_extract.sh
+++ /dev/null
@@ -1,225 +0,0 @@
-#!/bin/bash
-# Copyright (c) Facebook, Inc. and its affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-#echo 'Cloning Moses github repository (for tokenization scripts)...'
-#git clone https://github.com/moses-smt/mosesdecoder.git
-
-if [ -z "$WORKDIR_ROOT" ] ;
-then
- echo "please specify your working directory root in environment variable WORKDIR_ROOT. Exiting..."
- exit
-fi
-
-
-
-data_root=${WORKDIR_ROOT}/iwsltv2
-DESTDIR=${WORKDIR_ROOT}/ML50/raw
-
-
-langs="ar_AR it_IT nl_XX ko_KR vi_VN"
-echo "data_root: $data_root"
-
-download_path=${data_root}/downloads
-raw=${DESTDIR}
-tmp=${data_root}/tmp
-orig=${data_root}/orig
-
-mkdir -p $download_path $orig $raw $tmp
-#######################
-download_iwslt(){
- iwslt_key=$1
- src=$2
- tgt=$3
- save_prefix=$4
- pushd ${download_path}
- if [[ ! -f ${save_prefix}$src-$tgt.tgz ]]; then
- wget https://wit3.fbk.eu/archive/${iwslt_key}/texts/$src/$tgt/$src-$tgt.tgz -O ${save_prefix}$src-$tgt.tgz
- [ $? -eq 0 ] && return 0
- fi
- popd
-}
-
-extract_iwslt(){
- src=$1
- tgt=$2
- prefix=$3
- pushd $orig
- tar zxvf ${download_path}/${prefix}$src-${tgt}.tgz
- popd
-}
-
-generate_train(){
- lsrc=$1
- ltgt=$2
- src=${lsrc:0:2}
- tgt=${ltgt:0:2}
- for ll in $lsrc $ltgt; do
- l=${ll:0:2}
- f="$orig/*/train.tags.$src-$tgt.$l"
- f_raw=$raw/train.$lsrc-$ltgt.$ll
- # drop IWSLT XML metadata lines and strip title/description tags
- cat $f \
- | grep -v '<url>' \
- | grep -v '<talkid>' \
- | grep -v '<keywords>' \
- | grep -v '<speaker>' \
- | grep -v '<doc' \
- | sed -e 's/<title>//g' \
- | sed -e 's/<\/title>//g' \
- | sed -e 's/<description>//g' \
- | sed -e 's/<\/description>//g' \
- | sed 's/^\s*//g' \
- | sed 's/\s*$//g' \
- > $f_raw
- [ $? -eq 0 ] && echo "extracted $f to $f_raw"
- done
- return 0
-}
-
-convert_valid_test(){
- src=$1
- tgt=$2
- for l in $src $tgt; do
- echo "lang: ${l}"
- for o in `ls $orig/*/IWSLT*.TED*.$src-$tgt.$l.xml`; do
- fname=${o##*/}
- f=$tmp/${fname%.*}
- echo "$o => $f"
- grep '<seg id' $o \
- | sed -e 's/<seg id="[0-9]*">\s*//g' \
- | sed -e 's/\s*<\/seg>\s*//g' \
- | sed -e "s/\’/\'/g" \
- > $f
- echo ""
- done
- done
-}
-
-generate_subset(){
- lsrc=$1
- ltgt=$2
- src=${lsrc:0:2}
- tgt=${ltgt:0:2}
- subset=$3
- prefix=$4
- for ll in $lsrc $ltgt; do
- l=${ll:0:2}
- f=$tmp/$prefix.${src}-${tgt}.$l
- if [[ -f $f ]]; then
- cp $f $raw/$subset.${lsrc}-$ltgt.${ll}
- fi
- done
-}
-#################
-
-echo "downloading iwslt training and dev data"
-# using multilingual for it, nl
-download_iwslt "2017-01-trnmted" DeEnItNlRo DeEnItNlRo
-download_iwslt "2017-01-trnted" ar en
-download_iwslt "2017-01-trnted" en ar
-download_iwslt "2017-01-trnted" ko en
-download_iwslt "2017-01-trnted" en ko
-download_iwslt "2015-01" vi en
-download_iwslt "2015-01" en vi
-
-echo "donwloading iwslt test data"
-download_iwslt "2017-01-mted-test" it en "test."
-download_iwslt "2017-01-mted-test" en it "test."
-download_iwslt "2017-01-mted-test" nl en "test."
-download_iwslt "2017-01-mted-test" en nl "test."
-
-download_iwslt "2017-01-ted-test" ar en "test."
-download_iwslt "2017-01-ted-test" en ar "test."
-download_iwslt "2017-01-ted-test" ko en "test."
-download_iwslt "2017-01-ted-test" en ko "test."
-download_iwslt "2015-01-test" vi en "test."
-download_iwslt "2015-01-test" en vi "test."
-
-echo "extract training data tar balls"
-extract_iwslt DeEnItNlRo DeEnItNlRo
-extract_iwslt ar en
-extract_iwslt en ar
-extract_iwslt ko en
-extract_iwslt en ko
-extract_iwslt vi en
-extract_iwslt en vi
-
-
-echo "extracting iwslt test data"
-for lang in $langs; do
- l=${lang:0:2}
- extract_iwslt $l en "test."
- extract_iwslt en $l "test."
-done
-
-echo "convert dev and test data"
-for lang in $langs; do
- s_lang=${lang:0:2}
- convert_valid_test $s_lang en
- convert_valid_test en $s_lang
-done
-
-
-
-echo "creating training data into $raw"
-for lang in $langs; do
- generate_train $lang en_XX
- generate_train en_XX $lang
-done
-
-echo "creating iwslt dev data into raw"
-generate_subset en_XX vi_VN valid "IWSLT15.TED.tst2013"
-generate_subset vi_VN en_XX valid "IWSLT15.TED.tst2013"
-
-generate_subset en_XX ar_AR valid "IWSLT17.TED.tst2016"
-generate_subset ar_AR en_XX valid "IWSLT17.TED.tst2016"
-generate_subset en_XX ko_KR valid "IWSLT17.TED.tst2016"
-generate_subset ko_KR en_XX valid "IWSLT17.TED.tst2016"
-
-
-generate_subset en_XX it_IT valid "IWSLT17.TED.tst2010"
-generate_subset it_IT en_XX valid "IWSLT17.TED.tst2010"
-generate_subset en_XX nl_XX valid "IWSLT17.TED.tst2010"
-generate_subset nl_XX en_XX valid "IWSLT17.TED.tst2010"
-
-echo "creating iswslt test data into raw"
-generate_subset en_XX vi_VN test "IWSLT15.TED.tst2015"
-generate_subset vi_VN en_XX test "IWSLT15.TED.tst2015"
-
-generate_subset en_XX ar_AR test "IWSLT17.TED.tst2017"
-generate_subset ar_AR en_XX test "IWSLT17.TED.tst2017"
-generate_subset en_XX ko_KR test "IWSLT17.TED.tst2017"
-generate_subset ko_KR en_XX test "IWSLT17.TED.tst2017"
-
-generate_subset en_XX it_IT test "IWSLT17.TED.tst2017.mltlng"
-generate_subset it_IT en_XX test "IWSLT17.TED.tst2017.mltlng"
-generate_subset en_XX nl_XX test "IWSLT17.TED.tst2017.mltlng"
-generate_subset nl_XX en_XX test "IWSLT17.TED.tst2017.mltlng"
-
-# normalize iwslt directions into x-en
-pushd $raw
-for lang in $langs; do
- for split in test valid; do
- x_en_f1=$split.$lang-en_XX.en_XX
- x_en_f2=$split.$lang-en_XX.${lang}
-
- en_x_f1=$split.en_XX-$lang.en_XX
- en_x_f2=$split.en_XX-$lang.${lang}
-
- if [ -f $en_x_f1 ] && [ ! -f $x_en_f1 ]; then
- echo "cp $en_x_f1 $x_en_f1"
- cp $en_x_f1 $x_en_f1
- fi
- if [ -f $en_x_f2 ] && [ ! -f $x_en_f2 ]; then
- echo "cp $en_x_f2 $x_en_f2"
- cp $en_x_f2 $x_en_f2
- fi
- done
-done
-popd
\ No newline at end of file
diff --git a/spaces/kohrisatou-infinity/KIP_01_beta/add_speaker.py b/spaces/kohrisatou-infinity/KIP_01_beta/add_speaker.py
deleted file mode 100644
index e224f07c892a5fe1837e3cbf1745e0d8992ea283..0000000000000000000000000000000000000000
--- a/spaces/kohrisatou-infinity/KIP_01_beta/add_speaker.py
+++ /dev/null
@@ -1,62 +0,0 @@
-import os
-import argparse
-from tqdm import tqdm
-from random import shuffle
-import json
-
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser()
- parser.add_argument("--train_list", type=str, default="./filelists/train.txt", help="path to train list")
- parser.add_argument("--val_list", type=str, default="./filelists/val.txt", help="path to val list")
- parser.add_argument("--test_list", type=str, default="./filelists/test.txt", help="path to test list")
- parser.add_argument("--source_dir", type=str, default="./dataset/32k", help="path to source dir")
- args = parser.parse_args()
-
- previous_config = json.load(open("configs/config.json", "rb"))
-
- train = []
- val = []
- test = []
- idx = 0
- spk_dict = previous_config["spk"]
- spk_id = max(spk_dict.values()) + 1
- for speaker in tqdm(os.listdir(args.source_dir)):
- if speaker not in spk_dict.keys():
- spk_dict[speaker] = spk_id
- spk_id += 1
- wavs = [os.path.join(args.source_dir, speaker, i)for i in os.listdir(os.path.join(args.source_dir, speaker))]
- wavs = [i for i in wavs if i.endswith("wav")]
- shuffle(wavs)
- train += wavs[2:-10]
- val += wavs[:2]
- test += wavs[-10:]
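- # NOTE: this split assumes each speaker folder holds well over 12 wavs;
- # with fewer, train can be empty and val/test can overlap.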
-
- assert previous_config["model"]["n_speakers"] > len(spk_dict.keys())
- shuffle(train)
- shuffle(val)
- shuffle(test)
-
- print("Writing", args.train_list)
- with open(args.train_list, "w") as f:
- for fname in tqdm(train):
- wavpath = fname
- f.write(wavpath + "\n")
-
- print("Writing", args.val_list)
- with open(args.val_list, "w") as f:
- for fname in tqdm(val):
- wavpath = fname
- f.write(wavpath + "\n")
-
- print("Writing", args.test_list)
- with open(args.test_list, "w") as f:
- for fname in tqdm(test):
- wavpath = fname
- f.write(wavpath + "\n")
-
- previous_config["spk"] = spk_dict
-
- print("Writing configs/config.json")
- with open("configs/config.json", "w") as f:
- json.dump(previous_config, f, indent=2)
diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/aiohttp/base_protocol.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/aiohttp/base_protocol.py
deleted file mode 100644
index 4c9f0a752e3aa833a17b7adf0c261d19a5f083fa..0000000000000000000000000000000000000000
--- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/aiohttp/base_protocol.py
+++ /dev/null
@@ -1,90 +0,0 @@
-import asyncio
-from typing import Optional, cast
-
-from .tcp_helpers import tcp_nodelay
-
-
-class BaseProtocol(asyncio.Protocol):
- __slots__ = (
- "_loop",
- "_paused",
- "_drain_waiter",
- "_connection_lost",
- "_reading_paused",
- "transport",
- )
-
- def __init__(self, loop: asyncio.AbstractEventLoop) -> None:
- self._loop: asyncio.AbstractEventLoop = loop
- self._paused = False
- self._drain_waiter: Optional[asyncio.Future[None]] = None
- self._reading_paused = False
-
- self.transport: Optional[asyncio.Transport] = None
-
- @property
- def connected(self) -> bool:
- """Return True if the connection is open."""
- return self.transport is not None
-
- def pause_writing(self) -> None:
- assert not self._paused
- self._paused = True
-
- def resume_writing(self) -> None:
- assert self._paused
- self._paused = False
-
- waiter = self._drain_waiter
- if waiter is not None:
- self._drain_waiter = None
- if not waiter.done():
- waiter.set_result(None)
-
- def pause_reading(self) -> None:
- if not self._reading_paused and self.transport is not None:
- try:
- self.transport.pause_reading()
- except (AttributeError, NotImplementedError, RuntimeError):
- pass
- self._reading_paused = True
-
- def resume_reading(self) -> None:
- if self._reading_paused and self.transport is not None:
- try:
- self.transport.resume_reading()
- except (AttributeError, NotImplementedError, RuntimeError):
- pass
- self._reading_paused = False
-
- def connection_made(self, transport: asyncio.BaseTransport) -> None:
- tr = cast(asyncio.Transport, transport)
- tcp_nodelay(tr, True)
- self.transport = tr
-
- def connection_lost(self, exc: Optional[BaseException]) -> None:
- # Wake up the writer if currently paused.
- self.transport = None
- if not self._paused:
- return
- waiter = self._drain_waiter
- if waiter is None:
- return
- self._drain_waiter = None
- if waiter.done():
- return
- if exc is None:
- waiter.set_result(None)
- else:
- waiter.set_exception(exc)
-
- async def _drain_helper(self) -> None:
- if not self.connected:
- raise ConnectionResetError("Connection lost")
- if not self._paused:
- return
- waiter = self._drain_waiter
- if waiter is None:
- waiter = self._loop.create_future()
- self._drain_waiter = waiter
- await asyncio.shield(waiter)
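-
-# Flow-control sketch: a writer awaits _drain_helper() after each write; once
-# the transport buffer fills, pause_writing() sets _paused, so the helper parks
-# on _drain_waiter until resume_writing() or connection_lost() resolves it.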
diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fastapi/middleware/gzip.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fastapi/middleware/gzip.py
deleted file mode 100644
index bbeb2cc7861a735d6cd5c0e29aeb6dbf8457023a..0000000000000000000000000000000000000000
--- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fastapi/middleware/gzip.py
+++ /dev/null
@@ -1 +0,0 @@
-from starlette.middleware.gzip import GZipMiddleware as GZipMiddleware # noqa
diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Adobe Dreamweaver CC 2018 Crack !FREE! Activation Key Free Download.md b/spaces/lincquiQcaudo/Top-20-Diffusion/Adobe Dreamweaver CC 2018 Crack !FREE! Activation Key Free Download.md
deleted file mode 100644
index 38ee8d400086f485434a65f878c96cd0e6fc9ee6..0000000000000000000000000000000000000000
--- a/spaces/lincquiQcaudo/Top-20-Diffusion/Adobe Dreamweaver CC 2018 Crack !FREE! Activation Key Free Download.md
+++ /dev/null
@@ -1,68 +0,0 @@
-
Adobe Dreamweaver CC 2018 Crack Activation Key Free Download
-
-100% working!!!
-
-The list of Dreamweaver CC 2018 activation keys I bought is below:
-
-Full video will be available for purchase on my ebay-store after one week or less, until then please enjoy this short preview...
-
-2015 dreamweaver cc
-
-Dreamweaver CS6 activation key
-
-Activation key for dreamweaver cc2015. - Screenshot
-
-How to activate dreamweaver cc 2015 free - Dreamweaver CC, Dreamweaver
-
-Dreamweaver CC 2019 Key free activation
-
-Activation key for dreamweaver cc 2019. This list of activation keys contains my own list of Dreamweaver Activation Keys. This video tells you about how to activate Dreamweaver CC 2019 and how to activate the key without downloading.
-
-You can use this activation key for Dreamweaver CC 2019 version 22 or higher.
-
-If you find a better working activation key, feel free to contact me.
-
-Dreamweaver is a powerful web design software, which is developed by Adobe. It is completely free to use.
-
-Dreamweaver CC 2019 has a good set of tools which offers users a powerful web designing software. It supports a wide range of web browsers and devices. With Dreamweaver, you can create attractive, professional websites. It provides different features, such as grid-based design, ecommerce, and image editing websites. You can also publish the sites to different hosting services and download the final web pages from any web browser.
-
-This software enables users to design attractive websites that can be published to the web. The application is easy to use, and even beginners will be able to create professional websites.
-
-Features Of Dreamweaver CC:
-
-User-friendly and easy to understand interface
-
-Powerful grids for designing websites
-
-Support for responsive design, e-commerce, and HTML5
-
-Support for various browsers
-
-Advanced features for designing websites
-
-Design and create websites for different devices
-
-Transparent
-
-Create attractive websites for smartphones
-
-Create e-commerce websites
-
-Create beautiful websites for iPhone and Android devices
-
-Compatibility with all web browsers
-
-Requirements:
-
-Windows OS
-
-1 GB RAM
-
-100 MB free space
-
-How to Activate Dreamweaver CC 2019
-
-First of all, we will download Dreamweaver CC 2019 from the official website. After this, we will click on the downloaded file. Then we will open the archive folder of the downloaded software. We will find a file named as
-
-
-
diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Arax Disk Doctor License Key.md b/spaces/lincquiQcaudo/Top-20-Diffusion/Arax Disk Doctor License Key.md
deleted file mode 100644
index 457e3384d716a4cb26c0d72e121ef105eee47b26..0000000000000000000000000000000000000000
--- a/spaces/lincquiQcaudo/Top-20-Diffusion/Arax Disk Doctor License Key.md
+++ /dev/null
@@ -1,9 +0,0 @@
-
-
-arax disk doctor data recovery download arax disk doctor data recovery license key arax disk doctor data ... download arax disk doctor in Russian + crack arax disk doctor data recovery download arax disk doctor data recovery license key arax disk doctor data recovery arax disk doctor data recovery license key
-arax disk doctor data recovery download arax disk doctor data recovery license key arax disk doctor data recovery license key
-arax disk doctor download
-Arax Disk Doctor
-
-
-
diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Babo Aavyo Courier Ma Gujarati Natak Torrent [HOT].md b/spaces/lincquiQcaudo/Top-20-Diffusion/Babo Aavyo Courier Ma Gujarati Natak Torrent [HOT].md
deleted file mode 100644
index 45dbd025c890cfceb687250fbd88fa90518fa079..0000000000000000000000000000000000000000
--- a/spaces/lincquiQcaudo/Top-20-Diffusion/Babo Aavyo Courier Ma Gujarati Natak Torrent [HOT].md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
-▻ See manufacturer-specific OBD2 error codes for the following brands: Renault, Peugeot, Citroen, Opel, BMW, Ford, Audi, Volkswagen, Skoda, Fiat, Alfa Romeo, Datsun, Lada, Nissan, Hyundai, KIA, Mercedes, Mitsubishi, Toyota, etc. * Please refer to the links below for more details:
-▻ Renault and Peugeot.
-* If you are not sure, please read the Installation Guide.
-
-
-
diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Grand-Theft-Auto-V--GTA-5-(v1-0-331-1 -_TOP_ Crack-V2-3-MULTI11)-[FitGirl-Ultra-Repack].md b/spaces/lincquiQcaudo/Top-20-Diffusion/Grand-Theft-Auto-V--GTA-5-(v1-0-331-1 -_TOP_ Crack-V2-3-MULTI11)-[FitGirl-Ultra-Repack].md
deleted file mode 100644
index 67a44ba8f43d6292c873a8a6a325892ee38332c5..0000000000000000000000000000000000000000
--- a/spaces/lincquiQcaudo/Top-20-Diffusion/Grand-Theft-Auto-V--GTA-5-(v1-0-331-1 -_TOP_ Crack-V2-3-MULTI11)-[FitGirl-Ultra-Repack].md
+++ /dev/null
@@ -1,60 +0,0 @@
-
-
Grand Theft Auto V / GTA 5 (v1.0.331.1 -CRACK-V2-3-MULTI11) [FitGirl Ultra Repack] - A Complete Guide
-
-
If you are looking for a way to download and play one of the most popular and acclaimed open world games ever made, you have come to the right place. In this article, we will show you how to get Grand Theft Auto V / GTA 5 (v1.0.331.1 -CRACK-V2-3-MULTI11) [FitGirl Ultra Repack], a highly compressed and optimized version of the game that saves you a lot of disk space and bandwidth.
Grand Theft Auto V / GTA 5 is an action-adventure game developed by Rockstar North and published by Rockstar Games in 2013. It is the fifth main installment in the Grand Theft Auto series, and the first one to feature three playable protagonists: Michael, a retired bank robber; Franklin, a street hustler; and Trevor, a psychopathic criminal.
-
-
The game is set in the fictional state of San Andreas, which is based on Southern California, and follows the trio as they perform various heists and missions across the city of Los Santos and its surrounding areas. The game features a vast and diverse open world that can be explored on foot or by using various vehicles, such as cars, bikes, planes, helicopters, boats, and more.
-
-
The game also features an online multiplayer mode called Grand Theft Auto Online, which allows up to 30 players to cooperate or compete in various activities and events. The online mode also has its own storyline and progression system, as well as customizable characters, vehicles, weapons, properties, and more.
-
-
What is Grand Theft Auto V / GTA 5 (v1.0.331.1 -CRACK-V2-3-MULTI11) [FitGirl Ultra Repack]?
-
-
Grand Theft Auto V / GTA 5 (v1.0.331.1 -CRACK-V2-3-MULTI11) [FitGirl Ultra Repack] is a modified version of the game that has been compressed and optimized by FitGirl, a well-known repacker of PC games. The repack reduces the original size of the game from 105 GB to 46.6 GB, without sacrificing any quality or content.
-
-
The repack also includes the latest update (v1.0.2545/1.58) and crack (Mr.Goldberg SCE) for the game, which means you don't need to install any additional patches or use any online services to play the game offline. The repack also supports multiple languages (RUS/ENG/MULTI13) and allows you to change the game GUI language using REG-files in the "_Language Switcher" folder.
-
-
-
The repack is safe for modding, as it does not remove or alter any multiplayer or DLC files. However, you should always run the game from desktop icon and with admin rights to avoid any issues.
-
-
How to Download and Install Grand Theft Auto V / GTA 5 (v1.0.331.1 -CRACK-V2-3-MULTI11) [FitGirl Ultra Repack]?
-
-
To download and install Grand Theft Auto V / GTA 5 (v1.0.331.1 -CRACK-V2-3-MULTI11) [FitGirl Ultra Repack], you need to follow these steps:
-
-
-
Download the repack from one of the mirrors provided by FitGirl on her website or torrent trackers.
-
Extract the repack using WinRAR or 7-Zip to a folder of your choice.
-
Run setup.exe and follow the instructions to install the game.
-
Wait for the installation to finish and check the integrity of the installed files using QuickSFV.exe in the game folder.
-
Enjoy playing Grand Theft Auto V / GTA 5 (v1.0.331.1 -CRACK-V2-3-MULTI11) [FitGirl Ultra Repack]!
-
-
-
Conclusion
-
-
Grand Theft Auto V / GTA 5 (v1.0.331.1 -CRACK-V2-3-MULTI11) [FitGirl Ultra Repack] is a great way to experience one of the best games ever made on your PC without wasting too much disk space or bandwidth. The repack offers you a high-quality and fully functional version of the game that can be played offline or modded as you wish.
-
-
If you have any questions or problems regarding Grand Theft Auto V / GTA 5 (v1.0.331.1 -CRACK-V2-3-MULTI11) [FitGirl Ultra Repack], feel free to leave a comment below or visit FitGirl's website for more information and support.
-
What are the Features and Benefits of Grand Theft Auto V / GTA 5 (v1.0.331.1 -CRACK-V2-3-MULTI11) [FitGirl Ultra Repack]?
-
-
Grand Theft Auto V / GTA 5 (v1.0.331.1 -CRACK-V2-3-MULTI11) [FitGirl Ultra Repack] offers you a lot of features and benefits that make it worth downloading and playing. Here are some of them:
-
-
-
You can enjoy a stunning and immersive open world that features a variety of locations, activities, characters, and events. You can explore the city of Los Santos and its surrounding areas, from the urban streets to the rural hills, from the beaches to the mountains, from the deserts to the forests. You can also interact with hundreds of NPCs, animals, and vehicles that populate the world.
-
You can experience a thrilling and engaging storyline that follows the lives and adventures of three different protagonists: Michael, Franklin, and Trevor. You can switch between them at any time and see the world from their perspectives. You can also choose how to approach each mission and heist, whether you want to go loud or stealthy, violent or peaceful, solo or with a team.
-
You can customize your characters, vehicles, weapons, properties, and more to suit your preferences and style. You can change your appearance, clothes, tattoos, haircuts, accessories, and more. You can also upgrade your vehicles, weapons, armor, gadgets, and more. You can also buy or sell various properties, such as houses, garages, businesses, clubs, and more.
-
You can access a variety of modes and features that enhance your gameplay and enjoyment. You can play Grand Theft Auto Online, which is a separate online multiplayer mode that allows you to create your own character and join up to 30 other players in various activities and events. You can also play in First Person Mode, which gives you a new perspective on the game world. You can also use the Rockstar Editor, which is a powerful video editing tool that lets you record, edit, and share your gameplay footage.
-
You can save a lot of disk space and bandwidth by downloading Grand Theft Auto V / GTA 5 (v1.0.331.1 -CRACK-V2-3-MULTI11) [FitGirl Ultra Repack], which is a highly compressed and optimized version of the game that does not compromise on quality or content. The repack reduces the original size of the game from 105 GB to 46.6 GB, without sacrificing any quality or content.
-
-
-
These are just some of the features and benefits of Grand Theft Auto V / GTA 5 (v1.0.331.1 -CRACK-V2-3-MULTI11) [FitGirl Ultra Repack]. There are many more that you can discover and enjoy by downloading and playing the game yourself.
-
Conclusion
-
-
Grand Theft Auto V / GTA 5 (v1.0.331.1 -CRACK-V2-3-MULTI11) [FitGirl Ultra Repack] is a must-have for any fan of action-adventure games and open world games. It offers you a rich and immersive game world that features a variety of locations, activities, characters, and events. It also offers you a captivating and dynamic storyline that follows the lives and adventures of three different protagonists: Michael, Franklin, and Trevor. It also offers you a lot of customization options and modes that enhance your gameplay and enjoyment.
-
-
If you want to download and play Grand Theft Auto V / GTA 5 (v1.0.331.1 -CRACK-V2-3-MULTI11) [FitGirl Ultra Repack], you can follow the steps we have provided in this article. You can also visit FitGirl's website for more information and support. We hope you have found this article helpful and informative. Thank you for reading and happy gaming!
-
-
\ No newline at end of file
diff --git a/spaces/lllqqq/so-vits-svc-models-pcr/vdecoder/nsf_hifigan/env.py b/spaces/lllqqq/so-vits-svc-models-pcr/vdecoder/nsf_hifigan/env.py
deleted file mode 100644
index 2bdbc95d4f7a8bad8fd4f5eef657e2b51d946056..0000000000000000000000000000000000000000
--- a/spaces/lllqqq/so-vits-svc-models-pcr/vdecoder/nsf_hifigan/env.py
+++ /dev/null
@@ -1,15 +0,0 @@
-import os
-import shutil
-
-
-class AttrDict(dict):
- def __init__(self, *args, **kwargs):
- super(AttrDict, self).__init__(*args, **kwargs)
- self.__dict__ = self
-
-
-def build_env(config, config_name, path):
- t_path = os.path.join(path, config_name)
- if config != t_path:
- os.makedirs(path, exist_ok=True)
- shutil.copyfile(config, os.path.join(path, config_name))
diff --git a/spaces/ludusc/latent-space-theories/backend/maximally_activating_patches.py b/spaces/ludusc/latent-space-theories/backend/maximally_activating_patches.py
deleted file mode 100644
index cc22098e77ee607316dfebc5ad1c566f36cdfe21..0000000000000000000000000000000000000000
--- a/spaces/ludusc/latent-space-theories/backend/maximally_activating_patches.py
+++ /dev/null
@@ -1,45 +0,0 @@
-import pickle
-import streamlit as st
-
-from backend.load_file import load_json
-
-
-@st.cache(allow_output_mutation=True)
-# st.cache_data
-def load_activation(filename):
- activation = load_json(filename)
- return activation
-
-@st.cache(allow_output_mutation=True)
-# @st.cache_data
-def load_dataset(data_index):
- with open(f'./data/preprocessed_image_net/val_data_{data_index}.pkl', 'rb') as file:
- dataset = pickle.load(file)
- return dataset
-
-def load_layer_infos(filename):
- layer_infos = load_json(filename)
- return layer_infos
-
-def get_receptive_field_coordinates(layer_infos, layer_name, idx_x, idx_y):
- """
- layer_name: as in layer_infos keys (eg: 'encoder.stages[0].layers[0]')
- idx_x: integer coordinate on the width axis of the feature maps; must be < n
- idx_y: integer coordinate on the height axis of the feature maps; must be < n
- """
- layer_name = layer_name.replace('.dwconv', '').replace('.layernorm', '')
- layer_name = layer_name.replace('.pwconv1', '').replace('.pwconv2', '').replace('.drop_path', '')
- n = layer_infos[layer_name]['n']
- j = layer_infos[layer_name]['j']
- r = layer_infos[layer_name]['r']
- start = layer_infos[layer_name]['start']
- assert idx_x < n, f'n={n}'
- assert idx_y < n, f'n={n}'
-
- # image tensor (N, H, W, C) or (N, C, H, W) => image_patch=image[y1:y2, x1:x2]
- center = (start + idx_x*j, start + idx_y*j)
- x1, x2 = (max(center[0]-r/2, 0), max(center[0]+r/2, 0))
- y1, y2 = (max(center[1]-r/2, 0), max(center[1]+r/2, 0))
- x1, x2, y1, y2 = int(x1), int(x2), int(y1), int(y2)
-
- return x1, x2, y1, y2
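-
-# Worked example (j, r, start are assumed values, not from a real config file):
-# with j=4, r=11, start=5 and idx_x=idx_y=0, center=(5, 5), so the crop is
-# image[0:10, 0:10] after the max(..., 0) clamps and int() truncation.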
diff --git a/spaces/ludvigolsen/plot_confusion_matrix/templates.py b/spaces/ludvigolsen/plot_confusion_matrix/templates.py
deleted file mode 100644
index de7048c347dfb1438b78ffcb23d7fd555f75d3fd..0000000000000000000000000000000000000000
--- a/spaces/ludvigolsen/plot_confusion_matrix/templates.py
+++ /dev/null
@@ -1,132 +0,0 @@
-def get_templates():
- temps = Templates()
-
- # Blues
- temps.add(
- name="Blues 2-Class",
- num_classes=2,
- sums=False,
- settings_path="design_settings.blues_nc2_1.1.json",
- image_path="blues_nc2_1.1.png",
- collection="Blues 1",
- )
- temps.add(
- name="Blues 3-Class",
- num_classes=3,
- sums=False,
- settings_path="design_settings.blues_nc3_1.1.json",
- image_path="blues_nc3_1.1.png",
- collection="Blues 1",
- )
- temps.add(
- name="Blues 2-Class w/ Sums",
- num_classes=2,
- sums=True,
- settings_path="design_settings.blues_nc2_sums_1.1.json",
- image_path="blues_nc2_sums_1.1.png",
- collection="Blues 1",
- )
- temps.add(
- name="Blues 3-Class w/ Sums",
- num_classes=3,
- sums=True,
- settings_path="design_settings.blues_nc3_sums_1.1.json",
- image_path="blues_nc3_sums_1.1.png",
- collection="Blues 1",
- )
-
- # Greys
- temps.add(
- name="Greys 2-Class",
- num_classes=2,
- sums=False,
- settings_path="design_settings.greys_nc2_1.1.json",
- image_path="greys_nc2_1.1.png",
- collection="Greys 1",
- )
- temps.add(
- name="Greys 3-Class",
- num_classes=3,
- sums=False,
- settings_path="design_settings.greys_nc3_1.1.json",
- image_path="greys_nc3_1.1.png",
- collection="Greys 1",
- )
- temps.add(
- name="Greys 2-Class w/ Sums",
- num_classes=2,
- sums=True,
- settings_path="design_settings.greys_nc2_sums_1.1.json",
- image_path="greys_nc2_sums_1.1.png",
- collection="Greys 1",
- )
- temps.add(
- name="Greys 3-Class w/ Sums",
- num_classes=3,
- sums=True,
- settings_path="design_settings.greys_nc3_sums_1.1.json",
- image_path="greys_nc3_sums_1.1.png",
- collection="Greys 1",
- )
-
- # Turquoise
- temps.add(
- name="Turquoises 2-Class",
- num_classes=2,
- sums=False,
- settings_path="design_settings.turquoise_nc2_1.1.json",
- image_path="turquoise_nc2_1.1.png",
- collection="Turquoises 1",
- )
- temps.add(
- name="Turquoises 3-Class",
- num_classes=3,
- sums=False,
- settings_path="design_settings.turquoise_nc3_1.1.json",
- image_path="turquoise_nc3_1.1.png",
- collection="Turquoises 1",
- )
- temps.add(
- name="Turquoises 2-Class w/ Sums",
- num_classes=2,
- sums=True,
- settings_path="design_settings.turquoise_nc2_sums_1.1.json",
- image_path="turquoise_nc2_sums_1.1.png",
- collection="Turquoises 1",
- )
- temps.add(
- name="Turquoises 3-Class w/ Sums",
- num_classes=3,
- sums=True,
- settings_path="design_settings.turquoise_nc3_sums_1.1.json",
- image_path="turquoise_nc3_sums_1.1.png",
- collection="Turquoises 1",
- )
-
- return temps.get_templates()
-
-
-class Templates:
- def __init__(self) -> None:
- self.templates = {}
-
- def get_templates(self) -> dict:
- return self.templates
-
- def add(
- self,
- name: str,
- num_classes: int,
- sums: bool,
- settings_path: str,
- image_path: str,
- collection: str,
- ) -> None:
- self.templates[name] = {
- "collection": collection,
- "num_classes": num_classes,
- "sums": sums,
- "settings": settings_path,
- "image": image_path,
- }
diff --git a/spaces/luxuedong/lxd/src/pages/api/create.ts b/spaces/luxuedong/lxd/src/pages/api/create.ts
deleted file mode 100644
index e44581b1865576e73a32bc819534617d2575c8c9..0000000000000000000000000000000000000000
--- a/spaces/luxuedong/lxd/src/pages/api/create.ts
+++ /dev/null
@@ -1,47 +0,0 @@
-'use server'
-
-import { NextApiRequest, NextApiResponse } from 'next'
-import { fetch, debug } from '@/lib/isomorphic'
-import { createHeaders, randomIP } from '@/lib/utils'
-import { sleep } from '@/lib/bots/bing/utils'
-
-const API_ENDPOINT = 'https://www.bing.com/turing/conversation/create'
-// const API_ENDPOINT = 'https://edgeservices.bing.com/edgesvc/turing/conversation/create';
-
-export default async function handler(req: NextApiRequest, res: NextApiResponse) {
- try {
- let count = 0
- const headers = createHeaders(req.cookies)
- do {
- headers['x-forwarded-for'] = headers['x-forwarded-for'] || randomIP()
- debug(`try ${count+1}`, headers['x-forwarded-for'])
- const response = await fetch(API_ENDPOINT, { method: 'GET', headers })
- if (response.status === 200) {
- res.setHeader('set-cookie', [headers.cookie, `BING_IP=${headers['x-forwarded-for']}`]
- .map(cookie => `${cookie}; Max-Age=${86400 * 30}; Path=/; SameSite=None; Secure`))
- debug('headers', headers)
- res.writeHead(200, {
- 'Content-Type': 'application/json',
- })
- res.end(await response.text())
- return
- }
- await sleep(2000)
- headers['x-forwarded-for'] = ''
- } while(count++ < 10)
- res.end(JSON.stringify({
- result: {
- value: 'TryLater',
- message: `Please try again after a while`
- }
- }))
- } catch (e) {
- console.log('error', e)
- return res.end(JSON.stringify({
- result: {
- value: 'UnauthorizedRequest',
- message: `${e}`
- }
- }))
- }
-}
diff --git a/spaces/magicr/BuboGPT/bubogpt/processors/blip_processors.py b/spaces/magicr/BuboGPT/bubogpt/processors/blip_processors.py
deleted file mode 100644
index 0c4ac341b4e31e02aefa841678bf857bf1cdf990..0000000000000000000000000000000000000000
--- a/spaces/magicr/BuboGPT/bubogpt/processors/blip_processors.py
+++ /dev/null
@@ -1,141 +0,0 @@
-"""
- Copyright (c) 2022, salesforce.com, inc.
- All rights reserved.
- SPDX-License-Identifier: BSD-3-Clause
- For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause
-"""
-
-import re
-
-from bubogpt.common.registry import registry
-from bubogpt.processors.base_processor import BaseProcessor
-from bubogpt.processors.vision_augment import RandomAugment
-from omegaconf import OmegaConf
-from torchvision import transforms
-from torchvision.transforms.functional import InterpolationMode
-
-
-class BlipImageBaseProcessor(BaseProcessor):
- def __init__(self, mean=None, std=None):
- if mean is None:
- mean = (0.48145466, 0.4578275, 0.40821073)
- if std is None:
- std = (0.26862954, 0.26130258, 0.27577711)
-
- self.normalize = transforms.Normalize(mean, std)
-
-
-@registry.register_processor("blip_caption")
-class BlipCaptionProcessor(BaseProcessor):
- def __init__(self, prompt="", max_words=50):
- self.prompt = prompt
- self.max_words = max_words
-
- def __call__(self, caption):
- caption = self.prompt + self.pre_caption(caption)
-
- return caption
-
- @classmethod
- def from_config(cls, cfg=None):
- if cfg is None:
- cfg = OmegaConf.create()
-
- prompt = cfg.get("prompt", "")
- max_words = cfg.get("max_words", 50)
-
- return cls(prompt=prompt, max_words=max_words)
-
- def pre_caption(self, caption):
- caption = re.sub(
- r"([.!\"()*#:;~])",
- " ",
- caption.lower(),
- )
- caption = re.sub(
- r"\s{2,}",
- " ",
- caption,
- )
- caption = caption.rstrip("\n")
- caption = caption.strip(" ")
-
- # truncate caption
- caption_words = caption.split(" ")
- if len(caption_words) > self.max_words:
- caption = " ".join(caption_words[: self.max_words])
-
- return caption
-
-
-@registry.register_processor("blip2_image_train")
-class Blip2ImageTrainProcessor(BlipImageBaseProcessor):
- def __init__(self, image_size=224, mean=None, std=None, min_scale=0.5, max_scale=1.0):
- super().__init__(mean=mean, std=std)
-
- self.transform = transforms.Compose(
- [
- transforms.RandomResizedCrop(
- image_size,
- scale=(min_scale, max_scale),
- interpolation=InterpolationMode.BICUBIC,
- ),
- transforms.ToTensor(),
- self.normalize,
- ]
- )
-
- def __call__(self, item):
- return self.transform(item)
-
- @classmethod
- def from_config(cls, cfg=None):
- if cfg is None:
- cfg = OmegaConf.create()
-
- image_size = cfg.get("image_size", 224)
-
- mean = cfg.get("mean", None)
- std = cfg.get("std", None)
-
- min_scale = cfg.get("min_scale", 0.5)
- max_scale = cfg.get("max_scale", 1.0)
-
- return cls(
- image_size=image_size,
- mean=mean,
- std=std,
- min_scale=min_scale,
- max_scale=max_scale,
- )
-
-
-@registry.register_processor("blip2_image_eval")
-class Blip2ImageEvalProcessor(BlipImageBaseProcessor):
- def __init__(self, image_size=224, mean=None, std=None):
- super().__init__(mean=mean, std=std)
-
- self.transform = transforms.Compose(
- [
- transforms.Resize(
- (image_size, image_size), interpolation=InterpolationMode.BICUBIC
- ),
- transforms.ToTensor(),
- self.normalize,
- ]
- )
-
- def __call__(self, item):
- return self.transform(item)
-
- @classmethod
- def from_config(cls, cfg=None):
- if cfg is None:
- cfg = OmegaConf.create()
-
- image_size = cfg.get("image_size", 224)
-
- mean = cfg.get("mean", None)
- std = cfg.get("std", None)
-
- return cls(image_size=image_size, mean=mean, std=std)
\ No newline at end of file
diff --git a/spaces/maminghui/ChatGPT/chatgpt - windows.bat b/spaces/maminghui/ChatGPT/chatgpt - windows.bat
deleted file mode 100644
index 0b78fdc3a559abd692e3a9e9af5e482124d13a99..0000000000000000000000000000000000000000
--- a/spaces/maminghui/ChatGPT/chatgpt - windows.bat
+++ /dev/null
@@ -1,14 +0,0 @@
-@echo off
-echo Opening ChuanhuChatGPT...
-
-REM Open powershell via bat
-start powershell.exe -NoExit -Command "python ./ChuanhuChatbot.py"
-
-REM Wait briefly before opening the web page at http://127.0.0.1:7860/
-ping -n 5 127.0.0.1>nul
-
-REM Access ChatGPT via your default browser
-start "" "http://127.0.0.1:7860/"
-
-
-echo Finished opening ChuanhuChatGPT (http://127.0.0.1:7860/).
\ No newline at end of file
diff --git a/spaces/manhkhanhUIT/Image_Restoration_Colorization/Global/detection_models/networks.py b/spaces/manhkhanhUIT/Image_Restoration_Colorization/Global/detection_models/networks.py
deleted file mode 100644
index d88bc5d5694db47220ccf70e97690de3224c2c60..0000000000000000000000000000000000000000
--- a/spaces/manhkhanhUIT/Image_Restoration_Colorization/Global/detection_models/networks.py
+++ /dev/null
@@ -1,332 +0,0 @@
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT License.
-
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from detection_models.sync_batchnorm import DataParallelWithCallback
-from detection_models.antialiasing import Downsample
-
-
-class UNet(nn.Module):
- def __init__(
- self,
- in_channels=3,
- out_channels=3,
- depth=5,
- conv_num=2,
- wf=6,
- padding=True,
- batch_norm=True,
- up_mode="upsample",
- with_tanh=False,
- sync_bn=True,
- antialiasing=True,
- ):
- """
- Implementation of
- U-Net: Convolutional Networks for Biomedical Image Segmentation
- (Ronneberger et al., 2015)
- https://arxiv.org/abs/1505.04597
- Using the default arguments will yield the exact version used
- in the original paper
- Args:
- in_channels (int): number of input channels
- out_channels (int): number of output channels
- depth (int): depth of the network
- wf (int): number of filters in the first layer is 2**wf
- padding (bool): if True, apply padding such that the input shape
- is the same as the output.
- This may introduce artifacts
- batch_norm (bool): Use BatchNorm after layers with an
- activation function
- up_mode (str): one of 'upconv' or 'upsample'.
- 'upconv' will use transposed convolutions for
- learned upsampling.
- 'upsample' will use bilinear upsampling.
- """
- super().__init__()
- assert up_mode in ("upconv", "upsample")
- self.padding = padding
- self.depth = depth - 1
- prev_channels = in_channels
-
- self.first = nn.Sequential(
- *[nn.ReflectionPad2d(3), nn.Conv2d(in_channels, 2 ** wf, kernel_size=7), nn.LeakyReLU(0.2, True)]
- )
- prev_channels = 2 ** wf
-
- self.down_path = nn.ModuleList()
- self.down_sample = nn.ModuleList()
- for i in range(depth):
- if antialiasing and depth > 0:
- self.down_sample.append(
- nn.Sequential(
- *[
- nn.ReflectionPad2d(1),
- nn.Conv2d(prev_channels, prev_channels, kernel_size=3, stride=1, padding=0),
- nn.BatchNorm2d(prev_channels),
- nn.LeakyReLU(0.2, True),
- Downsample(channels=prev_channels, stride=2),
- ]
- )
- )
- else:
- self.down_sample.append(
- nn.Sequential(
- *[
- nn.ReflectionPad2d(1),
- nn.Conv2d(prev_channels, prev_channels, kernel_size=4, stride=2, padding=0),
- nn.BatchNorm2d(prev_channels),
- nn.LeakyReLU(0.2, True),
- ]
- )
- )
- self.down_path.append(
- UNetConvBlock(conv_num, prev_channels, 2 ** (wf + i + 1), padding, batch_norm)
- )
- prev_channels = 2 ** (wf + i + 1)
-
- self.up_path = nn.ModuleList()
- for i in reversed(range(depth)):
- self.up_path.append(
- UNetUpBlock(conv_num, prev_channels, 2 ** (wf + i), up_mode, padding, batch_norm)
- )
- prev_channels = 2 ** (wf + i)
-
- if with_tanh:
- self.last = nn.Sequential(
- *[nn.ReflectionPad2d(1), nn.Conv2d(prev_channels, out_channels, kernel_size=3), nn.Tanh()]
- )
- else:
- self.last = nn.Sequential(
- *[nn.ReflectionPad2d(1), nn.Conv2d(prev_channels, out_channels, kernel_size=3)]
- )
-
- if sync_bn:
- self = DataParallelWithCallback(self)
-
- def forward(self, x):
- x = self.first(x)
-
- blocks = []
- for i, down_block in enumerate(self.down_path):
- blocks.append(x)
- x = self.down_sample[i](x)
- x = down_block(x)
-
- for i, up in enumerate(self.up_path):
- x = up(x, blocks[-i - 1])
-
- return self.last(x)
-
-
-class UNetConvBlock(nn.Module):
- def __init__(self, conv_num, in_size, out_size, padding, batch_norm):
- super(UNetConvBlock, self).__init__()
- block = []
-
- for _ in range(conv_num):
- block.append(nn.ReflectionPad2d(padding=int(padding)))
- block.append(nn.Conv2d(in_size, out_size, kernel_size=3, padding=0))
- if batch_norm:
- block.append(nn.BatchNorm2d(out_size))
- block.append(nn.LeakyReLU(0.2, True))
- in_size = out_size
-
- self.block = nn.Sequential(*block)
-
- def forward(self, x):
- out = self.block(x)
- return out
-
-
-class UNetUpBlock(nn.Module):
- def __init__(self, conv_num, in_size, out_size, up_mode, padding, batch_norm):
- super(UNetUpBlock, self).__init__()
- if up_mode == "upconv":
- self.up = nn.ConvTranspose2d(in_size, out_size, kernel_size=2, stride=2)
- elif up_mode == "upsample":
- self.up = nn.Sequential(
- nn.Upsample(mode="bilinear", scale_factor=2, align_corners=False),
- nn.ReflectionPad2d(1),
- nn.Conv2d(in_size, out_size, kernel_size=3, padding=0),
- )
-
- self.conv_block = UNetConvBlock(conv_num, in_size, out_size, padding, batch_norm)
-
- def center_crop(self, layer, target_size):
- _, _, layer_height, layer_width = layer.size()
- diff_y = (layer_height - target_size[0]) // 2
- diff_x = (layer_width - target_size[1]) // 2
- return layer[:, :, diff_y : (diff_y + target_size[0]), diff_x : (diff_x + target_size[1])]
-
- def forward(self, x, bridge):
- up = self.up(x)
- crop1 = self.center_crop(bridge, up.shape[2:])
- out = torch.cat([up, crop1], 1)
- out = self.conv_block(out)
-
- return out
-
-
-class UnetGenerator(nn.Module):
- """Create a Unet-based generator"""
-
- def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_type="BN", use_dropout=False):
- """Construct a Unet generator
- Parameters:
- input_nc (int) -- the number of channels in input images
- output_nc (int) -- the number of channels in output images
- num_downs (int) -- the number of downsamplings in UNet. For example, if |num_downs| == 7,
- an image of size 128x128 will become of size 1x1 at the bottleneck
- ngf (int) -- the number of filters in the last conv layer
- norm_layer -- normalization layer
- We construct the U-Net from the innermost layer to the outermost layer.
- It is a recursive process.
- """
- super().__init__()
- if norm_type == "BN":
- norm_layer = nn.BatchNorm2d
- elif norm_type == "IN":
- norm_layer = nn.InstanceNorm2d
- else:
- raise NameError("Unknown norm layer")
-
- # construct unet structure
- unet_block = UnetSkipConnectionBlock(
- ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True
- ) # add the innermost layer
- for i in range(num_downs - 5): # add intermediate layers with ngf * 8 filters
- unet_block = UnetSkipConnectionBlock(
- ngf * 8,
- ngf * 8,
- input_nc=None,
- submodule=unet_block,
- norm_layer=norm_layer,
- use_dropout=use_dropout,
- )
- # gradually reduce the number of filters from ngf * 8 to ngf
- unet_block = UnetSkipConnectionBlock(
- ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer
- )
- unet_block = UnetSkipConnectionBlock(
- ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer
- )
- unet_block = UnetSkipConnectionBlock(
- ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer
- )
- self.model = UnetSkipConnectionBlock(
- output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer
- ) # add the outermost layer
-
- def forward(self, input):
- return self.model(input)
-
-
-class UnetSkipConnectionBlock(nn.Module):
- """Defines the Unet submodule with skip connection.
-
- -------------------identity----------------------
- |-- downsampling -- |submodule| -- upsampling --|
- """
-
- def __init__(
- self,
- outer_nc,
- inner_nc,
- input_nc=None,
- submodule=None,
- outermost=False,
- innermost=False,
- norm_layer=nn.BatchNorm2d,
- use_dropout=False,
- ):
- """Construct a Unet submodule with skip connections.
- Parameters:
- outer_nc (int) -- the number of filters in the outer conv layer
- inner_nc (int) -- the number of filters in the inner conv layer
- input_nc (int) -- the number of channels in input images/features
- submodule (UnetSkipConnectionBlock) -- previously defined submodules
- outermost (bool) -- if this module is the outermost module
- innermost (bool) -- if this module is the innermost module
- norm_layer -- normalization layer
- use_dropout (bool) -- whether to use dropout layers.
- """
- super().__init__()
- self.outermost = outermost
- use_bias = norm_layer == nn.InstanceNorm2d
- if input_nc is None:
- input_nc = outer_nc
- downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4, stride=2, padding=1, bias=use_bias)
- downrelu = nn.LeakyReLU(0.2, True)
- downnorm = norm_layer(inner_nc)
- uprelu = nn.LeakyReLU(0.2, True)
- upnorm = norm_layer(outer_nc)
-
- if outermost:
- upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc, kernel_size=4, stride=2, padding=1)
- down = [downconv]
- up = [uprelu, upconv, nn.Tanh()]
- model = down + [submodule] + up
- elif innermost:
- upconv = nn.ConvTranspose2d(inner_nc, outer_nc, kernel_size=4, stride=2, padding=1, bias=use_bias)
- down = [downrelu, downconv]
- up = [uprelu, upconv, upnorm]
- model = down + up
- else:
- upconv = nn.ConvTranspose2d(
- inner_nc * 2, outer_nc, kernel_size=4, stride=2, padding=1, bias=use_bias
- )
- down = [downrelu, downconv, downnorm]
- up = [uprelu, upconv, upnorm]
-
- if use_dropout:
- model = down + [submodule] + up + [nn.Dropout(0.5)]
- else:
- model = down + [submodule] + up
-
- self.model = nn.Sequential(*model)
-
- def forward(self, x):
- if self.outermost:
- return self.model(x)
- else: # add skip connections
- return torch.cat([x, self.model(x)], 1)
-
-
-# ============================================
-# Network testing
-# ============================================
-if __name__ == "__main__":
- from torchsummary import summary
- from torchviz import make_dot  # used below to render the computation graph
-
- device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-
- model = UNet(
- in_channels=3,
- out_channels=3,
- depth=4,
- conv_num=1,
- wf=6,
- padding=True,
- batch_norm=True,
- up_mode="upsample",
- with_tanh=False,
- sync_bn=False,
- )
- model.to(device)
-
- model_pix2pix = UnetGenerator(3, 3, 5, ngf=64, norm_type="BN", use_dropout=False)
- model_pix2pix.to(device)
-
- print("customized unet:")
- summary(model, (3, 256, 256))
-
- print("cyclegan unet:")
- summary(model_pix2pix, (3, 256, 256))
-
- x = torch.zeros(1, 3, 256, 256).requires_grad_(True).to(device)
- g = make_dot(model(x))
- g.render("models/Digraph.gv", view=False)
-
diff --git a/spaces/martingrados/gradio-google-sheet/app.py b/spaces/martingrados/gradio-google-sheet/app.py
deleted file mode 100644
index de9ea54cbc711c94df53e8eadb566e5da3410d6a..0000000000000000000000000000000000000000
--- a/spaces/martingrados/gradio-google-sheet/app.py
+++ /dev/null
@@ -1,22 +0,0 @@
-import pandas as pd
-import os
-
-URL = "https://docs.google.com/spreadsheets/d/1UoKzzRzOCt-FXLLqDKLbryEKEgllGAQUEJ5qtmmQwpU/edit#gid=0"
-csv_url = URL.replace('/edit#gid=', '/export?format=csv&gid=')
-
-SECRET_TEST = os.environ["SECRET_TEST"]
-
-def get_data():
- return pd.read_csv(csv_url)
-
-import gradio as gr
-
-with gr.Blocks() as demo:
- gr.Markdown("# 📈 " + SECRET_TEST + "Real-Time Line Plot")
- with gr.Row():
- with gr.Column():
- gr.DataFrame(get_data, every=5)
- with gr.Column():
- gr.LinePlot(get_data, every=5, x="Date", y="Sales", y_title="Sales ($ millions)", overlay_point=True, width=500, height=500)
-
-demo.queue().launch() # Run the demo with queuing enabled
\ No newline at end of file
diff --git a/spaces/megaaziib/hololive-rvc-models-v2/lib/infer_pack/modules/F0Predictor/F0Predictor.py b/spaces/megaaziib/hololive-rvc-models-v2/lib/infer_pack/modules/F0Predictor/F0Predictor.py
deleted file mode 100644
index f56e49e7f0e6eab3babf0711cae2933371b9f9cc..0000000000000000000000000000000000000000
--- a/spaces/megaaziib/hololive-rvc-models-v2/lib/infer_pack/modules/F0Predictor/F0Predictor.py
+++ /dev/null
@@ -1,16 +0,0 @@
-class F0Predictor(object):
- def compute_f0(self, wav, p_len):
- """
- input: wav:[signal_length]
- p_len:int
- output: f0:[signal_length//hop_length]
- """
- pass
-
- def compute_f0_uv(self, wav, p_len):
- """
- input: wav:[signal_length]
- p_len:int
- output: f0:[signal_length//hop_length],uv:[signal_length//hop_length]
- """
- pass
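For context, a concrete predictor implementing this interface might look like the sketch below. It uses pyworld's DIO + StoneMask pitch estimation; the constructor parameters and the padding to `p_len` are assumptions modeled on typical RVC-style predictors, not code from this file:

```python
import numpy as np
import pyworld


class DioF0Predictor(F0Predictor):
    """Illustrative F0Predictor backed by pyworld's DIO + StoneMask."""

    def __init__(self, hop_length=512, f0_min=50, f0_max=1100, sampling_rate=44100):
        self.hop_length = hop_length
        self.f0_min = f0_min
        self.f0_max = f0_max
        self.sampling_rate = sampling_rate

    def compute_f0(self, wav, p_len=None):
        if p_len is None:
            p_len = wav.shape[0] // self.hop_length
        f0, t = pyworld.dio(
            wav.astype(np.double),
            fs=self.sampling_rate,
            f0_floor=self.f0_min,
            f0_ceil=self.f0_max,
            frame_period=1000 * self.hop_length / self.sampling_rate,
        )
        # Refine the coarse DIO estimate.
        f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate)
        # Pad/trim so the output matches the expected number of frames.
        return np.pad(f0, (0, max(0, p_len - len(f0))))[:p_len]

    def compute_f0_uv(self, wav, p_len=None):
        f0 = self.compute_f0(wav, p_len)
        return f0, (f0 > 0).astype(np.float64)  # uv: 1 where voiced, 0 where unvoiced
```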
diff --git a/spaces/mengdeweide/VITS/app.py b/spaces/mengdeweide/VITS/app.py
deleted file mode 100644
index 8e81d614f80624d2633e39dc83430e277330906e..0000000000000000000000000000000000000000
--- a/spaces/mengdeweide/VITS/app.py
+++ /dev/null
@@ -1,150 +0,0 @@
-# coding=utf-8
-import time
-import os
-import gradio as gr
-import utils
-import argparse
-import commons
-from models import SynthesizerTrn
-from text import text_to_sequence
-import torch
-from torch import no_grad, LongTensor
-import webbrowser
-import logging
-import gradio.processing_utils as gr_processing_utils
-logging.getLogger('numba').setLevel(logging.WARNING)
-limitation = os.getenv("SYSTEM") == "spaces" # limit text and audio length in huggingface spaces
-
-audio_postprocess_ori = gr.Audio.postprocess
-def audio_postprocess(self, y):
- data = audio_postprocess_ori(self, y)
- if data is None:
- return None
- return gr_processing_utils.encode_url_or_file_to_base64(data["name"])
-gr.Audio.postprocess = audio_postprocess
-
-def get_text(text, hps):
- text_norm, clean_text = text_to_sequence(text, hps.symbols, hps.data.text_cleaners)
- if hps.data.add_blank:
- text_norm = commons.intersperse(text_norm, 0)
- text_norm = LongTensor(text_norm)
- return text_norm, clean_text
-
-def vits(text, language, speaker_id, noise_scale, noise_scale_w, length_scale):
- start = time.perf_counter()
- if not len(text):
- return "输入文本不能为空!", None, None
- text = text.replace('\n', ' ').replace('\r', '').replace(" ", "")
- if len(text) > 100 and limitation:
- return f"输入文字过长!{len(text)}>100", None, None
- if language == 0:
- text = f"[ZH]{text}[ZH]"
- elif language == 1:
- text = f"[JA]{text}[JA]"
- else:
- text = f"{text}"
- stn_tst, clean_text = get_text(text, hps_ms)
- with no_grad():
- x_tst = stn_tst.unsqueeze(0).to(device)
- x_tst_lengths = LongTensor([stn_tst.size(0)]).to(device)
- speaker_id = LongTensor([speaker_id]).to(device)
- audio = net_g_ms.infer(x_tst, x_tst_lengths, sid=speaker_id, noise_scale=noise_scale, noise_scale_w=noise_scale_w,
- length_scale=length_scale)[0][0, 0].data.cpu().float().numpy()
-
- return "生成成功!", (22050, audio), f"生成耗时 {round(time.perf_counter()-start, 2)} s"
-
-def search_speaker(search_value):
- for s in speakers:
- if search_value == s:
- return s
- for s in speakers:
- if search_value in s:
- return s
-
-def change_lang(language):
- if language == 0:
- return 0.6, 0.668, 1.2
- else:
- return 0.6, 0.668, 1.1
-
-download_audio_js = """
-() =>{{
- let root = document.querySelector("body > gradio-app");
- if (root.shadowRoot != null)
- root = root.shadowRoot;
- let audio = root.querySelector("#tts-audio").querySelector("audio");
- let text = root.querySelector("#input-text").querySelector("textarea");
- if (audio == undefined)
- return;
- text = text.value;
- if (text == undefined)
- text = Math.floor(Math.random()*100000000);
- audio = audio.src;
- let oA = document.createElement("a");
- oA.download = text.substr(0, 20)+'.wav';
- oA.href = audio;
- document.body.appendChild(oA);
- oA.click();
- oA.remove();
-}}
-"""
-
-if __name__ == '__main__':
- parser = argparse.ArgumentParser()
- parser.add_argument('--device', type=str, default='cpu')
- parser.add_argument('--api', action="store_true", default=False)
- parser.add_argument("--share", action="store_true", default=False, help="share gradio app")
- parser.add_argument("--colab", action="store_true", default=False, help="share gradio app")
- args = parser.parse_args()
- device = torch.device(args.device)
-
- hps_ms = utils.get_hparams_from_file(r'./model/config.json')
- net_g_ms = SynthesizerTrn(
- len(hps_ms.symbols),
- hps_ms.data.filter_length // 2 + 1,
- hps_ms.train.segment_size // hps_ms.data.hop_length,
- n_speakers=hps_ms.data.n_speakers,
- **hps_ms.model)
- _ = net_g_ms.eval().to(device)
- speakers = hps_ms.speakers
- model, optimizer, learning_rate, epochs = utils.load_checkpoint(r'./model/G_953000.pth', net_g_ms, None)
-
- with gr.Blocks() as app:
- gr.Markdown(
- "# VITS Text-to-Speech Demo"
- )
-
- with gr.Tabs():
- with gr.TabItem("vits"):
- with gr.Row():
- with gr.Column():
- input_text = gr.Textbox(label="Text (100-character limit)" if limitation else "Text", lines=5, value="今天晚上吃啥好呢。", elem_id=f"input-text")
- lang = gr.Dropdown(label="Language", choices=["Chinese", "Japanese", "Mixed Chinese/Japanese (wrap Chinese in [ZH][ZH] and Japanese in [JA][JA])"],
- type="index", value="Chinese")
- btn = gr.Button(value="Submit")
- with gr.Row():
- search = gr.Textbox(label="Search Speaker", lines=1)
- btn2 = gr.Button(value="Search")
- sid = gr.Dropdown(label="Speaker", choices=speakers, type="index", value=speakers[228])
- with gr.Row():
- ns = gr.Slider(label="noise_scale (controls emotional variation)", minimum=0.1, maximum=1.0, step=0.1, value=0.6, interactive=True)
- nsw = gr.Slider(label="noise_scale_w (controls phoneme duration)", minimum=0.1, maximum=1.0, step=0.1, value=0.668, interactive=True)
- ls = gr.Slider(label="length_scale (controls overall speaking rate)", minimum=0.1, maximum=2.0, step=0.1, value=1.2, interactive=True)
- with gr.Column():
- o1 = gr.Textbox(label="Output Message")
- o2 = gr.Audio(label="Output Audio", elem_id=f"tts-audio")
- o3 = gr.Textbox(label="Extra Info")
- download = gr.Button("Download Audio")
- btn.click(vits, inputs=[input_text, lang, sid, ns, nsw, ls], outputs=[o1, o2, o3])
- download.click(None, [], [], _js=download_audio_js.format())
- btn2.click(search_speaker, inputs=[search], outputs=[sid])
- lang.change(change_lang, inputs=[lang], outputs=[ns, nsw, ls])
- with gr.TabItem("可用人物一览"):
- gr.Radio(label="Speaker", choices=speakers, interactive=False, type="index")
- if args.colab:
- webbrowser.open("http://127.0.0.1:7860")
- app.queue(concurrency_count=1, api_open=args.api).launch(share=args.share)
\ No newline at end of file
diff --git a/spaces/meraGPT/write-with-vcGPT/app.py b/spaces/meraGPT/write-with-vcGPT/app.py
deleted file mode 100644
index 530916ce5a43b72fabe40ef0dd97206a705d4803..0000000000000000000000000000000000000000
--- a/spaces/meraGPT/write-with-vcGPT/app.py
+++ /dev/null
@@ -1,80 +0,0 @@
-import transformers
-import streamlit as st
-
-from transformers import AutoModelForCausalLM, AutoTokenizer
-
-checkpoint = "."
-
-tokenizer = AutoTokenizer.from_pretrained(checkpoint)
-@st.cache
-def load_model(model_name):
- model = AutoModelForCausalLM.from_pretrained(model_name)
- return model
-
-model = load_model(checkpoint)
-
-def infer(input_ids, bad_words_ids, max_tokens, temperature, top_k, top_p):
-
- output_sequences = model.generate(
- input_ids=input_ids,
- bad_words_ids = bad_words_ids,
- max_new_tokens=max_tokens,
- temperature=temperature,
- top_k=top_k,
- top_p=top_p,
- do_sample=True,
- no_repeat_ngram_size=2,
- early_stopping=True,
- num_beams=4,
- pad_token_id=tokenizer.eos_token_id,
- num_return_sequences=1
- )
-
- return output_sequences
-
-default_value = "We are building the first ever"
-
-#prompts
-st.title("Write with vcGPT 🦄")
-st.write("This is a LLM that was fine-tuned on a dataset of investment memos to help you generate your next pitch.")
-
-sent = st.text_area("Text", default_value)
-
-max_tokens = st.sidebar.slider("Max Tokens", min_value = 16, max_value=64)
-temperature = st.sidebar.slider("Temperature", value = 0.8, min_value = 0.05, max_value=1.0, step=0.05)
-top_k = st.sidebar.slider("Top-k", min_value = 0, max_value=5, value = 4)
-top_p = st.sidebar.slider("Top-p", min_value = 0.0, max_value=1.0, step = 0.05, value = 0.9)
-
-# print(model.config.max_position_embeddings)
-
-encoded_prompt = tokenizer.encode(tokenizer.eos_token+sent, max_length=1024, return_tensors="pt", truncation=True)
-
-# get tokens of words that should not be generated
-bad_words_ids = tokenizer(["confidential", "angel.co", "angellist.com", "angellist"], add_special_tokens=False).input_ids
-
-if encoded_prompt.size()[-1] == 0:
- input_ids = None
-else:
- input_ids = encoded_prompt
-
-output_sequences = infer(input_ids, bad_words_ids, max_tokens, temperature, top_k, top_p)
-
-for generated_sequence_idx, generated_sequence in enumerate(output_sequences):
- print(f"=== GENERATED SEQUENCE {generated_sequence_idx + 1} ===")
- generated_sequences = generated_sequence.tolist()
-
- # Decode text
- text = tokenizer.decode(generated_sequence, clean_up_tokenization_spaces=True, skip_special_tokens=True)
-
- # Remove all text after the stop token
- #text = text[: text.find(args.stop_token) if args.stop_token else None]
-
- # Add the prompt at the beginning of the sequence. Remove the excess text that was used for pre-processing
- total_sequence = (
- sent + text[len(tokenizer.decode(encoded_prompt[0], clean_up_tokenization_spaces=True, skip_special_tokens=True)) :]
- )
-
- generated_sequences.append(total_sequence)
- print(total_sequence)
-
-st.markdown(generated_sequences[-1])
\ No newline at end of file
diff --git a/spaces/merve/hidden-bias/source/_posts/2019-10-03-fairness.html b/spaces/merve/hidden-bias/source/_posts/2019-10-03-fairness.html
deleted file mode 100644
index e87b79e7fec2d286610661ddae8970bb7c9fe1dc..0000000000000000000000000000000000000000
--- a/spaces/merve/hidden-bias/source/_posts/2019-10-03-fairness.html
+++ /dev/null
@@ -1,219 +0,0 @@
-
----
-permalink: /measuring-fairness/
-template: post.html
-
-title: Measuring Fairness
-summary: There are multiple ways to measure accuracy. No matter how we build our model, accuracy across these measures will vary when applied to different groups of people.
-summaryalt: There are multiple ways to assess machine learning models, such as overall accuracy. Another important perspective to consider is the fairness of the model with respect to different groups of people or different contexts of use.
-shareimg: https://pair.withgoogle.com/explorables/images/measuring-fairness.png
-date: 2021-05-01
----
-
-
-
-
-
-
-
-
-
-
-
-
Measuring Fairness
-
-
How do you make sure a model works equally well for different groups of people? It turns out that in many situations, this is harder than you might think.
-
-
The problem is that there are different ways to measure the accuracy of a model, and often it's mathematically impossible for them all to be equal across groups.
-
-
We'll illustrate how this happens by creating a (fake) medical model to screen these people for a disease.
-
-
-
-
-
Ground Truth
-
-
About half of these people actually have the disease a; half of them don't b.
-
-
-
-
-
Model Predictions
-
-
In a perfect world, only sick people would test positive for the disease and only healthy people would test negative.
-
-
-
-
-
Model Mistakes
-
-
But models and tests aren't perfect.
-
-
The model might make a mistake and mark a sick person as healthy c.
-
-
Or the opposite: marking a healthy person as sick f.
-
-
-
-
Never Miss the Disease...
-
-
If there's a simple follow-up test, we could have the model aggressively call close cases so it rarely misses the disease.
-
-
We can quantify this by measuring the percentage of sick people a who test positive g.
-
-
-
-
-
-
-
...Or Avoid Overcalling?
-
-
On the other hand, if there isn't a secondary test, or the treatment uses a drug with a limited supply, we might care more about the percentage of people with positive tests who are actually sick g.
-
-
-
-
These issues and trade-offs in model optimization aren't new, but they're brought into focus when we have the ability to fine-tune exactly how aggressively disease is diagnosed.
-
-
-
- Try adjusting how aggressive the model is in diagnosing the disease
-
-
-
-
-
Subgroup Analysis
-
-
Things get even more complicated when we check if the model treats different groups fairly.¹
-
-
Whatever we decide on in terms of trade-offs between these metrics, we'd probably like them to be roughly even across different groups of people.
-
-
If we're trying to evenly allocate resources, having the model miss more cases in children than adults would be bad! ²
-
-
-
-
-
Base Rates
-
-
If you look carefully, you'll see that the disease is more prevalent in children. That is, the "base rate" of the disease is different across groups.
-
-
The fact that the base rates are different makes the situation surprisingly tricky. For one thing, even though the test catches the same percentage of sick adults and sick children, an adult who tests positive is less likely to have the disease than a child who tests positive.
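The arithmetic behind this is easy to check. The sketch below uses illustrative numbers (not the explorable's simulated population): sensitivity and specificity are held fixed across the two groups, yet the share of positive tests that are truly sick diverges with the base rate.

```python
# Positive predictive value: of everyone who tests positive, how many are sick?
def ppv(sensitivity: float, specificity: float, base_rate: float) -> float:
    true_pos = sensitivity * base_rate
    false_pos = (1 - specificity) * (1 - base_rate)
    return true_pos / (true_pos + false_pos)

sens, spec = 0.9, 0.9                  # the same test for both groups
print(ppv(sens, spec, base_rate=0.5))  # higher-prevalence group: 0.90
print(ppv(sens, spec, base_rate=0.2))  # lower-prevalence group: ~0.69
```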
-
-
-
-
-
Imbalanced Metrics
-
-
Why is there a disparity in diagnosing between children and adults? There is a higher proportion of well adults, so mistakes in the test will cause more well adults to be marked "positive" than well children (and similarly with mistaken negatives).
-
-
-
-
-
To fix this, we could have the model take age into account.
-
-
-
-
-
-
-
Try adjusting the slider to make the model grade adults less aggressively than children.
-
-
-
This allows us to align one metric. But now adults who have the disease are less likely to be diagnosed with it!
-
-
-
-
-
-
No matter how you move the sliders, you won't be able to make both metrics fair at once. It turns out this is inevitable any time the base rates are different, and the test isn't perfect.
-
-
There are multiple ways to define fairness mathematically. It usually isn't possible to satisfy all of them.³
-
-
-
-
-
-
-
-
-
-
Conclusion
-
-
Thankfully, the notion of fairness you choose to satisfy will depend on the context of your model, so while it may not be possible to satisfy every definition of fairness, you can focus on the notions of fairness that make sense for your use case.
-
-
Even if fairness along every dimension isn't possible, we shouldn't stop checking for bias. The Hidden Bias explorable outlines different ways human bias can feed into an ML model.
-
-
More Reading
-
-
In some contexts, setting different thresholds for different populations might not be acceptable. Can you make AI fairer than a judge? explores an algorithm that can send people to jail.
-
-
- ¹ Machine learning practitioners use words like “recall” to describe the percentage of sick people who test positive. Check out the PAIR Guidebook Glossary to learn how to talk to the people building the models.
-
-
² Sometimes we might care more about different error modes in different populations. If treatment is riskier for children, we'd probably want the model to be less aggressive in diagnosing.
-
-
³The above example assumes the model sorts and scores people based on how likely it is that they are sick. With complete control over the model's exact rate of under- and over-diagnosing in both groups, it's actually possible to align both of the metrics we've discussed so far. Try tweaking the model below to get both of them to line up.
-
-
Adding a third metric, the percentage of well people a who test negative e, makes perfect fairness impossible. Can you see why all three metrics won't align unless the base rate of the disease is the same in both populations?
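To sketch the footnote's claim algebraically (standard notation, not from the original post): write $p$ for a group's base rate, $s$ for the share of sick people who test positive, and $t$ for the share of well people who test negative. The share of positive tests that are correct is then

```latex
\mathrm{PPV} = \frac{s\,p}{s\,p + (1 - t)(1 - p)}
```

Holding $s$ and $t$ fixed and below 1, PPV increases strictly with $p$, so all three metrics can only match across groups when the base rates are equal (or the test is perfect).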
-
-
-
-
Drag — to adjust model accuracy and | to adjust the occurrence of disease
-
-
-
Credits
-
-
Adam Pearce // May 2020
-
-
Thanks to Carey Radebaugh, Dan Nanas, David Weinberger, Emily Denton, Emily Reif, Fernanda Viégas, Hal Abelson, James Wexler, Kristen Olson, Lucas Dixon, Mahima Pushkarna, Martin Wattenberg, Michael Terry, Rebecca Salois, Timnit Gebru, Tulsee Doshi, Yannick Assogba, Yoni Halpern, Zan Armstrong, and my other colleagues at Google for their help with this piece.
-
-
- PODsys focuses on AI cluster deployment scenarios, providing a complete
- toolchain including
- infrastructure environment installation, environment deployment, user
- management, system
- monitoring and resource scheduling, aiming to create an open-source,
- efficient, compatible and
- easy-to-use intelligent cluster system environment deployment solution.
-
-
- To achieve these capabilities, PODsys integrates the dozens of drivers,
- software packages, and other
- installers required for AI cluster deployment, and provides a
- range of scripting
- tools to simplify deployment. Using these tools, users can complete the
- deployment of the entire
- cluster with a few simple commands.
-
-
-
- Environment deployment and management: PODsys provides
- tools for rapid environment deployment
- and management, including installation, configuration, and updating
- of cluster environments.
- It also includes the operating system, NVIDIA drivers, InfiniBand drivers
- and other necessary
- software base packages, to provide users with a complete GPU cluster
- environment. Users can
- manage cluster nodes, add or remove nodes, and monitor node status and
- performance with simple commands.
-
-
User management and permission control: PODsys has a
- comprehensive user management and permission
- control mechanism. Administrators can create and manage user accounts and
- assign different permissions
- and resource quotas. This allows each user or team to flexibly allocate
- resources in the cluster
- and ensures the security of the cluster.
-
-
System monitoring and performance optimization: PODsys provides
- comprehensive system monitoring
- and performance optimization capabilities to help users monitor the status
- and performance indicators
- of the cluster in real time. Through a visual interface, users can view
- cluster resource usage,
- job execution, and performance bottlenecks to adjust cluster configurations
- and optimize job
- performance in a timely manner.
-
-
Resource scheduling and job management: PODsys provides efficient
- resource scheduling and job
- management functions, which can automatically schedule and manage jobs
- according to users' needs
- to ensure the resource utilization of the cluster and the execution
- efficiency of jobs.
-
- Choose one machine from the cluster as the management node, and the
- remaining machines as compute nodes. All PODsys deployment operations are
- performed on the management node.
-
-
-
-
Installation Steps on the Management Node
-
Installing OS through BMC
-
- Install Ubuntu Server 22.04.2 on the management node through BMC. Set
- the hostname to 'mu01' and the username to 'nexus'.
-
-
Running podsys_manager
-
- Go to the File page to download podsys_manager_v1.0.tar.gz, then run:
-
-
- sudo tar -xzvf podsys_manager_v1.0.tar.gz -C /home/nexus/
- cd /home/nexus/podsys_manager/
- sudo ./install_server.sh
- sudo ./verify_installation.sh
-
-
- Refer to the /var/log/installer/autoinstall-user-data file on the
- management node when making the following modifications.
- This file contains the configuration needed for unattended automated
- installations.
- The contents that need to be modified are as follows:
-
-
- In line 3, the encrypted password can be generated using the
- following command, with the default being 'podsys@123'
-
- printf 'podsys@123' | openssl passwd -6 -salt 'FhcddHFVZ7ABA4Gi'
- -stdin
-
-
-
- Replace the NIC information in lines 6-10 with the NIC information
- of the compute node. Make sure the NIC that the cluster
- will use is listed first.
-
-
- The information about the disk partition layout (storage:)
- can be modified by referring to the corresponding
- contents from the “/var/log/installer/autoinstall-user-data” file on
- the management node.
-
-
- Remove the contents of line 14 (serial) and line 15 (wwn).
-
-
- Change the IP addresses in lines 17 and 18 to the IP
- address of the
- management node.
-
-
-
-
Starting relevant services
-
- Start the following services on the management node.
-
-
- service dnsmasq start
- service apache2 start
-
-
- Start the compute nodes without an installed operating system to
- initiate automatic installation. If the compute nodes already have an
- operating system,
- they need to be placed in PXE (Preboot Execution Environment) mode.
- After the installation is complete, the compute nodes will shut down
- automatically.
- Once all compute nodes are powered off, stop the relevant services and
- exit Docker on the management node, and start the compute nodes to
- complete the operating system deployment.
-
-
- service dnsmasq stop
- service apache2 stop
- exit
-
-
-
-
Configuration of Cluster Parallel Environments
- All of the following operations are performed on the management node. Before
- setting up the relevant services, run the following commands:
-
-
- cd /home/nexus/podsys_manager
- ./config_client.sh -pre
-
-
-
-
Configuration of NIS (Network Information Service)
-
- Configuration on the Management Node:
-
-
- cd /home/nexus/podsys_manager
- sudo ./config_server.sh -nis-server [serverIP]
- sudo /usr/lib/yp/ypinit -m
- sudo make -C /var/yp
-
-
- Configuration on the Compute Node:
-
-
- cd /home/nexus/podsys_manager
- ./config_client.sh -nis-client [nis server ip]
- sudo yptest
-
-
-
-
Configuration of NFSoRDMA (NFS over Remote Direct Memory
- Access)
- Configuration on the Management Node:
-
-
- ibdev2netdev
- ip address add server-ib-ip dev ibp94s0
- ip link set ibp94s0 up
- cd /home/nexus/podsys_manager
- sudo ./config_server.sh -nfs-server [share directory]
-
-
-
-
\ No newline at end of file
diff --git a/spaces/politweet-sh/politweet/twitterscraper/TwitterScraper.py b/spaces/politweet-sh/politweet/twitterscraper/TwitterScraper.py
deleted file mode 100644
index 0d48b4d10a75a6f216b9dda9d4e517f95a42b50e..0000000000000000000000000000000000000000
--- a/spaces/politweet-sh/politweet/twitterscraper/TwitterScraper.py
+++ /dev/null
@@ -1,138 +0,0 @@
-import re
-from datetime import date
-
-import twint
-
-
-class TwitterScraper(object):
- """
- This class is a Twitter scraper based on twint. It takes a user as input and collects the user's tweets
- from 'from_date' to 'to_date'. If 'from_date' and 'to_date' are not specified, it collects 'num_tweets' tweets from today backwards.
- It outputs a dictionary with each tweet's unique id and some other information.
- input: user, from_date, to_date, num_tweets
- output: dict
- """
-
- def __init__(self, from_date="2022-07-01", to_date=str(date.today()), num_tweets=20):
- """
- This method initializes the TwitterScraper class. It takes the user as input and collects the user's tweets
- from 'from_date' to 'to_date'. If 'from_date' and 'to_date' are not specified, it collects the number of
- tweets 'num_tweets' from today.
- :param from_date: str (format: YYYY-MM-DD)
- :param to_date: str (format: YYYY-MM-DD)
- :param num_tweets: int (number of tweets to be scraped)
- """
- # Make sure the dates are in the correct format
- assert re.match(r'^\d{4}-\d{2}-\d{2}$', from_date) is not None, "from_date must be in the format YYYY-MM-DD"
- # Make sure to_date is later than from_date
- assert from_date < to_date, "from_date must be earlier than to_date"
- # Make sure num_tweets is a positive integer
- # assert 0 < num_tweets <= 60, "num_tweets must be a positive integer and at most 60"
-
- self.from_date = from_date
- self.to_date = to_date
- self.num_tweets = num_tweets
- self.conf = twint.Config()
-
- def scrape_by_user(self, _user: str):
- """This method uses twint to extract tweets based on username"""
- self.conf.Search = "from:@" + _user # when the search configuration is given in this format,
- # twint searches by username.
- return self.__get_tweets__from_twint__()
-
- def scrape_by_several_users(self, _users: list):
- """
- This method uses twint to extract tweets based on username. It takes a list of users as input.
-
- :param _users: list of users
- :return: dataframe
- """
- # TODO: test this method
- self.conf.Search = "from:@" + _users[0]
- for user in _users[1:]:
- self.conf.Search += " OR from:@" + user
- return self.__get_tweets__from_twint__()
-
- def scrape_by_string(self, _string: str):
- """This method uses twint to extract tweets based on string.
- all extracted tweets have the specified word in _string parameter in it.
- """
- self.conf.Search = _string # this tells twint configuration to search for string
- return self.__get_tweets__from_twint__()
-
- def scrape_by_user_and_string(self, _user: str, _string: str):
- """
- This method uses twint to extract tweets based on string and username. It takes a list of users as input.
- :param _user: str
- :param _string: str
- :return: dataframe
- """
- self.conf.Username = _user
- self.conf.Search = _string
- return self.__get_tweets__from_twint__()
-
- # TODO: make method static (Possibly remove this)
- def get_only_tweets(self, tweet_and_replies_info):
- """
- This function's input is a dataframe (the output from the scrape methods); it removes
- all tweets starting with "@", which indicates a reply or retweet.
- """
- tweet_and_replies = tweet_and_replies_info["tweet"]
- indx_replies = []
- for i in range(len(tweet_and_replies)):
- if tweet_and_replies[i].startswith("@"):
- indx_replies.append(i)
-
- tweets_info = tweet_and_replies_info.drop(labels=indx_replies, axis=0)
- # drop removes the rows whose indices are given by
- # indx_replies; axis=0 means rows are dropped.
- # print(len(tweets['tweet']), " of them are Tweets")
- return tweets_info
-
- def __get_tweets__from_twint__(self):
- """
- __get_tweets_from_twint__
- tweet info is a dataframe with the following columns:
- Index(['id', 'conversation_id', 'created_at', 'date', 'timezone', 'place',
- 'tweet', 'language', 'hashtags', 'cashtags', 'user_id', 'user_id_str',
- 'username', 'name', 'day', 'hour', 'link', 'urls', 'photos', 'video',
- 'thumbnail', 'retweet', 'nlikes', 'nreplies', 'nretweets', 'quote_url',
- 'search', 'near', 'geo', 'source', 'user_rt_id', 'user_rt',
- 'retweet_id', 'reply_to', 'retweet_date', 'translate', 'trans_src',
- 'trans_dest']
- we just pick the relevant ones.
- c is a twint.Config() object
- we also configure twint output.
- """
- self.conf.Pandas = True #
- self.conf.Count = True #
- self.conf.Limit = self.num_tweets # specifies how many tweet should be scraped
- self.conf.Since = self.from_date
- self.conf.Until = self.to_date
- self.conf.Hide_output = True # Hides the output. If set to False it will print tweets in the terminal window.
- twint.run.Search(self.conf)
- tweet_and_replies_inf = twint.output.panda.Tweets_df # here we say that output is a dataframe
- if tweet_and_replies_inf.empty:
- print("No tweet containing the word \"" + self.conf.Search + "\" could be found!")
- else:
- tweet_and_replies_inf = tweet_and_replies_inf[
- ["id", "tweet", "date", "user_id", "username", "urls", 'nlikes', 'nreplies', 'nretweets']]
- return tweet_and_replies_inf
-
- # def __check_date_type(d1, d2):
- #     # If the date inputs are not strings, raise an exception
- #     if (type(d1) or type(d2)) is not type("str"):
- #         print("[!] Please make sure the date is a string in this format \"yyyy-mm-dd\" ")
- #         raise Exception("Incorrect date type Exception!")
- #     elif (len(d1.split("-")) or len(d2.split("-"))) < 2:
- #         print("[!] Please make sure the date is a string in this format \"yyyy-mm-dd\" ")
- #         raise Exception("Incorrect date type Exception!")
-
- def __repr__(self):
- return "TwitterScraper(from_date={}, to_date={}, num_tweets={})".format(self.from_date, self.to_date,
- self.num_tweets)
-
-
-if __name__ == "__main__":
- sc = TwitterScraper(from_date="2022-05-01", to_date="2022-07-31", num_tweets=40)
- dc = sc.scrape_by_user("jimmieakesson")
- print(dc.head())
- print(dc.shape)
- print(dc.columns)
diff --git a/spaces/pranked03/IssueFixerGPT/app.py b/spaces/pranked03/IssueFixerGPT/app.py
deleted file mode 100644
index ce50721ce2c38f72df004bad51146f79cadc8877..0000000000000000000000000000000000000000
--- a/spaces/pranked03/IssueFixerGPT/app.py
+++ /dev/null
@@ -1,40 +0,0 @@
-import streamlit as st
-from new import chat_with_chatseo
-import pandas as pd
-
-st.set_page_config(layout="wide")
-
-i, j = st.columns(2)
-#set title
-with i:
- st.title("Chat with ChatSEO")
-
-
- #create 3 input boxes for the 3 input variables
- issue = st.text_input("Issue")
- description = st.text_input("Description")
- url = st.text_input("URL")
-
- #create button and on press, run the chain
- if st.button("Run"):
- #run the chain
- output = chat_with_chatseo(issue, description, url).content
- #display the output
- st.write(output)
-
-with j:
-
- st.write("## Upload a csv file")
-
- # Upload a csv file using streamlit and show the data
- uploaded_file = st.file_uploader("Choose a CSV file")
- if uploaded_file is not None:
- df = pd.read_csv(uploaded_file)
- result_df = df[df["Issue Priority"] == "High"].drop(["URLs", "% of Total", "How To Fix"], axis=1)
- st.write(result_df)
- for index, row in result_df.iterrows():
- with st.expander(row["Issue Name"]):
- st.write(row["Description"])
- if st.button("Fix Issue", key=index):
- output = chat_with_chatseo(row["Issue Name"], row["Description"], "https://www.upthrust.io").content
- st.write(output)
\ No newline at end of file
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/charset_normalizer/cli/__init__.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/charset_normalizer/cli/__init__.py
deleted file mode 100644
index d95fedfe5723713337f1a94ec8f0a00b6ca7816a..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/charset_normalizer/cli/__init__.py
+++ /dev/null
@@ -1,6 +0,0 @@
-from .__main__ import cli_detect, query_yes_no
-
-__all__ = (
- "cli_detect",
- "query_yes_no",
-)
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/subset/__init__.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/subset/__init__.py
deleted file mode 100644
index 4b9cb00f6038bee271aaaa0d8140fb420b637136..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/subset/__init__.py
+++ /dev/null
@@ -1,3714 +0,0 @@
-# Copyright 2013 Google, Inc. All Rights Reserved.
-#
-# Google Author(s): Behdad Esfahbod
-
-from fontTools import config
-from fontTools.misc.roundTools import otRound
-from fontTools import ttLib
-from fontTools.ttLib.tables import otTables
-from fontTools.ttLib.tables.otBase import USE_HARFBUZZ_REPACKER
-from fontTools.otlLib.maxContextCalc import maxCtxFont
-from fontTools.pens.basePen import NullPen
-from fontTools.misc.loggingTools import Timer
-from fontTools.misc.cliTools import makeOutputFileName
-from fontTools.subset.util import _add_method, _uniq_sort
-from fontTools.subset.cff import *
-from fontTools.subset.svg import *
-from fontTools.varLib import varStore # for subset_varidxes
-from fontTools.ttLib.tables._n_a_m_e import NameRecordVisitor
-import sys
-import struct
-import array
-import logging
-from collections import Counter, defaultdict
-from functools import reduce
-from types import MethodType
-
-__usage__ = "pyftsubset font-file [glyph...] [--option=value]..."
-
-__doc__ = (
- """\
-pyftsubset -- OpenType font subsetter and optimizer
-
-pyftsubset is an OpenType font subsetter and optimizer, based on fontTools.
-It accepts any TT- or CFF-flavored OpenType (.otf or .ttf) or WOFF (.woff)
-font file. The subsetted glyph set is based on the specified glyphs
-or characters, and specified OpenType layout features.
-
-The tool also performs some size-reducing optimizations, aimed for using
-subset fonts as webfonts. Individual optimizations can be enabled or
-disabled, and are enabled by default when they are safe.
-
-Usage: """
- + __usage__
- + """
-
-At least one glyph or one of --gids, --gids-file, --glyphs, --glyphs-file,
---text, --text-file, --unicodes, or --unicodes-file, must be specified.
-
-Args:
-
-font-file
- The input font file.
-glyph
- Specify one or more glyph identifiers to include in the subset. Must be
- PS glyph names, or the special string '*' to keep the entire glyph set.
-
-Initial glyph set specification
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-These options populate the initial glyph set. The same option can appear
-multiple times, and the results are accumulated.
-
---gids=[,...]
- Specify comma/whitespace-separated list of glyph IDs or ranges as decimal
- numbers. For example, --gids=10-12,14 adds glyphs with numbers 10, 11,
- 12, and 14.
-
---gids-file=
- Like --gids but reads from a file. Anything after a '#' on any line is
- ignored as comments.
-
---glyphs=[,...]
- Specify comma/whitespace-separated PS glyph names to add to the subset.
- Note that only PS glyph names are accepted, not gidNNN, U+XXXX, etc
- that are accepted on the command line. The special string '*' will keep
- the entire glyph set.
-
---glyphs-file=
- Like --glyphs but reads from a file. Anything after a '#' on any line
- is ignored as comments.
-
---text=
- Specify characters to include in the subset, as UTF-8 string.
-
---text-file=
- Like --text but reads from a file. Newline character are not added to
- the subset.
-
---unicodes=[,...]
- Specify comma/whitespace-separated list of Unicode codepoints or
- ranges as hex numbers, optionally prefixed with 'U+', 'u', etc.
- For example, --unicodes=41-5a,61-7a adds ASCII letters, so does
- the more verbose --unicodes=U+0041-005A,U+0061-007A.
- The special string '*' will choose all Unicode characters mapped
- by the font.
-
---unicodes-file=
- Like --unicodes, but reads from a file. Anything after a '#' on any
- line in the file is ignored as comments.
-
---ignore-missing-glyphs
- Do not fail if some requested glyphs or gids are not available in
- the font.
-
---no-ignore-missing-glyphs
- Stop and fail if some requested glyphs or gids are not available
- in the font. [default]
-
---ignore-missing-unicodes [default]
- Do not fail if some requested Unicode characters (including those
- indirectly specified using --text or --text-file) are not available
- in the font.
-
---no-ignore-missing-unicodes
- Stop and fail if some requested Unicode characters are not available
- in the font.
- Note the default discrepancy between ignoring missing glyphs versus
- unicodes. This is for historical reasons and in the future
- --no-ignore-missing-unicodes might become default.
-
-Other options
-^^^^^^^^^^^^^
-
-For the other options listed below, to see the current value of the option,
-pass a value of '?' to it, with or without a '='.
-
-Examples::
-
- $ pyftsubset --glyph-names?
- Current setting for 'glyph-names' is: False
- $ ./pyftsubset --name-IDs=?
- Current setting for 'name-IDs' is: [0, 1, 2, 3, 4, 5, 6]
- $ ./pyftsubset --hinting? --no-hinting --hinting?
- Current setting for 'hinting' is: True
- Current setting for 'hinting' is: False
-
-Output options
-^^^^^^^^^^^^^^
-
---output-file=
- The output font file. If not specified, the subsetted font
- will be saved as font-file.subset.
-
---flavor=
- Specify flavor of output font file. May be 'woff' or 'woff2'.
- Note that WOFF2 requires the Brotli Python extension, available
- at https://github.com/google/brotli
-
---with-zopfli
- Use the Google Zopfli algorithm to compress WOFF. The output is 3-8 %
- smaller than pure zlib, but the compression speed is much slower.
- The Zopfli Python bindings are available at:
- https://pypi.python.org/pypi/zopfli
-
---harfbuzz-repacker
- By default, we serialize GPOS/GSUB using the HarfBuzz Repacker when
- uharfbuzz can be imported and is successful, otherwise fall back to
- the pure-python serializer. Set the option to force using the HarfBuzz
- Repacker (raises an error if uharfbuzz can't be found or fails).
-
---no-harfbuzz-repacker
- Always use the pure-python serializer even if uharfbuzz is available.
-
-Glyph set expansion
-^^^^^^^^^^^^^^^^^^^
-
-These options control how additional glyphs are added to the subset.
-
---retain-gids
- Retain glyph indices; just empty the glyphs that are not needed, in place.
-
---notdef-glyph
- Add the '.notdef' glyph to the subset (ie, keep it). [default]
-
---no-notdef-glyph
- Drop the '.notdef' glyph unless specified in the glyph set. This
- saves a few bytes, but is not possible for Postscript-flavored
- fonts, as those require '.notdef'. For TrueType-flavored fonts,
- this works fine as long as no unsupported glyphs are requested
- from the font.
-
---notdef-outline
- Keep the outline of '.notdef' glyph. The '.notdef' glyph outline is
- used when glyphs not supported by the font are to be shown. It is not
- needed otherwise.
-
---no-notdef-outline
- When including a '.notdef' glyph, remove its outline. This saves
- a few bytes. [default]
-
---recommended-glyphs
- Add glyphs 0, 1, 2, and 3 to the subset, as recommended for
- TrueType-flavored fonts: '.notdef', 'NULL' or '.null', 'CR', 'space'.
- Some legacy software might require this, but no modern system does.
-
---no-recommended-glyphs
- Do not add glyphs 0, 1, 2, and 3 to the subset, unless specified in
- glyph set. [default]
-
---no-layout-closure
- Do not expand the glyph set to add glyphs produced by OpenType layout
- features. Instead, OpenType layout features will be subset to keep only
- the rules that are relevant to the otherwise-specified glyph set.
-
---layout-features[+|-]=<feature>[,<feature>...]
- Specify (=), add to (+=) or exclude from (-=) the comma-separated
- set of OpenType layout feature tags that will be preserved.
- Glyph variants used by the preserved features are added to the
- specified subset glyph set. By default, 'calt', 'ccmp', 'clig', 'curs',
- 'dnom', 'frac', 'kern', 'liga', 'locl', 'mark', 'mkmk', 'numr', 'rclt',
- 'rlig', 'rvrn', and all features required for script shaping are
- preserved. To see the full list, try '--layout-features=?'.
- Use '*' to keep all features.
- Multiple --layout-features options can be provided if necessary.
- Examples:
-
- --layout-features+=onum,pnum,ss01
- * Keep the default set of features and 'onum', 'pnum', 'ss01'.
- --layout-features-='mark','mkmk'
- * Keep the default set of features but drop 'mark' and 'mkmk'.
- --layout-features='kern'
- * Only keep the 'kern' feature, drop all others.
- --layout-features=''
- * Drop all features.
- --layout-features='*'
- * Keep all features.
- --layout-features+=aalt --layout-features-=vrt2
- * Keep default set of features plus 'aalt', but drop 'vrt2'.
-
---layout-scripts[+|-]=<script>[,<script>...]
-
-    def tex2mathml_catch_exception(content, *args, **kwargs):
-        try:
-            content = tex2mathml(content, *args, **kwargs)
-        except Exception:
-            pass  # on conversion failure, return the raw TeX source unchanged
-        return content
-
- def replace_math_no_render(match):
- content = match.group(1)
- if 'mode=display' in match.group(0):
- content = content.replace('\n', '')
- return f"$${content}$$"
- else:
- return f"${content}$"
-
- def replace_math_render(match):
- content = match.group(1)
- if 'mode=display' in match.group(0):
- if '\\begin{aligned}' in content:
- content = content.replace('\\begin{aligned}', '\\begin{array}')
- content = content.replace('\\end{aligned}', '\\end{array}')
- content = content.replace('&', ' ')
- content = tex2mathml_catch_exception(content, display="block")
- return content
- else:
- return tex2mathml_catch_exception(content)
-
-    def markdown_bug_hunt(content):
-        """Fix an mdx_math bug: a begin command wrapped in single $ emits a redundant <script> wrapper."""
-        content = content.replace('<script type="math/tex">\n<script type="math/tex; mode=display">',
-                                  '<script type="math/tex; mode=display">')
-        content = content.replace('</script>\n</script>', '</script>')
-        return content
-
-
-    if ('$' in txt) and ('```' not in txt):  # the text has '$' math markers and no '```' code fences
- # convert everything to html format
- split = markdown.markdown(text='---')
- convert_stage_1 = markdown.markdown(text=txt, extensions=['mdx_math', 'fenced_code', 'tables', 'sane_lists'], extension_configs=markdown_extension_configs)
- convert_stage_1 = markdown_bug_hunt(convert_stage_1)
-        # re.DOTALL makes '.' match any character, including newlines (inline flag (?s));
-        # see the standalone sketch after this function for what that changes here.
- # 1. convert to easy-to-copy tex (do not render math)
- convert_stage_2_1, n = re.subn(find_equation_pattern, replace_math_no_render, convert_stage_1, flags=re.DOTALL)
- # 2. convert to rendered equation
- convert_stage_2_2, n = re.subn(find_equation_pattern, replace_math_render, convert_stage_1, flags=re.DOTALL)
- # cat them together
-        return pre + convert_stage_2_1 + split + convert_stage_2_2 + suf
- else:
- return pre + markdown.markdown(txt, extensions=['fenced_code', 'codehilite', 'tables', 'sane_lists']) + suf
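
The two-stage substitution above hinges on re.DOTALL. A minimal,
self-contained sketch of what the flag changes (the pattern and text here are
illustrative only, not the real find_equation_pattern):

```python
import re

text = "$$a\n+ b$$"
pattern = r"\$\$(.*)\$\$"

# Without re.DOTALL, '.' stops at the newline, so the multi-line
# equation is never matched: 0 substitutions.
print(re.subn(pattern, r"<eq>\1</eq>", text))
# With re.DOTALL, the whole equation is captured as one group: 1 substitution.
print(re.subn(pattern, r"<eq>\1</eq>", text, flags=re.DOTALL))
```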
-
-
-sample = preprocess_newbing_out(sample)
-sample = close_up_code_segment_during_stream(sample)
-sample = markdown_convertion(sample)
-with open('tmp.html', 'w', encoding='utf8') as f:
-    f.write("""<!DOCTYPE html>
-<html>
-<head>
-    <title>My Website</title>
-</head>
-<body>
-""")
-    f.write(sample)
diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Intelcelerong530graphicsdriverdownload.md b/spaces/quidiaMuxgu/Expedit-SAM/Intelcelerong530graphicsdriverdownload.md
deleted file mode 100644
index 3d3f8ada656ba8e6274ee49b48c63ae164cd211e..0000000000000000000000000000000000000000
--- a/spaces/quidiaMuxgu/Expedit-SAM/Intelcelerong530graphicsdriverdownload.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
-Hillsong United-More Than Life full album zip · intel celeron g530 graphics driver download.
-
-
-
diff --git a/spaces/r3gm/RVC_HF/infer/lib/train/utils.py b/spaces/r3gm/RVC_HF/infer/lib/train/utils.py
deleted file mode 100644
index dd965fc4dd2af09e445a7f625f2681460874da7a..0000000000000000000000000000000000000000
--- a/spaces/r3gm/RVC_HF/infer/lib/train/utils.py
+++ /dev/null
@@ -1,478 +0,0 @@
-import argparse
-import glob
-import json
-import logging
-import os
-import subprocess
-import sys
-import shutil
-
-import numpy as np
-import torch
-from scipy.io.wavfile import read
-
-MATPLOTLIB_FLAG = False
-
-logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
-logger = logging  # module-level fallback; get_logger() below rebinds this to a real Logger
-
-
-def load_checkpoint_d(checkpoint_path, combd, sbd, optimizer=None, load_opt=1):
- assert os.path.isfile(checkpoint_path)
- checkpoint_dict = torch.load(checkpoint_path, map_location="cpu")
-
- ##################
- def go(model, bkey):
- saved_state_dict = checkpoint_dict[bkey]
- if hasattr(model, "module"):
- state_dict = model.module.state_dict()
- else:
- state_dict = model.state_dict()
- new_state_dict = {}
-        for k, v in state_dict.items():  # iterate over the shapes the model expects
-            try:
-                new_state_dict[k] = saved_state_dict[k]
-                if saved_state_dict[k].shape != state_dict[k].shape:
-                    logger.warning(
-                        "shape-%s-mismatch. need: %s, get: %s",
-                        k,
-                        state_dict[k].shape,
-                        saved_state_dict[k].shape,
-                    )
-                    raise KeyError
-            except KeyError:
-                # logger.info(traceback.format_exc())
-                logger.info("%s is not in the checkpoint", k)  # missing from the pretrained checkpoint
-                new_state_dict[k] = v  # keep the model's own randomly initialized value
- if hasattr(model, "module"):
- model.module.load_state_dict(new_state_dict, strict=False)
- else:
- model.load_state_dict(new_state_dict, strict=False)
- return model
-
- go(combd, "combd")
- model = go(sbd, "sbd")
- #############
- logger.info("Loaded model weights")
-
- iteration = checkpoint_dict["iteration"]
- learning_rate = checkpoint_dict["learning_rate"]
-    if optimizer is not None and load_opt == 1:
-        # If the saved optimizer state cannot be loaded (e.g. it is empty), the optimizer is
-        # effectively re-initialized, which may also desync the LR schedule; the outermost
-        # code in the train script catches that failure.
- # try:
- optimizer.load_state_dict(checkpoint_dict["optimizer"])
- # except:
- # traceback.print_exc()
- logger.info("Loaded checkpoint '{}' (epoch {})".format(checkpoint_path, iteration))
- return model, optimizer, learning_rate, iteration
-
-
-def load_checkpoint(checkpoint_path, model, optimizer=None, load_opt=1):
- assert os.path.isfile(checkpoint_path)
- checkpoint_dict = torch.load(checkpoint_path, map_location="cpu")
-
- saved_state_dict = checkpoint_dict["model"]
- if hasattr(model, "module"):
- state_dict = model.module.state_dict()
- else:
- state_dict = model.state_dict()
- new_state_dict = {}
-    for k, v in state_dict.items():  # iterate over the shapes the model expects
-        try:
-            new_state_dict[k] = saved_state_dict[k]
-            if saved_state_dict[k].shape != state_dict[k].shape:
-                logger.warning(
-                    "shape-%s-mismatch|need-%s|get-%s",
-                    k,
-                    state_dict[k].shape,
-                    saved_state_dict[k].shape,
-                )
-                raise KeyError
-        except KeyError:
-            # logger.info(traceback.format_exc())
-            logger.info("%s is not in the checkpoint", k)  # missing from the pretrained checkpoint
-            new_state_dict[k] = v  # keep the model's own randomly initialized value
- if hasattr(model, "module"):
- model.module.load_state_dict(new_state_dict, strict=False)
- else:
- model.load_state_dict(new_state_dict, strict=False)
- logger.info("Loaded model weights")
-
- iteration = checkpoint_dict["iteration"]
- learning_rate = checkpoint_dict["learning_rate"]
-    if optimizer is not None and load_opt == 1:
-        # Same caveat as in load_checkpoint_d: a failed optimizer load effectively
-        # re-initializes the optimizer and may desync the LR schedule; the train
-        # script catches that failure at its outermost level.
- # try:
- optimizer.load_state_dict(checkpoint_dict["optimizer"])
- # except:
- # traceback.print_exc()
- logger.info("Loaded checkpoint '{}' (epoch {})".format(checkpoint_path, iteration))
- return model, optimizer, learning_rate, iteration
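
Both loaders above share the same shape-checked partial-load pattern. A
minimal, self-contained sketch of just that pattern (the model and shapes are
invented for illustration):

```python
import torch
from torch import nn

model = nn.Linear(4, 2)
# Pretend checkpoint: 'weight' has the wrong shape, 'bias' matches.
saved = {"weight": torch.zeros(3, 4), "bias": torch.zeros(2)}

new_state = {}
for k, v in model.state_dict().items():
    s = saved.get(k)
    # Keep the saved tensor only when it exists and its shape matches;
    # otherwise fall back to the model's freshly initialized value.
    new_state[k] = s if s is not None and s.shape == v.shape else v

model.load_state_dict(new_state, strict=False)
```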
-
-
-def save_checkpoint(model, optimizer, learning_rate, iteration, checkpoint_path):
- logger.info(
- "Saving model and optimizer state at epoch {} to {}".format(
- iteration, checkpoint_path
- )
- )
- if hasattr(model, "module"):
- state_dict = model.module.state_dict()
- else:
- state_dict = model.state_dict()
- torch.save(
- {
- "model": state_dict,
- "iteration": iteration,
- "optimizer": optimizer.state_dict(),
- "learning_rate": learning_rate,
- },
- checkpoint_path,
- )
-
-
-def save_checkpoint_d(combd, sbd, optimizer, learning_rate, iteration, checkpoint_path):
- logger.info(
- "Saving model and optimizer state at epoch {} to {}".format(
- iteration, checkpoint_path
- )
- )
- if hasattr(combd, "module"):
- state_dict_combd = combd.module.state_dict()
- else:
- state_dict_combd = combd.state_dict()
- if hasattr(sbd, "module"):
- state_dict_sbd = sbd.module.state_dict()
- else:
- state_dict_sbd = sbd.state_dict()
- torch.save(
- {
- "combd": state_dict_combd,
- "sbd": state_dict_sbd,
- "iteration": iteration,
- "optimizer": optimizer.state_dict(),
- "learning_rate": learning_rate,
- },
- checkpoint_path,
- )
-
-
-def summarize(
- writer,
- global_step,
- scalars={},
- histograms={},
- images={},
- audios={},
- audio_sampling_rate=22050,
-):
- for k, v in scalars.items():
- writer.add_scalar(k, v, global_step)
- for k, v in histograms.items():
- writer.add_histogram(k, v, global_step)
- for k, v in images.items():
- writer.add_image(k, v, global_step, dataformats="HWC")
- for k, v in audios.items():
- writer.add_audio(k, v, global_step, audio_sampling_rate)
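
A hypothetical call with TensorBoard's SummaryWriter (the log directory and
tag names here are invented):

```python
from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter("logs/demo")  # hypothetical log directory
summarize(writer, global_step=100, scalars={"loss/g/total": 3.2, "lr": 2e-4})
writer.close()
```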
-
-
-def latest_checkpoint_path(dir_path, regex="G_*.pth"):
- f_list = glob.glob(os.path.join(dir_path, regex))
- f_list.sort(key=lambda f: int("".join(filter(str.isdigit, f))))
- x = f_list[-1]
- logger.debug(x)
- return x
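
Note that the sort key above concatenates every digit in the whole path, so a
digit in the directory name (e.g. "logs/exp1/G_100.pth" sorts by 1100, not
100) can skew the ordering. A more robust key, as a sketch, parses only the
step number out of the file name (the "G_<step>.pth" naming is assumed):

```python
import os
import re

def step_number(path: str) -> int:
    # Extract the integer step from names like 'G_2333333.pth'.
    m = re.search(r"_(\d+)\.pth$", os.path.basename(path))
    return int(m.group(1)) if m else -1

# f_list.sort(key=step_number) would then order checkpoints by training step.
```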
-
-
-def plot_spectrogram_to_numpy(spectrogram):
- global MATPLOTLIB_FLAG
- if not MATPLOTLIB_FLAG:
- import matplotlib
-
- matplotlib.use("Agg")
- MATPLOTLIB_FLAG = True
- mpl_logger = logging.getLogger("matplotlib")
- mpl_logger.setLevel(logging.WARNING)
- import matplotlib.pylab as plt
- import numpy as np
-
- fig, ax = plt.subplots(figsize=(10, 2))
- im = ax.imshow(spectrogram, aspect="auto", origin="lower", interpolation="none")
- plt.colorbar(im, ax=ax)
- plt.xlabel("Frames")
- plt.ylabel("Channels")
- plt.tight_layout()
-
- fig.canvas.draw()
-    data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)  # np.fromstring was removed from NumPy
- data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
- plt.close()
- return data
-
-
-def plot_alignment_to_numpy(alignment, info=None):
- global MATPLOTLIB_FLAG
- if not MATPLOTLIB_FLAG:
- import matplotlib
-
- matplotlib.use("Agg")
- MATPLOTLIB_FLAG = True
- mpl_logger = logging.getLogger("matplotlib")
- mpl_logger.setLevel(logging.WARNING)
- import matplotlib.pylab as plt
- import numpy as np
-
- fig, ax = plt.subplots(figsize=(6, 4))
- im = ax.imshow(
- alignment.transpose(), aspect="auto", origin="lower", interpolation="none"
- )
- fig.colorbar(im, ax=ax)
- xlabel = "Decoder timestep"
- if info is not None:
- xlabel += "\n\n" + info
- plt.xlabel(xlabel)
- plt.ylabel("Encoder timestep")
- plt.tight_layout()
-
- fig.canvas.draw()
-    data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)  # np.fromstring was removed from NumPy
- data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
- plt.close()
- return data
-
-
-def load_wav_to_torch(full_path):
- sampling_rate, data = read(full_path)
- return torch.FloatTensor(data.astype(np.float32)), sampling_rate
-
-
-def load_filepaths_and_text(filename, split="|"):
- with open(filename, encoding="utf-8") as f:
- filepaths_and_text = [line.strip().split(split) for line in f]
- return filepaths_and_text
-
-
-def get_hparams(init=True):
- """
- todo:
- 结尾七人组:
- 保存频率、总epoch done
- bs done
- pretrainG、pretrainD done
- 卡号:os.en["CUDA_VISIBLE_DEVICES"] done
- if_latest done
- 模型:if_f0 done
- 采样率:自动选择config done
- 是否缓存数据集进GPU:if_cache_data_in_gpu done
-
- -m:
- 自动决定training_files路径,改掉train_nsf_load_pretrain.py里的hps.data.training_files done
- -c不要了
- """
- parser = argparse.ArgumentParser()
- parser.add_argument(
- "-se",
- "--save_every_epoch",
- type=int,
- required=True,
- help="checkpoint save frequency (epoch)",
- )
- parser.add_argument(
- "-te", "--total_epoch", type=int, required=True, help="total_epoch"
- )
-    parser.add_argument(
-        "-pg", "--pretrainG", type=str, default="", help="pretrained Generator path"
-    )
-    parser.add_argument(
-        "-pd", "--pretrainD", type=str, default="", help="pretrained Discriminator path"
-    )
-    parser.add_argument("-g", "--gpus", type=str, default="0", help="GPU ids separated by '-'")
- parser.add_argument(
- "-bs", "--batch_size", type=int, required=True, help="batch size"
- )
- parser.add_argument(
- "-e", "--experiment_dir", type=str, required=True, help="experiment dir"
- ) # -m
- parser.add_argument(
- "-sr", "--sample_rate", type=str, required=True, help="sample rate, 32k/40k/48k"
- )
- parser.add_argument(
- "-sw",
- "--save_every_weights",
- type=str,
- default="0",
- help="save the extracted model in weights directory when saving checkpoints",
- )
- parser.add_argument(
- "-v", "--version", type=str, required=True, help="model version"
- )
- parser.add_argument(
- "-f0",
- "--if_f0",
- type=int,
- required=True,
- help="use f0 as one of the inputs of the model, 1 or 0",
- )
- parser.add_argument(
- "-l",
- "--if_latest",
- type=int,
- required=True,
- help="if only save the latest G/D pth file, 1 or 0",
- )
- parser.add_argument(
- "-c",
- "--if_cache_data_in_gpu",
- type=int,
- required=True,
- help="if caching the dataset in GPU memory, 1 or 0",
- )
-
- args = parser.parse_args()
- name = args.experiment_dir
- experiment_dir = os.path.join("./logs", args.experiment_dir)
-
- config_save_path = os.path.join(experiment_dir, "config.json")
- with open(config_save_path, "r") as f:
- config = json.load(f)
-
- hparams = HParams(**config)
- hparams.model_dir = hparams.experiment_dir = experiment_dir
- hparams.save_every_epoch = args.save_every_epoch
- hparams.name = name
- hparams.total_epoch = args.total_epoch
- hparams.pretrainG = args.pretrainG
- hparams.pretrainD = args.pretrainD
- hparams.version = args.version
- hparams.gpus = args.gpus
- hparams.train.batch_size = args.batch_size
- hparams.sample_rate = args.sample_rate
- hparams.if_f0 = args.if_f0
- hparams.if_latest = args.if_latest
- hparams.save_every_weights = args.save_every_weights
- hparams.if_cache_data_in_gpu = args.if_cache_data_in_gpu
- hparams.data.training_files = "%s/filelist.txt" % experiment_dir
- return hparams
-
-
-def get_hparams_from_dir(model_dir):
- config_save_path = os.path.join(model_dir, "config.json")
- with open(config_save_path, "r") as f:
- data = f.read()
- config = json.loads(data)
-
- hparams = HParams(**config)
- hparams.model_dir = model_dir
- return hparams
-
-
-def get_hparams_from_file(config_path):
- with open(config_path, "r") as f:
- data = f.read()
- config = json.loads(data)
-
- hparams = HParams(**config)
- return hparams
-
-
-def check_git_hash(model_dir):
- source_dir = os.path.dirname(os.path.realpath(__file__))
- if not os.path.exists(os.path.join(source_dir, ".git")):
-        logger.warning(
- "{} is not a git repository, therefore hash value comparison will be ignored.".format(
- source_dir
- )
- )
- return
-
- cur_hash = subprocess.getoutput("git rev-parse HEAD")
-
- path = os.path.join(model_dir, "githash")
-    if os.path.exists(path):
-        with open(path) as f:
-            saved_hash = f.read()
-        if saved_hash != cur_hash:
-            logger.warning(
-                "git hash values are different. {}(saved) != {}(current)".format(
-                    saved_hash[:8], cur_hash[:8]
-                )
-            )
-    else:
-        with open(path, "w") as f:
-            f.write(cur_hash)
-
-
-def get_logger(model_dir, filename="train.log"):
- global logger
- logger = logging.getLogger(os.path.basename(model_dir))
- logger.setLevel(logging.DEBUG)
-
- formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s")
- if not os.path.exists(model_dir):
- os.makedirs(model_dir)
- h = logging.FileHandler(os.path.join(model_dir, filename))
- h.setLevel(logging.DEBUG)
- h.setFormatter(formatter)
- logger.addHandler(h)
- return logger
-
-
-class HParams:
- def __init__(self, **kwargs):
- for k, v in kwargs.items():
-            if isinstance(v, dict):
- v = HParams(**v)
- self[k] = v
-
- def keys(self):
- return self.__dict__.keys()
-
- def items(self):
- return self.__dict__.items()
-
- def values(self):
- return self.__dict__.values()
-
- def __len__(self):
- return len(self.__dict__)
-
- def __getitem__(self, key):
- return getattr(self, key)
-
- def __setitem__(self, key, value):
- return setattr(self, key, value)
-
- def __contains__(self, key):
- return key in self.__dict__
-
- def __repr__(self):
- return self.__dict__.__repr__()
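
A quick usage sketch (all values invented): nested dicts become attribute
access, while dict-style access keeps working:

```python
hp = HParams(**{"train": {"batch_size": 8, "lr": 2e-4}, "sample_rate": 40000})
print(hp.train.batch_size)  # 8 -- nested dicts become nested HParams
print(hp["sample_rate"])    # 40000 -- mapping-style access still works
print("train" in hp)        # True
hp.train.lr = 1e-4          # attributes stay ordinary, mutable Python attributes
```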
diff --git a/spaces/radames/nginx-gradio-reverse-proxy/README.md b/spaces/radames/nginx-gradio-reverse-proxy/README.md
deleted file mode 100644
index 685a0ed1d1baa6a6e3cccb0a45f4a6c87db65e8f..0000000000000000000000000000000000000000
--- a/spaces/radames/nginx-gradio-reverse-proxy/README.md
+++ /dev/null
@@ -1,11 +0,0 @@
----
-title: Nginx Gradio Reverse Proxy
-emoji: 🐠
-colorFrom: purple
-colorTo: purple
-sdk: docker
-app_port: 4444
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Arcview3264bitfreedownload Best Practices and Examples for Using ArcView 3.2 in GIS Projects.md b/spaces/raedeXanto/academic-chatgpt-beta/Arcview3264bitfreedownload Best Practices and Examples for Using ArcView 3.2 in GIS Projects.md
deleted file mode 100644
index ba6489510d146c4d2a3714be615f0bcc43119e59..0000000000000000000000000000000000000000
--- a/spaces/raedeXanto/academic-chatgpt-beta/Arcview3264bitfreedownload Best Practices and Examples for Using ArcView 3.2 in GIS Projects.md
+++ /dev/null
@@ -1,149 +0,0 @@
-
-
ArcView 3.2 64 Bit Free Download: A Guide for GIS Users
-
If you are a GIS user who wants to use ArcView 3.2, a popular software for visualizing, managing, creating, and analyzing geographic data, you might encounter some difficulties when trying to install and run it on a 64 bit Windows system. This is because ArcView 3.2 was designed for 32 bit Windows and is not compatible with newer versions of Windows. However, this does not mean that you have to give up on using ArcView 3.2. In this article, we will show you how to install and use ArcView 3.2 on 64 bit Windows with some simple solutions.
A brief introduction to ArcView 3.2 and its features
-
ArcView 3.2 is a software developed by Esri, a leading company in geographic information system (GIS) technology. It was released in 1999 and was widely used by GIS professionals and enthusiasts for many years. ArcView 3.2 allows users to perform various tasks with geographic data, such as:
-
-
- Displaying and exploring maps and data layers
- Creating and editing spatial data
- Performing spatial analysis and queries
- Generating charts, reports, and layouts
- Customizing the interface and functionality with scripts and extensions
-
-
ArcView 3.2 has many features that make it a powerful and user-friendly software for GIS users, such as:
-
-
- A simple and intuitive graphical user interface (GUI)
- A flexible project-based structure that organizes data, maps, tables, charts, layouts, scripts, and extensions
- A rich set of tools and commands for manipulating data and maps
- A variety of data formats and sources that can be imported and exported
- A scripting language (Avenue) that allows users to create custom functions and applications
- A large collection of extensions that add more capabilities and functionalities to the software
-
-
The benefits of using ArcView 3.2 for GIS analysis and visualization
-
Although ArcView 3.2 is an old software that has been replaced by newer versions of Esri products, such as ArcGIS Desktop and ArcGIS Pro, it still has some benefits that make it worth using for GIS users, such as:
-
-
- It is easy to learn and use, especially for beginners who want to get started with GIS
- It is fast and stable, especially for handling small to medium-sized datasets
- It is compatible with many other GIS software and data formats
- It has a loyal fan base that provides support and resources online
- It has a nostalgic value for many GIS users who grew up with it
-
-
How to install ArcView 3.2 on 64 bit Windows?
-
The challenges of installing ArcView 3.2 on 64 bit Windows
-
If you have a copy of ArcView 3.2 installation files or CD-ROMs, you might think that installing it on your 64 bit Windows system is as simple as running the setup.exe file. However, you will soon realize that this will not work because of two main reasons:
-
-
- The setup.exe file is a 16 bit executable that cannot run on a 64 bit system.
- The installation process requires registry entries and system files that are not available or compatible on a 64 bit system.
-
-
The solutions for installing ArcView 3.2 on 64 bit Windows
-
Fortunately, there are some solutions that can help you overcome these challenges and successfully install ArcView 3.2 on your 64 bit Windows system.
-
-Using a 32 bit InstallShield engine
-
-One solution is to use a 32 bit InstallShield engine (a setup32.exe file) that can run on a 64 bit system and install the ArcView 3.2 files from the original installation media:
-
- 1. Copy the setup32.exe file to the same folder where the setup.exe file of ArcView 3.2 is located.
- 2. Run the setup32.exe file as administrator.
- 3. Follow the instructions on the screen to complete the installation process.
-
This solution should work for most cases, but it might not work for some systems or versions of Windows.
-
Using a virtual machine or a compatibility mode
-
-Another solution is to use a virtual machine (VM) or a compatibility mode (CM) that emulates an operating system or environment where ArcView 3.2 can run normally:
-
- 1. Set up a VM with a 32 bit version of Windows, or enable a compatibility mode for the installer.
- 2. Install or run ArcView 3.2 on the VM or CM using the original setup.exe file.
- 3. Follow the instructions on the screen to complete the installation process.
-
This solution should work for most cases, but it might require more resources or technical skills.
-
arcview gis 3.2 64 bit free download
-arcview 3.2 for windows 10 64 bit download
-arcview 3.2 software free download full version
-arcview 3.2 windows 7 64 bit download
-arcview gis 3.2 free download for windows 10
-arcview gis 3.2 crack free download
-arcview gis 3.2 download with crack
-arcview gis 3.2 full version free download
-arcview gis 3.2 windows 10 64 bit
-arcview gis 3.2 windows 7 64 bit
-how to install arcview gis 3.2 on windows 10
-how to run arcview gis 3.2 on windows 10
-how to use arcview gis 3.2 on windows 10
-how to install arcview gis 3.2 on windows 7
-how to run arcview gis 3.2 on windows 7
-how to use arcview gis 3.2 on windows 7
-arcview gis tutorial pdf free download
-arcview gis manual pdf free download
-arcview gis training course free download
-arcview gis projects free download
-arcview gis shapefiles free download
-arcview gis extensions free download
-arcview gis scripts free download
-arcview gis themes free download
-arcview gis data free download
-best alternative to arcview gis free download
-open source arcview gis free download
-qgis vs arcview gis free download
-grass vs arcview gis free download
-saga vs arcview gis free download
-mapinfo vs arcview gis free download
-ilwis vs arcview gis free download
-idrisi vs arcview gis free download
-erdas vs arcview gis free download
-envi vs arcview gis free download
-surfer vs arcview gis free download
-global mapper vs arcview gis free download
-autocad map vs arcview gis free download
-civil 3d vs arcview gis free download
-microstation vs arcview gis free download
-esri products compatible with arcview gis free download
-arccatalog vs arcview gis free download
-arceditor vs arcview gis free download
-arcreader vs arcview gis free download
-arctoolbox vs arcview gis free download
-arccad vs arcview gis free download
-arccoverage vs arcview gis free download
-arctin vs arcview gis free download
-arcraster vs arcview gis free download
-arcnetwork vs arcview gis free download
-
How to use ArcView 3.2 on 64 bit Windows?
-
The basic steps for using ArcView 3.2 on 64 bit Windows
-
If you have successfully installed ArcView 3.2 on your 64 bit Windows system, you can start using it for your GIS projects. Here are the basic steps for using ArcView 3.2 on 64 bit Windows:
-
- 1. Launch ArcView 3.2 by double-clicking the arcview.exe file or the shortcut you created.
- 2. Create a new project or open an existing one by choosing File > New Project or File > Open Project.
- 3. Add data layers to your project by choosing View > Add Theme or clicking the Add Theme button.
- 4. Explore and manipulate your data and maps using the tools and commands in the menus and toolbars.
- 5. Save your project by choosing File > Save Project or File > Save Project As.
-
These are the basic steps for using ArcView 3.2 on 64 bit Windows, but you can do much more with ArcView 3.2 depending on your needs and preferences.
-
The tips and tricks for using ArcView 3.2 on 64 bit Windows
-
To make the most out of ArcView 3.2 on 64 bit Windows, you might want to follow some tips and tricks that can enhance your experience and performance with the software, such as:
-
Updating the extensions and drivers
-
ArcView 3.2 comes with many extensions that add more capabilities and functionalities to the software, such as 3D Analyst, Spatial Analyst, Network Analyst, Image Analysis, Data Publisher, Tracking Analyst, StreetMap, and more. However, some of these extensions might not work properly on 64 bit Windows or might require updates to fix some bugs or improve some features. Therefore, you might want to check if there are any updates available for the extensions you use and install them accordingly. You can find some updates for ArcView 3.2 extensions on Esri's website or other sources online.
-
Similarly, ArcView 3.2 supports many data formats and sources that can be imported and exported, such as shapefiles, coverages, grids, images, tables, databases, CAD files, and more. However, some of these data formats and sources might not be compatible with 64 bit Windows or might require drivers to be installed or updated to work properly. Therefore, you might want to check if there are any drivers available for the data formats and sources you use and install them accordingly. You can find some drivers for ArcView 3.2 data formats and sources on Esri's website or other sources online.
-
Adjusting the settings and preferences
-
ArcView 3.2 allows you to customize the interface and functionality of the software according to your needs and preferences. You can adjust the settings and preferences of ArcView 3.2 by choosing Edit > Preferences or clicking the Preferences button. Some of the settings and preferences you can adjust include:
-
-
- The appearance and behavior of the GUI elements, such as menus, toolbars, dialogs, windows, fonts, colors, etc.
- The default locations and names of the project files, data files, script files, etc.
- The units and formats of the coordinates, distances, areas, angles, dates, etc.
- The options and parameters of the tools and commands, such as snapping tolerance, projection method, geocoding service, etc.
- The extensions and scripts that are loaded at startup or available in the menus
-
-
Troubleshooting the common errors and issues
-
-Even if you have installed ArcView 3.2 successfully on your 64 bit Windows system, you might still encounter some errors and issues when using it. Some of the common errors and issues that ArcView 3.2 users might face include:
-
-
- Unable to open or save projects or data files
- Unable to load or use extensions or scripts
- Unable to display or edit data or maps
- Unable to perform analysis or queries
- Unable to generate charts, reports, or layouts
- Unable to import or export data formats or sources
- Crashing or freezing of the software
-
-
To troubleshoot these errors and issues, you might want to try some of the following steps:
-
- 1. Check if your system meets the minimum requirements for running ArcView 3.2.
- 2. Check if your ArcView 3.2 installation is complete and updated with the latest patches.
- 3. Check if your data files are valid and compatible with ArcView 3.2.
- 4. Check if your extensions and scripts are loaded and updated correctly.
- 5. Check if your settings and preferences are configured properly.
- 6. Check if you are using the tools and commands correctly.
- 7. Check if your project files are backed up regularly.
- 8. Check for conflicts or interference with other software or hardware on your system.
- 9. Check if there are any solutions or workarounds available online for your specific error or issue.
-
If none of these steps help, you might want to contact Esri support or other ArcView 3.2 users online for further assistance.
-
Conclusion
-
In this article, we have shown you how to install and use ArcView 3.2 on 64 bit Windows with some simple solutions. We have also given you some tips and tricks for enhancing your experience and performance with the software, as well as some steps for troubleshooting the common errors and issues that you might encounter. We hope that this article has helped you to enjoy using ArcView 3.2 on your 64 bit Windows system for your GIS projects.
-
FAQs
-
Here are some frequently asked questions about ArcView 3.2 on 64 bit Windows:
-
Q: Can I use ArcView 3.2 on Windows 10?
-
A: Yes, you can use ArcView 3.2 on Windows 10 by following the same solutions that we have described in this article for installing and using it on 64 bit Windows.
-
Q: Can I use ArcView 3.2 with newer versions of Esri products?
-
A: Yes, you can use ArcView 3.2 with newer versions of Esri products, such as ArcGIS Desktop and ArcGIS Pro, by importing and exporting data formats that are compatible with both software. However, some features and functionalities might not be available or compatible between different software versions.
-
Q: Can I use ArcView 3.2 with other GIS software?
-
A: Yes, you can use ArcView 3.2 with other GIS software by importing and exporting data formats that are compatible with both software. However, some features and functionalities might not be available or compatible between different software.
-
Q: Can I use ArcView 3.2 with online GIS services?
-
A: Yes, you can use ArcView 3.2 with online GIS services by connecting to them through the Internet Map Server extension or other methods. However, some features and functionalities might not be available or compatible between different services.
-
Q: Can I use ArcView 3.2 with mobile devices?
-
A: No, you cannot use ArcView 3.2 with mobile devices directly because it is not designed for them. However, you can use other Esri products that are designed for mobile devices, such as ArcPad or ArcGIS Collector, and transfer data between them and ArcView 3.2 using compatible data formats.
-
-
-
\ No newline at end of file
diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Atithi Tum Kab Jaoge Part 1 In Hindi Download 720p Dual Audio Torrent Download The Best Way to Stream or Download the Film.md b/spaces/raedeXanto/academic-chatgpt-beta/Atithi Tum Kab Jaoge Part 1 In Hindi Download 720p Dual Audio Torrent Download The Best Way to Stream or Download the Film.md
deleted file mode 100644
index bac11ee96d852ec23de6899de2d1093a50e0cd9b..0000000000000000000000000000000000000000
--- a/spaces/raedeXanto/academic-chatgpt-beta/Atithi Tum Kab Jaoge Part 1 In Hindi Download 720p Dual Audio Torrent Download The Best Way to Stream or Download the Film.md
+++ /dev/null
@@ -1,158 +0,0 @@
-
-
Atithi Tum Kab Jaoge Part 1 In Hindi Download 720p Dual Audio Torrent Download
-
Are you a fan of Bollywood comedy movies? If yes, then you must have heard of Atithi Tum Kab Jaoge, a hilarious film that was released in 2010. The movie stars Ajay Devgn, Konkona Sen Sharma, and Paresh Rawal as a married couple who have to deal with an unwanted guest who overstays his welcome. The movie was a hit at the box office and received positive reviews from critics and audiences alike.
-
But what if you want to watch this movie again at your own convenience? Or what if you missed it when it was in theaters? Well, don't worry, because you can download Atithi Tum Kab Jaoge Part 1 in Hindi with dual audio from torrent sites. This way, you can enjoy the movie in your preferred language, with high quality video and audio, and without spending any money or time.
-
In this article, we will show you how to download Atithi Tum Kab Jaoge Part 1 in Hindi with dual audio from torrent sites. We will also tell you about the benefits and risks of doing so, and give you some tips to avoid any problems. So, let's get started!
-
How to download Atithi Tum Kab Jaoge Part 1 in Hindi with dual audio
-
Downloading Atithi Tum Kab Jaoge Part 1 in Hindi with dual audio from torrent sites is not very difficult. You just need to follow these simple steps:
-
Step 1: Find a reliable torrent site that has the movie file
-
The first thing you need to do is to find a torrent site that has the movie file you want. There are many torrent sites on the internet, but not all of them are trustworthy or safe. Some of them may have fake or malicious files that can harm your device or data. Therefore, you need to be careful and choose a reputable torrent site that has good ratings and reviews from other users.
-
Some of the popular torrent sites that may have Atithi Tum Kab Jaoge Part 1 in Hindi with dual audio are:
-
Atithi Tum Kab Jaoge Full Movie Hindi 720p Dual Audio Torrent
-Download Atithi Tum Kab Jaoge Part 1 In Hindi HD Dual Audio
-Atithi Tum Kab Jaoge Part 1 Hindi Dubbed 720p Torrent Download
-Watch Atithi Tum Kab Jaoge Part 1 Online Free In Hindi Dual Audio
-Atithi Tum Kab Jaoge Part 1 In Hindi Download 720p Dual Audio Magnet Link
-How To Download Atithi Tum Kab Jaoge Part 1 In Hindi Dual Audio 720p
-Atithi Tum Kab Jaoge Part 1 Hindi Dual Audio 720p Torrentz2
-Atithi Tum Kab Jaoge Part 1 In Hindi Download 720p Dual Audio Filmywap
-Atithi Tum Kab Jaoge Part 1 Hindi Dual Audio 720p Kickass Torrent
-Atithi Tum Kab Jaoge Part 1 In Hindi Download 720p Dual Audio Moviesflix
-Atithi Tum Kab Jaoge Part 1 Hindi Dual Audio 720p YTS Torrent
-Atithi Tum Kab Jaoge Part 1 In Hindi Download 720p Dual Audio Worldfree4u
-Atithi Tum Kab Jaoge Part 1 Hindi Dual Audio 720p Extratorrent
-Atithi Tum Kab Jaoge Part 1 In Hindi Download 720p Dual Audio Khatrimaza
-Atithi Tum Kab Jaoge Part 1 Hindi Dual Audio 720p Limetorrents
-Atithi Tum Kab Jaoge Part 1 In Hindi Download 720p Dual Audio Bolly4u
-Atithi Tum Kab Jaoge Part 1 Hindi Dual Audio 720p RARBG Torrent
-Atithi Tum Kab Jaoge Part 1 In Hindi Download 720p Dual Audio Movierulz
-Atithi Tum Kab Jaoge Part 1 Hindi Dual Audio 720p EZTV Torrent
-Atithi Tum Kab Jaoge Part 1 In Hindi Download 720p Dual Audio Tamilrockers
-Atithi Tum Kab Jaoge Part 1 In Hindi Download HD Dual Audio Torrentking
-Atithi Tum Kab Jaoge Full Movie Download In Hindi Dual Audio HD Torrent
-Watch Online Atithi Tum Kab Jaoge Full Movie In Hindi Dual Audio HD
-Download Free Atithi Tum Kab Jaoge Full Movie In Hindi HD Dual Audio
-How To Watch Atithi Tum Kab Jaoge Full Movie In Hindi HD Dual Audio Online
-Atithi Tum Kab Jaoge Full Movie In Hindi HD Dual Audio Magnet Link Download
-Atithi Tum Kab Jaoge Full Movie In Hindi HD Dual Audio Torrentz2 Download
-Atithi Tum Kab Jaoge Full Movie In Hindi HD Dual Audio Filmywap Download
-Atithi Tum Kab Jaoge Full Movie In Hindi HD Dual Audio Kickass Torrent Download
-Atithi Tum Kab Jaoge Full Movie In Hindi HD Dual Audio Moviesflix Download
-Atithi Tum Kab Jaoge Full Movie In Hindi HD Dual Audio YTS Torrent Download
-Atithi Tum Kab Jaoge Full Movie In Hindi HD Dual Audio Worldfree4u Download
-Atithi Tum Kab Jaoge Full Movie In Hindi HD Dual Audio Extratorrent Download
-Atithi Tum Kab Jaoge Full Movie In Hindi HD Dual Audio Khatrimaza Download
-Atithi Tum Kab Jaoge Full Movie In Hindi HD Dual Audio Limetorrents Download
-Atithi Tum Kab Jaoge Full Movie In Hindi HD Dual Audio Bolly4u Download
-Atithi Tum Kab Jaoge Full Movie In Hindi HD Dual Audio RARBG Torrent Download
-Atithi Tum Kab Jaoge Full Movie In Hindi HD Dual Audio Movierulz Download
-Atithi Tum Kab Jaoge Full Movie In Hindi HD Dual Audio EZTV Torrent Download
-Atithi Tum Kab Jaoge Full Movie In Hindi HD Dual Audio Tamilrockers Download
-
-
The Pirate Bay: This is one of the oldest and most famous torrent sites on the internet. It has a huge collection of movies, TV shows, music, games, software, and more. You can search for Atithi Tum Kab Jaoge Part 1 in Hindi with dual audio by typing it in the search bar or browsing through the categories.
-
Kickass Torrents: This is another well-known torrent site that has millions of users and files. It also has a user-friendly interface and a fast download speed. You can find Atithi Tum Kab Jaoge Part 1 in Hindi with dual audio by using the search function or filtering by category, quality, size, etc.
-
LimeTorrents: This is a relatively new torrent site that has gained popularity among users. It has a clean and simple design and a large database of files. You can look for Atithi Tum Kab Jaoge Part 1 in Hindi with dual audio by entering it in the search box or sorting by date, seeders, leechers, etc.
-
-
Once you find a torrent site that has the movie file you want, you need to check if it is verified and has enough seeders. A verified file means that it has been checked by other users and is safe to download. A seeder is someone who has downloaded the file and is sharing it with others. The more seeders a file has, the faster it will download.
-
Step 2: Download a torrent client software that can handle the file
-
The next thing you need to do is to download a torrent client software that can handle the file you want. A torrent client software is a program that allows you to download files from torrent sites using peer-to-peer technology. It connects you with other users who have the same file and lets you download parts of it from them.
-
There are many torrent client software available on the internet, but some of them are better than others. Some of them may have more features, faster speed, or better security than others. Therefore, you need to choose a reliable and efficient torrent client software that suits your needs.
-
Some of the popular torrent client software that you can use are:
-
-
uTorrent: This is one of the most widely used torrent client software on the internet. It has a small size, easy installation, and simple interface. It also has advanced options such as bandwidth control, encryption, magnet links, etc.
-
BitTorrent: This is another popular torrent client software that has similar features as uTorrent. It also has a built-in media player, antivirus protection, remote access, etc.
-
Vuze: This is a powerful torrent client software that has more features than uTorrent or BitTorrent. It also has a sleek interface, HD video support, streaming capabilities, VPN integration, etc.
-
-
Once you choose a torrent client software that you like, you need to download it from its official website and install it on your device.
-
Step 3: Open the torrent file with the torrent client and start downloading
-
The final thing you need to do is to open the torrent file with the torrent client and start downloading it. A torrent file is a small file that contains information about the larger file you want to download. It tells your torrent client where to find other users who have the same file and how to download it from them.
-
-To open the torrent file with your torrent client software:
-
- 1. Go back to the torrent site where you found Atithi Tum Kab Jaoge Part 1 in Hindi with dual audio.
- 2. Click on the download button or link next to the file name.
- 3. Select the "Open with" option and choose your torrent client software from the list.
- 4. Your torrent client software will open automatically and show you some details about the file, such as name, size, seeders, leechers, etc.
- 5. Click on the "OK" or "Start" button to begin downloading.
-
-
Step 4: Wait for the download to finish and enjoy the movie
-
-The last step is to wait for your download to finish and enjoy watching Atithi Tum Kab Jaoge Part 1 in Hindi with dual audio on your device. Depending on your internet speed and the number of seeders, the download time may vary from minutes to hours.
-You can check your download progress in your torrent client software by looking at indicators such as percentage completed, download speed, estimated time remaining, etc.
-You can also pause, resume, or stop the download at any time.
Benefits of downloading Atithi Tum Kab Jaoge Part 1 in Hindi with dual audio
-
Downloading Atithi Tum Kab Jaoge Part 1 in Hindi with dual audio from torrent sites has some benefits that you may not get from other sources. Here are some of them:
-
Benefit 1: You can watch the movie in your preferred language
-
One of the main benefits of downloading Atithi Tum Kab Jaoge Part 1 in Hindi with dual audio is that you can watch the movie in your preferred language. Dual audio means that the movie file has two audio tracks: one in Hindi and one in another language, usually English. You can choose which audio track to play depending on your preference and understanding.
-
This way, you can enjoy the movie without missing any dialogue or jokes. You can also learn some new words or phrases from another language while watching the movie. For example, if you are a Hindi speaker who wants to improve your English skills, you can watch the movie with English audio and Hindi subtitles.
-
Benefit 2: You can enjoy the movie with high quality video and audio
-
Another benefit of downloading Atithi Tum Kab Jaoge Part 1 in Hindi with dual audio is that you can enjoy the movie with high quality video and audio. Torrent sites usually offer different versions of the movie file with different resolutions and bitrates. Resolution refers to the number of pixels that make up the image on the screen, while bitrate refers to the amount of data that is transferred per second.
-
The higher the resolution and bitrate, the better the video and audio quality. For example, a 720p resolution means that the image has 1280 x 720 pixels, while a 1080p resolution means that it has 1920 x 1080 pixels. A higher resolution means more details and clarity in the image. Similarly, a higher bitrate means more sound information and richness in the audio.
-
By downloading Atithi Tum Kab Jaoge Part 1 in Hindi with dual audio from torrent sites, you can choose the version that suits your device and internet speed. You can watch the movie on your laptop, tablet, smartphone, or TV with crisp and clear video and audio.
-
Benefit 3: You can save money and time by avoiding going to the theater or buying DVDs
-
The third benefit of downloading Atithi Tum Kab Jaoge Part 1 in Hindi with dual audio from torrent sites is that you can save money and time by avoiding going to the theater or buying DVDs. Going to the theater can be expensive and time-consuming, especially if you have to travel far or deal with traffic, parking, tickets, snacks, etc. Buying DVDs can also be costly and inconvenient, especially if they are not available in your region or language.
-
By downloading Atithi Tum Kab Jaoge Part 1 in Hindi with dual audio from torrent sites, you can watch the movie anytime and anywhere you want. You don't have to pay anything or wait for anything. You just need a device, an internet connection, and a torrent client software.
-
Risks of downloading Atithi Tum Kab Jaoge Part 1 in Hindi with dual audio
-
However, downloading Atithi Tum Kab Jaoge Part 1 in Hindi with dual audio from torrent sites also has some risks that you should be aware of. Here are some of them:
-
Risk 1: You may encounter viruses, malware, or spyware that can harm your device or data
-
One of the main risks of downloading Atithi Tum Kab Jaoge Part 1 in Hindi with dual audio from torrent sites is that you may encounter viruses, malware, or spyware that can harm your device or data. These are malicious programs that can infect your device or data and cause various problems such as slowing down your performance, deleting your files, stealing your information, displaying unwanted ads, etc.
-
-Some torrent sites may have fake or corrupted files that contain these malicious programs. Some torrent files may also have hidden trackers that monitor your online activity and send it to third parties. Therefore, you need to be careful and cautious when downloading files from torrent sites. You should always check the file name, size, type, comments, ratings, and reviews before downloading it.
Risk 2: You may violate the copyright laws and face legal consequences
-
-Another risk of downloading Atithi Tum Kab Jaoge Part 1 in Hindi with dual audio from torrent sites is that you may violate copyright laws and face legal consequences. Copyright laws protect the rights of the creators and owners of original works such as movies, music, books, etc. They grant them exclusive rights to distribute, reproduce, display, perform, or modify their works.
-By downloading Atithi Tum Kab Jaoge Part 1 in Hindi with dual audio from torrent sites, you are essentially copying and sharing their works without their permission. This is considered illegal and unethical, and may result in fines, lawsuits, or even jail time.
-Some countries have strict copyright laws and enforcement agencies that monitor and punish online piracy. Some ISPs may also track your online activity and report it to the authorities. Therefore, you need to be aware and respectful of the copyright laws and regulations in your country and region.
Risk 3: You may lose your internet connection or bandwidth due to excessive downloading
-
-The third risk of downloading Atithi Tum Kab Jaoge Part 1 in Hindi with dual audio from torrent sites is that you may lose your internet connection or bandwidth due to excessive downloading. Downloading files from torrent sites requires a lot of data transfer between your device and other users who have the same file. This can consume a lot of your internet bandwidth, which is the amount of data that you can send or receive per second.
-If you have a limited bandwidth plan, you may exceed your limit and incur extra charges. If you have an unlimited plan, you may still experience slowdowns or interruptions due to network congestion or throttling, which is when your ISP deliberately reduces your internet speed because you are using too much bandwidth.
-This can affect not only your download speed but also your other online activities such as browsing, streaming, gaming, etc. Therefore, you need to be mindful and moderate about your download frequency and size.
Tips to avoid risks and enhance your downloading experience
-
-To avoid these risks and enhance your downloading experience, here are some tips that you can follow:
-
-Tip 1: Use a VPN service to hide your IP address and location
-
-A VPN service creates a secure tunnel between your device and a server located in another country. It encrypts your data and masks your IP address and location. This way, you can hide your online identity and activity from anyone who might be spying on you, such as hackers, ISPs, governments, etc. You can also access geo-restricted content that may not be available in your country or region. For example, if you want to watch Netflix US content, you can use a VPN service to connect to a US server and bypass Netflix's geo-blocking.
-There are many VPN services available on the internet, but not all of them are trustworthy or safe. Some of them may have poor performance, weak encryption, or shady policies. Therefore, you need to choose a reliable and efficient VPN service that suits your needs. Some of the popular VPN services that you can use are:
-
-ExpressVPN: This is one of the best VPN services on the internet. It has over 3000 servers in 94 countries, fast speeds, strong encryption, a no-logs policy, a kill switch, split tunneling, and more. It also has a user-friendly interface and supports multiple devices and platforms. You can use it to download Atithi Tum Kab Jaoge Part 1 in Hindi with dual audio from torrent sites without any hassle.
-NordVPN: This is another excellent VPN service with features similar to ExpressVPN. It has over 5400 servers in 59 countries, double VPN encryption, a no-logs policy, a kill switch, split tunneling, and more. It also has a sleek interface and supports multiple devices and platforms. You can use it to download Atithi Tum Kab Jaoge Part 1 in Hindi with dual audio from torrent sites with ease.
-Surfshark: This is a relatively new VPN service that has gained popularity among users. It has over 3200 servers in 65 countries, unlimited simultaneous connections, a no-logs policy, a kill switch, split tunneling, and more.
Tip 2: Use an antivirus software to scan and remove any potential threats
-
-An antivirus software is a program that detects, blocks, and removes any viruses, malware, or spyware that can harm your device or data. These are malicious programs that can infect your device or data and cause various problems such as slowing down your performance, deleting your files, stealing your information, displaying unwanted ads, etc.
-
Some torrent sites may have fake or corrupted files that contain these malicious programs. Some torrent files may also have hidden trackers that monitor your online activity and send it to third parties. Therefore, you need to use an antivirus software to scan and remove any potential threats before and after downloading files from torrent sites.
-
There are many antivirus software available on the internet, but not all of them are trustworthy or effective. Some of them may have poor performance, weak detection, or shady policies. Therefore, you need to choose a reliable and efficient antivirus software that suits your needs.
-
Some of the popular antivirus software that you can use are:
-
-
Avast Free Antivirus: This is one of the best antivirus software on the internet. It has over 400 million users worldwide and offers world-class protection against viruses and other malware. It also secures your Wi-Fi network and strengthens your privacy.
-
Bitdefender Antivirus Free Edition: This is another excellent antivirus software that has a simple and lightweight design. It offers powerful protection against all types of threats with minimal impact on your system resources.
-
Kaspersky Security Cloud Free: This is a relatively new antivirus software that has a cloud-based approach. It offers real-time protection against viruses, ransomware, phishing, and more with adaptive security features.
-
-
Once you choose an antivirus software that you like, you need to download it from its official website and install it on your device.
-
Tip 3: Choose a reputable torrent site and a verified torrent file
-
The last tip to avoid risks and enhance your downloading experience is to choose a reputable torrent site and a verified torrent file. A reputable torrent site is a torrent site that has good ratings and reviews from other users and offers high-quality and safe files. A verified torrent file is a torrent file that has been checked by other users and is safe to download.
-
There are many torrent sites on the internet, but not all of them are trustworthy or safe. Some of them may have fake or malicious files that can harm your device or data. Therefore, you need to be careful and choose a reputable torrent site that has good ratings and reviews from other users.
-
Some of the popular torrent sites that may have Atithi Tum Kab Jaoge Part 1 in Hindi with dual audio are:
-
-
The Pirate Bay: This is one of the oldest and most famous torrent sites on the internet. It has a huge collection of movies, TV shows, music, games, software, and more. You can search for Atithi Tum Kab Jaoge Part 1 in Hindi with dual audio by typing it in the search bar or browsing through the categories.
-
Kickass Torrents: This is another well-known torrent site that has millions of users and files. It also has a user-friendly interface and a fast download speed. You can find Atithi Tum Kab Jaoge Part 1 in Hindi with dual audio by using the search function or filtering by category, quality, size, etc.
-
LimeTorrents: This is a relatively new torrent site that has gained popularity among users. It has a clean and simple design and a large database of files. You can look for Atithi Tum Kab Jaoge Part 1 in Hindi with dual audio by entering it in the search box or sorting by date, seeders, leechers, etc.
-
-
-Once you find a torrent site that has the movie file you want, you need to check if it is verified and has enough seeders. A verified file means that it has been checked by other users and is safe to download. A seeder is someone who has downloaded the file and is sharing it with others. The more seeders a file has, the faster it will download.
Conclusion
-
-In conclusion, downloading Atithi Tum Kab Jaoge Part 1 in Hindi with dual audio from torrent sites is a great way to watch this hilarious movie in your preferred language, with high quality video and audio, and without spending any money or time. However, it also comes with some risks that you should be aware of, such as viruses, malware, spyware, copyright violations, and internet issues. To avoid these risks and enhance your downloading experience, you should follow these tips:
-
- Use a VPN service to hide your IP address and location
- Use an antivirus software to scan and remove any potential threats
- Choose a reputable torrent site and a verified torrent file
-
-
-We hope this article was helpful and gave you some useful information about downloading Atithi Tum Kab Jaoge Part 1 in Hindi with dual audio from torrent sites. If you have any questions, comments, or feedback, please feel free to share them with us. Thank you for reading!
Frequently Asked Questions
-
Here are some frequently asked questions about downloading Atithi Tum Kab Jaoge Part 1 in Hindi with dual audio from torrent sites:
-
-Q: What is Atithi Tum Kab Jaoge?
-
-A: Atithi Tum Kab Jaoge is a 2010 Bollywood comedy film directed by Ashwni Dhir and starring Ajay Devgn, Konkona Sen Sharma, and Paresh Rawal. The film tells the story of a married couple whose lives are disrupted by the arrival of an unwanted guest who overstays his welcome.
-
-Q: What is dual audio?
-
-A: Dual audio means that the movie file has two audio tracks: one in Hindi and one in another language, usually English. You can choose which audio track to play depending on your preference and understanding.
-
-Q: What is a torrent site?
-
-A: A torrent site is a website that hosts files that can be downloaded using peer-to-peer technology. It connects you with other users who have the same file and lets you download parts of it from them.
-
-Q: What is a VPN service?
-
-A: A VPN service creates a secure tunnel between your device and a server located in another country. It encrypts your data and masks your IP address and location. This way, you can hide your online identity and activity from anyone who might be spying on you, such as hackers, ISPs, governments, etc. You can also access geo-restricted content that may not be available in your country or region.
-
-Q: What is an antivirus software?
-
-A: An antivirus software is a program that detects, blocks, and removes any viruses, malware, or spyware that can harm your device or data. These are malicious programs that can infect your device or data and cause various problems such as slowing down your performance, deleting your files, stealing your information, displaying unwanted ads, etc.
-
-
\ No newline at end of file
diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Chori Chori 1 Full Movie In English Hd 1080p.md b/spaces/raedeXanto/academic-chatgpt-beta/Chori Chori 1 Full Movie In English Hd 1080p.md
deleted file mode 100644
index b2e7fd2041840d675ee47b39f333bc39c097e814..0000000000000000000000000000000000000000
--- a/spaces/raedeXanto/academic-chatgpt-beta/Chori Chori 1 Full Movie In English Hd 1080p.md
+++ /dev/null
@@ -1,14 +0,0 @@
-
-
Chori Chori: A Romantic Comedy with a Twist
-
Chori Chori is a 2003 Hindi-language romantic comedy film directed by Milan Luthria and starring Ajay Devgn, Rani Mukerji and Sonali Bendre. The film is a remake of the 1991 Hollywood film Housesitter, starring Steve Martin and Goldie Hawn.
-
The plot revolves around Khushi (Mukerji), a free-spirited girl who lies her way into the life of Ranbir (Devgn), a successful architect who is engaged to Pooja (Bendre). Khushi pretends to be Ranbir's wife and moves into his dream house in a hill station, where she wins over his family and friends. However, things get complicated when Ranbir and Pooja arrive at the house and discover Khushi's deception.
Chori Chori is a light-hearted film that blends romance, comedy and drama. The film showcases the chemistry between Devgn and Mukerji, who have worked together in several films before. The film also features some melodious songs composed by Sajid-Wajid and lyrics by Sameer. The film was released on 1 August 2003 and received positive reviews from critics and audiences alike.
-
If you are looking for a fun and entertaining movie to watch with your loved ones, you can stream Chori Chori online in HD quality with English subtitles on Bilibili[^1^]. Enjoy the movie and don't forget to share your feedback with us!
Chori Chori is not the only film that Devgn and Mukerji have starred in together. They have also shared screen space in films like Kuch Khatti Kuch Meethi (2001), Chalte Chalte (2003), Yuva (2004) and Omkara (2006). They have been praised for their versatile acting skills and their ability to portray different characters and emotions.
-
Mukerji, who made her debut in 1997 with Raja Ki Aayegi Baraat, is one of the most popular and acclaimed actresses in Bollywood. She has won several awards, including seven Filmfare Awards, for her performances in films like Kuch Kuch Hota Hai (1998), Saathiya (2002), Black (2005), Bunty Aur Babli (2005) and No One Killed Jessica (2011). She is also known for her humanitarian work and her support for various causes.
-
Devgn, who started his career in 1991 with Phool Aur Kaante, is also one of the most successful and respected actors in the industry. He has won four National Film Awards and two Filmfare Awards for his roles in films like Zakhm (1998), The Legend of Bhagat Singh (2002), Singham (2011) and Tanhaji: The Unsung Warrior (2020). He is also a director and producer, who has helmed films like U Me Aur Hum (2008), Shivaay (2016) and Mayday (2022).
Bendre is also a cancer survivor, who was diagnosed with metastatic cancer in 2018. She underwent treatment in New York and returned to India in 2019. She has been vocal about her journey and her positive outlook on life. She has also written a book titled The Golden Hour, which chronicles her experience of battling cancer and finding joy in every moment.
-
Chori Chori is a film that celebrates love, life and laughter. It is a film that will make you smile, cry and cheer for the characters. It is a film that you can watch with your family and friends and have a great time. So don't miss this opportunity to watch Chori Chori online in HD quality with English subtitles on Bilibili. And don't forget to share your feedback with us!
- 81aa517590
-
-
\ No newline at end of file
diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Fsdreamteam Gsx For Fsx Serial Number.md b/spaces/raedeXanto/academic-chatgpt-beta/Fsdreamteam Gsx For Fsx Serial Number.md
deleted file mode 100644
index aefe7e8641a726c660f909e406e5a561d95fcf6f..0000000000000000000000000000000000000000
--- a/spaces/raedeXanto/academic-chatgpt-beta/Fsdreamteam Gsx For Fsx Serial Number.md
+++ /dev/null
@@ -1,196 +0,0 @@
-
-
FSDreamTeam GSX for FSX Serial Number: How to Get It and Use It
-
If you are a fan of flight simulation games, you might have heard of FSDreamTeam GSX, a popular add-on for Microsoft Flight Simulator X (FSX) that enhances your ground services experience. But what is FSDreamTeam GSX exactly, and how can you get and use a serial number for it? In this article, we will answer these questions and more.
FSDreamTeam GSX (short for Ground Services X) is a software add-on that simulates realistic ground services for your aircraft in FSX. It adds various features and animations to make your airport operations more immersive and realistic, such as:
-
Features of FSDreamTeam GSX
-
-
Customizable pushback with sound and voice
-
Animated passengers and crew boarding and deboarding
-
Animated catering and fuel trucks
-
Animated baggage loaders and carts
-
Animated marshalling agents and follow-me cars
-
Animated jetways and stairs
-
Dynamic lighting and shadows
-
Custom sound effects and ambient noises
-
Support for third-party aircraft and sceneries
-
And much more!
-
-
Requirements for FSDreamTeam GSX
-
To use FSDreamTeam GSX, you need to have Microsoft Flight Simulator X installed on your PC. You also need to meet the following system requirements:
-
-
Windows 7 or higher (64-bit recommended)
-
Intel Core 2 Duo or AMD equivalent processor (quad-core recommended)
-
4 GB of RAM (8 GB recommended)
-
1 GB of free hard disk space
-
NVIDIA GeForce 8800 or ATI Radeon HD 3870 or higher graphics card (2 GB VRAM recommended)
-
DirectX 10 or higher compatible sound card
-
Internet connection for activation and updates
-
-
What is a Serial Number and Why Do You Need It?
-
A serial number is a unique code that identifies your copy of FSDreamTeam GSX. It is usually composed of letters and numbers, such as XXXX-XXXX-XXXX-XXXX. You need a serial number to activate your FSDreamTeam GSX add-on and enjoy its full features.
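As a purely illustrative aside (the exact FSDreamTeam key alphabet and group layout are not documented here, so treat both as assumptions), a pattern like XXXX-XXXX-XXXX-XXXX can be sanity-checked before you type it into the activation dialog. A minimal sketch in Python:

```python
import re

# Hypothetical shape check for a XXXX-XXXX-XXXX-XXXX style key;
# the allowed characters and group count are assumptions, not the real GSX spec.
SERIAL_RE = re.compile(r'^[A-Z0-9]{4}(-[A-Z0-9]{4}){3}$')

def looks_like_serial(s: str) -> bool:
    return bool(SERIAL_RE.match(s.strip().upper()))

print(looks_like_serial('AB12-CD34-EF56-GH78'))  # True
print(looks_like_serial('AB12CD34EF56GH78'))     # False: the dashes are part of the shape
```

A check like this only catches typos such as missing dashes; whether a key is actually valid is decided by the FSDreamTeam activation server.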
-
How a Serial Number Works
-
A serial number works as a proof of purchase and ownership of your FSDreamTeam GSX add-on. When you buy FSDreamTeam GSX from the official website or a trusted reseller, you will receive a serial number via email. You will then need to enter this serial number during the installation or activation process of FSDreamTeam GSX. The serial number will be verified online by the FSDreamTeam server, and if it is valid, you will be able to use FSDreamTeam GSX without any limitations.
-
Benefits of Having a Serial Number
-
Having a valid serial number for your FSDreamTeam GSX add-on has several benefits, such as:
-
-
You can use FSDreamTeam GSX on any computer that meets the system requirements.
-
You can reinstall FSDreamTeam GSX as many times as you want without losing your activation.
-
You can update FSDreamTeam GSX to the latest version for free.
-
You can access the official support forum and get help from the developers and other users.
-
You can support the developers and help them create more quality products.
-
-
Risks of Using a Fake or Invalid Serial Number
-
Using a fake or invalid serial number for your FSDreamTeam GSX add-on has several risks, such as:
-
-
You may not be able to use FSDreamTeam GSX at all or only with limited features.
-
You may encounter errors, bugs, crashes, or performance issues with FSDreamTeam GSX.
-
You may expose your computer to viruses, malware, or spyware that may harm your system or steal your personal information.
-
You may violate the terms of service and end-user license agreement of FSDreamTeam GSX and face legal consequences.
-
You may miss out on the latest updates, features, improvements, and fixes of FSDreamTeam GSX.
-
You may not be able to access the official support forum or get any help from the developers or other users.
-
You may discourage the developers from creating more quality products.
-
-
How to Get an FSDreamTeam GSX Serial Number?
-
If you want to get a serial number for your FSDreamTeam GSX add-on, you have three main options:
-
Buying from the Official Website
-
The easiest and safest way to get a serial number for your FSDreamTeam GSX add-on is to buy it from the official website: https://www.fsdreamteam.com/products_gsx.html. Here are the steps to follow:
-
fsdreamteam gsx for fsx activation code
-fsdreamteam gsx for fsx crack download
-fsdreamteam gsx for fsx license key
-fsdreamteam gsx for fsx product key
-fsdreamteam gsx for fsx registration code
-fsdreamteam gsx for fsx keygen free
-fsdreamteam gsx for fsx serial number generator
-fsdreamteam gsx for fsx serial number lookup
-fsdreamteam gsx for fsx serial number recovery
-fsdreamteam gsx for fsx serial number email
-fsdreamteam gsx for fsx serial number faq
-fsdreamteam gsx for fsx serial number forum
-fsdreamteam gsx for fsx serial number support
-fsdreamteam gsx for fsx serial number self-help
-fsdreamteam gsx for fsx serial number deactivate
-how to get fsdreamteam gsx for fsx serial number
-how to find fsdreamteam gsx for fsx serial number
-how to retrieve fsdreamteam gsx for fsx serial number
-how to activate fsdreamteam gsx for fsx serial number
-how to register fsdreamteam gsx for fsx serial number
-how to use fsdreamteam gsx for fsx serial number
-how to enter fsdreamteam gsx for fsx serial number
-how to install fsdreamteam gsx for fsx serial number
-how to update fsdreamteam gsx for fsx serial number
-how to reset fsdreamteam gsx for fsx serial number
-where to buy fsdreamteam gsx for fsx serial number
-where to download fsdreamteam gsx for fsx serial number
-where to find my old email with my original GSX Serial Number[^1^]
-where is the FAQ page of FSDreamTeam site[^1^] [^2^]
-where is the Self-Help Site of FSDreamTeam[^2^]
-what is the difference between GSX and GSX L2[^1^]
-what is the procedure to retrieve my existing Serial Numbers[^3^]
-what is the procedure to deactivate my Serial Numbers[^2^]
-what is the Couatl error in GSX[^1^]
-what are the system requirements for GSX
-what are the features of GSX
-what are the airports supported by GSX
-what are the latest updates of GSX
-what are the best settings for GSX
-what are some tips and tricks for GSX
-why do I need a Serial Number for GSX[^1^]
-why do I need to register both GSX and GSX L2[^1^]
-why do I get a panic log when using GSX[^1^]
-why do I get a trial mode when using GSX[^1^]
-why do I get a different retailer when buying GSX[^3^]
-how does GSX work with P3Dv4[^1^]
-how does GSX work with LatinVFR airports[^1^]
-how does GSX work with Project Airbus aircrafts[^1^]
-how does GSX work with SODE jetways
-how does GSX work with AI traffic
-how does GSX work with multiplayer sessions
-how does GSX work with custom sceneries
-
-
Go to the official website and click on the "Buy Now" button.
-
Select your preferred payment method (PayPal, credit card, etc.) and enter your personal details.
-
Confirm your order and complete the payment process.
-
Check your email inbox for an email from sales@fsdreamteam.com with your serial number.
-
If you don't receive an email within 24 hours, check your spam folder or contact support@fsdreamteam.com.
-
-
The price of FSDreamTeam GSX is $34 USD (excluding taxes). You can also buy it as part of a bundle with other products from FSDreamTeam at a discounted price.
-
Buying from a Trusted Reseller
-
The second option to get a serial number for your FSDreamTeam GSX add-on is to buy it from a trusted reseller. A trusted reseller is an authorized online store that sells legitimate copies of FSDreamTeam products. Some examples of trusted resellers are:
The steps to buy from a trusted reseller are similar to buying from the official website. You just need to go to their website, select FSDreamTeam GSX, enter your payment details, confirm your order, and wait for an email with your serial number. The price may vary depending on the reseller, but it should be around $34 USD (excluding taxes).
-
Using a Key Generator or a Crack
-
A key generator is a program that tries to produce serial numbers that look valid, while a crack is a program that modifies or bypasses the activation process of FSDreamTeam GSX altogether. You can find key generators and cracks on various websites, forums, or torrents. However, we strongly advise you not to use them, for the following reasons:
-
-
They are illegal and unethical. You are stealing from the developers who spent time and money to create FSDreamTeam GSX.
-
They are unreliable and risky. You may not find a serial number that works with FSDreamTeam GSX, or you may lose your activation after an update. You may also damage your system or expose it to viruses, malware, or spyware.
-
They are unsupported and outdated. You will not be able to access the official support forum or get any help from the developers or other users. You will also miss out on the latest updates, features, improvements, and fixes of FSDreamTeam GSX.
-
-
Therefore, we recommend avoiding key generators and cracks and buying a legitimate serial number for your FSDreamTeam GSX add-on from the official website or a trusted reseller.
-
How to Use an FSDreamTeam GSX Serial Number?
-
Once you have a valid serial number for your FSDreamTeam GSX add-on, you can use it to install and activate FSDreamTeam GSX on your computer. Here are the steps to follow:
-
Installing FSDreamTeam GSX
-
-
Download the FSDreamTeam GSX installer from the official website and save it to your computer.
-
Unzip the file and run the installer as administrator.
-
Follow the instructions on the screen and select your FSX installation folder.
-
When prompted, enter your serial number and click on "Register".
-
Wait for the installation to complete and close the installer.
-
-
Activating FSDreamTeam GSX
-
-
Launch FSX and select any airport and aircraft.
-
Press Ctrl+F12 to open the FSDreamTeam GSX menu.
-
Select "Customize Airport Positions" and click on "Activate Product".
-
Enter your serial number again and click on "Register".
-
Wait for the activation to complete and close the menu.
-
-
Congratulations! You have successfully installed and activated FSDreamTeam GSX on your computer. You can now enjoy its features and animations by using the Ctrl+F12 menu or the default FSX keys (Shift+E+2 for jetways, Shift+D for pushback, etc.).
-
Troubleshooting Common Issues
-
If you encounter any issues with FSDreamTeam GSX, such as:
-
-
FSDreamTeam GSX does not appear in FSX.
-
FSDreamTeam GSX does not recognize your serial number.
-
FSDreamTeam GSX does not work with some airports or aircraft.
-
FSDreamTeam GSX causes errors, bugs, crashes, or performance issues with FSX.
-
-
You can try the following solutions:
-
-
Make sure you have installed and activated FSDreamTeam GSX correctly.
-
Make sure you have entered your serial number correctly (case-sensitive, no spaces).
-
Make sure you have an internet connection for activation and updates.
-
Make sure you have updated FSDreamTeam GSX to the latest version using the Live Update feature.
-
Make sure you have updated FSX to the latest version (Service Pack 2 or Acceleration).
-
Make sure you have installed any required third-party libraries or dependencies (such as SODE).
-
Make sure you have configured your FSX settings properly (such as scenery complexity, traffic density, etc.).
-
Make sure you have disabled any conflicting add-ons or programs (such as antivirus software).
-
Make sure you have run FSX and FSDreamTeam GSX as administrator.
-
Conclusion
-
In this article, we have explained what FSDreamTeam GSX is, what a serial number is and why you need it, how to get a serial number for FSDreamTeam GSX, and how to use it. We hope this article has been helpful and informative for you. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading!
-
FAQs
-
Here are some frequently asked questions about FSDreamTeam GSX and its serial number:
-
-
Q: Can I use FSDreamTeam GSX with other flight simulators besides FSX?
-
A: Yes, you can use FSDreamTeam GSX with Prepar3D (P3D) versions 1 to 5. However, you need to buy a separate license for each simulator. You can also use FSDreamTeam GSX with Microsoft Flight Simulator 2020 (MSFS 2020), but it is still in beta testing and has limited features.
-
Q: Can I use one serial number for multiple computers?
-
A: Yes, you can use one serial number on up to two computers that belong to you. You cannot activate it on more than two computers or share it with other people.
-
Q: Can I transfer my serial number to another computer?
-
A: Yes, you can transfer your serial number to another computer by deactivating it from your old computer and activating it on your new computer. To deactivate your serial number, go to the FSDreamTeam GSX menu in FSX and select "Deactivate Product". To activate your serial number on your new computer, follow the steps in this article.
-
Q: Can I get a refund if I don't like FSDreamTeam GSX?
-
and PHNL Honolulu Intl.).
-
Q: Can I customize FSDreamTeam GSX to my preferences?
-
A: Yes, you can customize FSDreamTeam GSX to your preferences by using the "Customize Airport Positions" and "Customize Aircraft Configuration" options in the FSDreamTeam GSX menu. You can also use the "GSX Editor" tool to create your own custom ground services for any airport or aircraft.
-
Q: Can I request new features or improvements for FSDreamTeam GSX?
-
A: Yes, you can request new features or improvements for FSDreamTeam GSX by posting your suggestions on the official support forum: https://www.fsdreamteam.com/forum/index.php/board,50.0.html. The developers are always open to feedback and may consider your ideas for future updates.
-
- 0a6ba089eb
-
-
\ No newline at end of file
diff --git a/spaces/rahul999r/Rahul_Kannada_TTS/src/glow_tts/text/cleaners.py b/spaces/rahul999r/Rahul_Kannada_TTS/src/glow_tts/text/cleaners.py
deleted file mode 100644
index a7d4e029baa436e88e4d68090e886afdd998a68d..0000000000000000000000000000000000000000
--- a/spaces/rahul999r/Rahul_Kannada_TTS/src/glow_tts/text/cleaners.py
+++ /dev/null
@@ -1,78 +0,0 @@
-import re
-
-from unidecode import unidecode
-from .numbers import normalize_numbers
-
-
-# Regular expression matching whitespace:
-_whitespace_re = re.compile(r"\s+")
-
-def lowercase(text):
- return text.lower()
-
-def collapse_whitespace(text):
- return re.sub(_whitespace_re, " ", text)
-
-def basic_indic_cleaners(text):
- """Basic pipeline that collapses whitespace without transliteration."""
- text = collapse_whitespace(text)
- return text
-
-
-def english_cleaner(text):
- text = text.lower().replace('‘','\'').replace('’','\'')
- return text
-
-
-def convert_to_ascii(text):
- return unidecode(text)
-
-def expand_numbers(text):
- return normalize_numbers(text)
-
-def expand_abbreviations(text):
- for regex, replacement in _abbreviations:
- text = re.sub(regex, replacement, text)
- return text
-
-_abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [
- ('mrs', 'missus'),
- ('mr', 'mister'),
- ('dr', 'doctor'),
- ('st', 'saint'),
- ('co', 'company'),
- ('jr', 'junior'),
- ('maj', 'major'),
- ('gen', 'general'),
- ('drs', 'doctors'),
- ('rev', 'reverend'),
- ('lt', 'lieutenant'),
- ('hon', 'honorable'),
- ('sgt', 'sergeant'),
- ('capt', 'captain'),
- ('esq', 'esquire'),
- ('ltd', 'limited'),
- ('col', 'colonel'),
- ('ft', 'fort'),
- ('pvt', 'private'),
- ('rs', 'Rupees')
-]]
-
-
-def english_cleaners(text):
- '''Pipeline for English text, including number and abbreviation expansion.'''
- text = convert_to_ascii(text)
- text = lowercase(text)
- text = expand_numbers(text)
- text = expand_abbreviations(text)
- text = collapse_whitespace(text)
- return text
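Taken together, english_cleaners lowercases, transliterates to ASCII, expands numbers and the abbreviations above, and collapses whitespace. As a quick illustration of the abbreviation step on its own, here is a minimal, self-contained sketch that re-declares a few entries from the table above so it runs without the unidecode/numbers dependencies:

```python
import re

# Same pattern as the cleaner above: the abbreviation plus a trailing period,
# matched case-insensitively at a word boundary.
_abbreviations = [(re.compile(r'\b%s\.' % abbr, re.IGNORECASE), full)
                  for abbr, full in [('dr', 'doctor'), ('mr', 'mister'), ('rs', 'Rupees')]]

def expand_abbreviations(text):
    for regex, replacement in _abbreviations:
        text = re.sub(regex, replacement, text)
    return text

print(expand_abbreviations('Dr. Rao paid Rs. 50 to Mr. Khan.').lower())
# -> doctor rao paid rupees 50 to mister khan.
```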
diff --git a/spaces/ramkamal2000/voice-conversion-ddp/speaker_encoder/voice_encoder.py b/spaces/ramkamal2000/voice-conversion-ddp/speaker_encoder/voice_encoder.py
deleted file mode 100644
index 88cdee2de76b72db58c5dd19a888597e0fe12fbb..0000000000000000000000000000000000000000
--- a/spaces/ramkamal2000/voice-conversion-ddp/speaker_encoder/voice_encoder.py
+++ /dev/null
@@ -1,173 +0,0 @@
-from speaker_encoder.hparams import *
-from speaker_encoder import audio
-from pathlib import Path
-from typing import Union, List
-from torch import nn
-from time import perf_counter as timer
-import numpy as np
-import torch
-
-
-class SpeakerEncoder(nn.Module):
- def __init__(self, weights_fpath, device: Union[str, torch.device]=None, verbose=True):
- """
- :param device: either a torch device or the name of a torch device (e.g. "cpu", "cuda").
- If None, defaults to cuda if it is available on your machine, otherwise the model will
- run on cpu. Outputs are always returned on the cpu, as numpy arrays.
- """
- super().__init__()
-
- # Define the network
- self.lstm = nn.LSTM(mel_n_channels, model_hidden_size, model_num_layers, batch_first=True)
- self.linear = nn.Linear(model_hidden_size, model_embedding_size)
- self.relu = nn.ReLU()
-
- # Get the target device
- if device is None:
- device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
- elif isinstance(device, str):
- device = torch.device(device)
- self.device = device
-
- # Load the pretrained speaker-encoder weights
- # weights_fpath = Path(__file__).resolve().parent.joinpath("pretrained.pt")
- # if not weights_fpath.exists():
- # raise Exception("Couldn't find the voice encoder pretrained model at %s." %
- # weights_fpath)
-
- start = timer()
- checkpoint = torch.load(weights_fpath, map_location="cpu")
-
- self.load_state_dict(checkpoint["model_state"], strict=False)
- self.to(device)
-
- if verbose:
- print("Loaded the voice encoder model on %s in %.2f seconds." %
- (device.type, timer() - start))
-
- def forward(self, mels: torch.FloatTensor):
- """
- Computes the embeddings of a batch of utterance spectrograms.
- :param mels: a batch of mel spectrograms of same duration as a float32 tensor of shape
- (batch_size, n_frames, n_channels)
- :return: the embeddings as a float 32 tensor of shape (batch_size, embedding_size).
- Embeddings are positive and L2-normed, thus they lay in the range [0, 1].
- """
- # Pass the input through the LSTM layers and retrieve the final hidden state of the last
- # layer. Apply a cutoff to 0 for negative values and L2 normalize the embeddings.
- _, (hidden, _) = self.lstm(mels)
- embeds_raw = self.relu(self.linear(hidden[-1]))
- return embeds_raw / torch.norm(embeds_raw, dim=1, keepdim=True)
-
- @staticmethod
- def compute_partial_slices(n_samples: int, rate, min_coverage):
- """
- Computes where to split an utterance waveform and its corresponding mel spectrogram to
- obtain partial utterances of each. Both the waveform and the
- mel spectrogram slices are returned, so as to make each partial utterance waveform
- correspond to its spectrogram.
-
- The returned ranges may be indexing further than the length of the waveform. It is
- recommended that you pad the waveform with zeros up to wav_slices[-1].stop.
-
- :param n_samples: the number of samples in the waveform
- :param rate: how many partial utterances should occur per second. Partial utterances must
- cover the span of the entire utterance, thus the rate should not be lower than the inverse
- of the duration of a partial utterance. By default, partial utterances are 1.6s long and
- the minimum rate is thus 0.625.
- :param min_coverage: when reaching the last partial utterance, it may or may not have
- enough frames. If at least <min_coverage> of <partials_n_frames> are present,
- then the last partial utterance will be considered by zero-padding the audio. Otherwise,
- it will be discarded. If there aren't enough frames for one partial utterance,
- this parameter is ignored so that the function always returns at least one slice.
- :return: the waveform slices and mel spectrogram slices as lists of array slices. Index
- respectively the waveform and the mel spectrogram with these slices to obtain the partial
- utterances.
- """
- assert 0 < min_coverage <= 1
-
- # Compute how many frames separate two partial utterances
- samples_per_frame = int((sampling_rate * mel_window_step / 1000))
- n_frames = int(np.ceil((n_samples + 1) / samples_per_frame))
- frame_step = int(np.round((sampling_rate / rate) / samples_per_frame))
- assert 0 < frame_step, "The rate is too high"
- assert frame_step <= partials_n_frames, "The rate is too low, it should be %f at least" % \
- (sampling_rate / (samples_per_frame * partials_n_frames))
-
- # Compute the slices
- wav_slices, mel_slices = [], []
- steps = max(1, n_frames - partials_n_frames + frame_step + 1)
- for i in range(0, steps, frame_step):
- mel_range = np.array([i, i + partials_n_frames])
- wav_range = mel_range * samples_per_frame
- mel_slices.append(slice(*mel_range))
- wav_slices.append(slice(*wav_range))
-
- # Evaluate whether extra padding is warranted or not
- last_wav_range = wav_slices[-1]
- coverage = (n_samples - last_wav_range.start) / (last_wav_range.stop - last_wav_range.start)
- if coverage < min_coverage and len(mel_slices) > 1:
- mel_slices = mel_slices[:-1]
- wav_slices = wav_slices[:-1]
-
- return wav_slices, mel_slices
-
- def embed_utterance(self, wav: np.ndarray, return_partials=False, rate=1.3, min_coverage=0.75):
- """
- Computes an embedding for a single utterance. The utterance is divided in partial
- utterances and an embedding is computed for each. The complete utterance embedding is the
- L2-normed average embedding of the partial utterances.
-
- TODO: independent batched version of this function
-
- :param wav: a preprocessed utterance waveform as a numpy array of float32
- :param return_partials: if True, the partial embeddings will also be returned along with
- the wav slices corresponding to each partial utterance.
- :param rate: how many partial utterances should occur per second. Partial utterances must
- cover the span of the entire utterance, thus the rate should not be lower than the inverse
- of the duration of a partial utterance. By default, partial utterances are 1.6s long and
- the minimum rate is thus 0.625.
- :param min_coverage: when reaching the last partial utterance, it may or may not have
- enough frames. If at least <min_coverage> of <partials_n_frames> are present,
- then the last partial utterance will be considered by zero-padding the audio. Otherwise,
- it will be discarded. If there aren't enough frames for one partial utterance,
- this parameter is ignored so that the function always returns at least one slice.
- :return: the embedding as a numpy array of float32 of shape (model_embedding_size,). If
- <return_partials> is True, the partial utterances as a numpy array of float32 of shape
- (n_partials, model_embedding_size) and the wav partials as a list of slices will also be
- returned.
- """
- # Compute where to split the utterance into partials and pad the waveform with zeros if
- # the partial utterances cover a larger range.
- wav_slices, mel_slices = self.compute_partial_slices(len(wav), rate, min_coverage)
- max_wave_length = wav_slices[-1].stop
- if max_wave_length >= len(wav):
- wav = np.pad(wav, (0, max_wave_length - len(wav)), "constant")
-
- # Split the utterance into partials and forward them through the model
- mel = audio.wav_to_mel_spectrogram(wav)
- mels = np.array([mel[s] for s in mel_slices])
- with torch.no_grad():
- mels = torch.from_numpy(mels).to(self.device)
- partial_embeds = self(mels).cpu().numpy()
-
- # Compute the utterance embedding from the partial embeddings
- raw_embed = np.mean(partial_embeds, axis=0)
- embed = raw_embed / np.linalg.norm(raw_embed, 2)
-
- if return_partials:
- return embed, partial_embeds, wav_slices
- return embed
-
- def embed_speaker(self, wavs: List[np.ndarray], **kwargs):
- """
- Compute the embedding of a collection of wavs (presumably from the same speaker) by
- averaging their embedding and L2-normalizing it.
-
- :param wavs: list of wavs as numpy arrays of float32.
- :param kwargs: extra arguments to embed_utterance()
- :return: the embedding as a numpy array of float32 of shape (model_embedding_size,).
- """
- raw_embed = np.mean([self.embed_utterance(wav, return_partials=False, **kwargs) \
- for wav in wavs], axis=0)
- return raw_embed / np.linalg.norm(raw_embed, 2)
\ No newline at end of file
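For context, here is a minimal usage sketch of the encoder above. The checkpoint filename and the audio.preprocess_wav helper are assumptions based on the package layout (the class itself only requires a weights path and a preprocessed float32 waveform):

```python
import numpy as np
from speaker_encoder.voice_encoder import SpeakerEncoder
from speaker_encoder import audio  # assumed to expose a preprocess_wav() helper

# Load the pretrained weights (path is illustrative).
encoder = SpeakerEncoder("pretrained.pt", device="cpu")

# Preprocess a wav file into a float32 waveform at the model's sampling rate.
wav = audio.preprocess_wav("speaker_sample.wav")

# The utterance embedding is the L2-normed average of the partial embeddings.
embed = encoder.embed_utterance(wav)
print(embed.shape)                   # (model_embedding_size,)
print(float(np.linalg.norm(embed)))  # ~1.0, since the embedding is L2-normalized
```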
diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Download Ice Age 3 Full Movie In Hindi.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Download Ice Age 3 Full Movie In Hindi.md
deleted file mode 100644
index 95e29366be4fef3a9c4a7341fabb6c9d1245ace4..0000000000000000000000000000000000000000
--- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Download Ice Age 3 Full Movie In Hindi.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
- aaccfb2cb3
-
-
-
diff --git a/spaces/rorallitri/biomedical-language-models/logs/Beini Wpa2 Dictionary Learn How to Crack WiFi Passwords with This Wordlist.md b/spaces/rorallitri/biomedical-language-models/logs/Beini Wpa2 Dictionary Learn How to Crack WiFi Passwords with This Wordlist.md
deleted file mode 100644
index 0e810f2d1d57d039d59ac278d45fb47857b65390..0000000000000000000000000000000000000000
--- a/spaces/rorallitri/biomedical-language-models/logs/Beini Wpa2 Dictionary Learn How to Crack WiFi Passwords with This Wordlist.md
+++ /dev/null
@@ -1,28 +0,0 @@
-
-
Hello Admin, I want to ask you something. I am using Feeding Bottle on Beini, booting it from USB, and I have already downloaded the dictionary, but I don't know how to find the dictionary file in the bootloader. I hope you can teach me; I am not an expert. Thank you.
Hello admin, I want to ask you something. I downloaded the dictionary and extracted it to my desktop, but when I run ( aircrack-ng -w wordllist.text -b E8:94:F6:5B:C3:21 crack-wpa-01.cap ), it shows: # aircrack-ng -w wordlist.text -b E8:94:F6:5B:C3:21 crack-wpa-01.cap, fopen(dictionary) failed: no such file or directory, Opening crack-wpa-01.cap. I hope you can help me, thanks.
-
Hi, I am using Linux and have not yet managed to crack WPA2. I am using the rockyou.txt file, but it is not working, or not enough for me; it can't find the key for WPA2. So what can I do? Do I need to extend that file or download another big dictionary? My rockyou.txt is only 133 MB, and I need to know whether that is enough. If I need to extend it, tell me how; if I need to download another one, teach me where to get it, everyone.
-
There are just a few small problems here, buddy. 1: In order to create a big dictionary with that many combinations (9,989,999,999), it may take you several days, or rather at least a month. 2: Even if you manage to find enough time, you will need some 4,000 terabytes of space to store them. 3: When you actually want to use this password dictionary, make sure you carve the date you started into some stone, so that when you get the password 200 or 2,000 years later you will be able to remember it, if you manage to survive. Hehehe.
-
I hadn't ventured into Hackforums in a while, and this time when I went there I saw a thread about a script called Fluxion. It's based on another script called linset (actually it's not much different from linset; think of it as an improvement, with some bug fixes and additional options). I did once think about (and was asked in a comment about) using something like a man-in-the-middle/evil-twin attack to get a WPA password instead of going the brute-force/dictionary route, but never looked the idea up on the internet nor spent much time pondering it. However, once I saw the thread about this cool script, I decided to give it a try. So in this post I'll show you how I used Fluxion, and how you can too.
-
-
Wireless networks use encryption to protect the data they carry against eavesdropping and malicious modifications. However, legacy encryption protocols (like WEP) are vulnerable to attack, and even secure protocols can be cracked using brute-force and dictionary-based attacks. Several different tools exist for cracking the passwords securing Wi-Fi networks.
-
Aircrack-ng is a popular wireless password-cracking tool. It starts by capturing wireless network packets, then attempts to crack the network password by analyzing them. Aircrack-ng supports FMS, PTW, Korek and other attacks against WEP passwords. Aircrack-ng can also use dictionary attacks to guess passwords for WPA, WPA2 and WPA3 Wi-Fi networks.
-
Fern Wifi Wireless Cracker is designed to crack WEP/WPA/WPA/WPA2 keys on Wi-Fi networks. It accomplishes this through a variety of different attacks including exploitation of vulnerable protocols, phishing attacks, brute-force and dictionary-based password guessing attacks.
-
One of the most widely used password-pentesting methods is the dictionary attack, in which the cracking tool sequentially checks all the passwords stored in special files called password dictionaries.
-
RockYou (/usr/share/wordlists/rockyou) is the most popular pentest dictionary for almost any task. It can also be used for WiFi, but I recommend that you first clean out unsuitable passwords using the same pw-inspector.
-
Password attacks can be divided into two large groups: attacks on captured hashes, and attempts to guess the password during live authentication. We will not dwell on their characteristics in detail, since a dictionary attack is possible in both groups.
-
It seems rockyou is the best password dictionary for now. If you have tried all the files on the list above, then I don't have any more to recommend at the moment, but a new dictionary should arrive soon, and it will be the best: a dictionary based on LinkedIn passwords.
-
Hello, I am using Kali Linux in a VM. I have already downloaded the dictionary into a Windows folder; can you suggest a way to reach it from Linux (in the VM), or is there some process that needs to be done? Thank you in advance.
-
I'm using Kali on the Raspberry Pi 3, mostly the same, but it doesn't have the wordlist file. I've tried about a dozen different lists now, and they all come back saying "passphrase not in dictionary", 3/0 keys tested, immediately after I enter the command. I'm using aircrack and have the WPA handshake and everything.
-
Wi-Fi Protected Access (WPA/WPA2): the authentication methodology is the same for both. A dictionary attack must be deployed; otherwise (in case of brute-force/dictionary attack failure) forget about it.
-
airmon-ng start wlan0; airodump-ng mon0 (copy the BSSID and CHANNEL); airodump-ng -w wep -c CHANNEL -bssid INPUT mon0; aireplay-ng -1 0 -a INPUT mon0; aireplay-ng -3 -b INPUT mon0; and finally aircrack-ng wep-01.cap. The commands sometimes need some changes. Here is why I don't suggest using BackTrack: the file is very big; the last time I downloaded BackTrack 5.0 it was about 1.06 GB. I have tested the BackTrack 4.2 release, and luckily I managed to crack WEP after learning for just a few days. I wasted my time trying to crack WPA; from what I understand, it needs a dictionary that contains just about all words + characters + numbers so it can run dictionary attacks, so you'll need a complete dictionary to crack WPA.
-
Just thought I would share the link for those who are looking for a decent list to pen-test their networks. The list contains exactly 982,963,904 words, with no dupes, all optimized for WPA/WPA2. I would also just like to point out that this is not my work; it was a guy who compiled a whole load of useful lists, including his own, to come up with two lists (one is 11 GB and one is 2 GB). I will be seeding this torrent indefinitely since it is shareware! 20 Mb up!
-
What a great program! I hate saying this, but it runs in Windows perfectly! I loathe Windows now, but it's the only OS that can run my awesome brand-new NVIDIA card with hella CUDA cores. Before this program everyone was using pyrit for the CUDA functionality and, as I said, it's a pain. aircrack-ng is not going to handle the massive dictionary files you want to use. Download ocl-hashcat and read some tutorials on how to use it to crack your hccap file with the super duper "Super WPA" file you grabbed from this post. Another note: the .rar file in this post is (I think) made with RAR v3 or something; I couldn't get a successful extraction with 7-Zip. You'll need to download the unrar.exe command-line tool (for Windows, not DOS) from the WinRAR labs site. Beware of any other RAR programs, as they all seem to be bundled with malware these days. You can also safely extract it in Linux.
-
It takes me about 6 hours to run through the SuperWPA dictionary at 44k hashes a second using a $400 video card. That's actually pretty crappy performance for such an expensive card compared to some of the speeds I've seen posted online, but it's aight. Anyway, good luck. you'll need it.
-
Hak5 Fan ++, you're very welcome, glad it helped you out. For a point of reference: I've noticed a lot of people banging on about rainbow tables; in my experience they are a complete waste of time, bandwidth, and HD space! I saw one that was about 34 GB in size yet only had around 1000 words, compiled against around 100 DEFAULT ESSIDs such as netgear, linksys, etc., which is pointless, because default WPA keys are random characters anywhere from 8 to 12 characters long, so no dictionary in the world would crack them (you can usually use reaver to crack the PIN, though :) ). So the thing to bear in mind is: custom ESSID = custom WPA key, and a much better likelihood of cracking it. :)
-
I have been using reaver to brute-force my WPA/WPA2 connection, but I seem to have a problem: the WPS PIN cannot be found; it stops searching for a PIN at a specific place. Why is this happening? By the way, I am using reaver from BEINI OS, via Minidwep-gtk. I have captured the WPA/WPA2 handshake, but I'm not sure whether it really has a PSK, because I tried cracking it using CloudCracker and have so far been unsuccessful. I also tried the 1.2-billion-word dictionary list without success. The router I am using for the WiFi is a Belkin 3bb9, which is known for its security standards. So my question is: how do I turn this outcome into a positive one, and what should I try?
-
I do not use Kali; I use BEINI, and I used Minidwep-gtk on BEINI. So the processes are mostly automated; not much modification is possible (which means I push two or three buttons and sit back and watch the airmon-ng screen processes). Yes, Wifite and Fern, as you mentioned, are available on Kali Linux, which I do not use.
-
Also, many users I have consulted have said that Minidwep-gtk on Tiny Core Linux (BEINI) was very effective. But since my password is a non-dictionary passphrase, it's hard. And also, and and -password-profiling-with.html. Which one do you recommend, and how do I install this on BEINI Minidwep-gtk, using root or online?
aaccfb2cb3
-
-
\ No newline at end of file
diff --git a/spaces/rorallitri/biomedical-language-models/logs/FULL Adobe Photoshop Lightroom Classic CC 2018 7.2.0.10 (x64) Crack.md b/spaces/rorallitri/biomedical-language-models/logs/FULL Adobe Photoshop Lightroom Classic CC 2018 7.2.0.10 (x64) Crack.md
deleted file mode 100644
index 9fd046f97d5ef5c05f6bfd3e0c13f3149e536e8d..0000000000000000000000000000000000000000
--- a/spaces/rorallitri/biomedical-language-models/logs/FULL Adobe Photoshop Lightroom Classic CC 2018 7.2.0.10 (x64) Crack.md
+++ /dev/null
@@ -1,16 +0,0 @@
-
FULL Adobe Photoshop Lightroom Classic CC 2018 7.2.0.10 (x64) Crack
-
-
-
diff --git a/spaces/samuelinferences/TabPFN/TabPFN/scripts/transformer_prediction_interface.py b/spaces/samuelinferences/TabPFN/TabPFN/scripts/transformer_prediction_interface.py
deleted file mode 100644
index 54f29c4f0fe537c74fa12650593aaed2c5468ab7..0000000000000000000000000000000000000000
--- a/spaces/samuelinferences/TabPFN/TabPFN/scripts/transformer_prediction_interface.py
+++ /dev/null
@@ -1,357 +0,0 @@
-import torch
-import random
-
-from torch.utils.checkpoint import checkpoint
-
-from utils import normalize_data, to_ranking_low_mem, remove_outliers
-from priors.utils import normalize_by_used_features_f
-from utils import NOP
-
-from sklearn.preprocessing import PowerTransformer, QuantileTransformer, RobustScaler
-
-from notebook_utils import CustomUnpickler
-
-import numpy as np
-from sklearn.base import BaseEstimator, ClassifierMixin
-from sklearn.utils.validation import check_X_y, check_array, check_is_fitted
-from sklearn.utils.multiclass import check_classification_targets
-from sklearn.utils import column_or_1d
-from pathlib import Path
-from model_builder import load_model
-import os
-
-def load_model_workflow(i, e, add_name, base_path, device='cpu', eval_addition=''):
- """
- Workflow for loading a model and setting appropriate parameters for diffable hparam tuning.
-
- :param i:
- :param e:
- :param eval_positions_valid:
- :param add_name:
- :param base_path:
- :param device:
- :param eval_addition:
- :return:
- """
- def check_file(e):
- model_file = f'models_diff/prior_diff_real_checkpoint{add_name}_n_{i}_epoch_{e}.cpkt'
- model_path = os.path.join(base_path, model_file)
- # print('Evaluate ', model_path)
- results_file = os.path.join(base_path,
- f'models_diff/prior_diff_real_results{add_name}_n_{i}_epoch_{e}_{eval_addition}.pkl')
- if not Path(model_path).is_file(): # or Path(results_file).is_file():
- return None, None, None
- return model_file, model_path, results_file
-
- model_file = None
- if e == -1:
- for e_ in range(100, -1, -1):
- model_file_, model_path_, results_file_ = check_file(e_)
- if model_file_ is not None:
- e = e_
- model_file, model_path, results_file = model_file_, model_path_, results_file_
- break
- else:
- model_file, model_path, results_file = check_file(e)
-
- if model_file is None:
- print('No checkpoint found')
- return None
-
- print(f'Loading {model_file}')
-
- model, c = load_model(base_path, model_file, device, eval_positions=[], verbose=False)
-
- return model, c, results_file
-
-
-class TabPFNClassifier(BaseEstimator, ClassifierMixin):
-
- def __init__(self, device='cpu', base_path='.'):
- # Model file specification (Model name, Epoch)
- model_string = ''
- i, e = '8x_lr0.0003', -1
-
- # File which contains result of hyperparameter tuning run: style (i.e. hyperparameters) and a dataframe with results.
- style_file = 'prior_tuning_result.pkl'
-
- model, c, results_file = load_model_workflow(i, e, add_name=model_string, base_path=base_path, device=device,
- eval_addition='')
- style, temperature = self.load_result_minimal(style_file, i, e, base_path=base_path)
-
- self.device = device
- self.base_path = base_path
- self.model = model
- self.c = c
- self.style = style
- self.temperature = temperature
-
- self.max_num_features = self.c['num_features']
- self.max_num_classes = self.c['max_num_classes']
-
- def load_result_minimal(self, path, i, e, base_path='.'):
- with open(os.path.join(base_path,path), 'rb') as output:
- _, _, _, style, temperature, optimization_route = CustomUnpickler(output).load()
-
- return style, temperature
-
- def fit(self, X, y):
- # Check that X and y have correct shape
- X, y = check_X_y(X, y)
- y = self._validate_targets(y)
-
- self.X_ = X
- self.y_ = y
-
- if X.shape[1] > self.max_num_features:
- raise ValueError("The number of features for this classifier is restricted to ", self.max_num_features)
- if len(np.unique(y)) > self.max_num_classes:
- raise ValueError("The number of classes for this classifier is restricted to ", self.max_num_classes)
-
- # Return the classifier
- return self
-
- def _validate_targets(self, y):
- y_ = column_or_1d(y, warn=True)
- check_classification_targets(y)
- cls, y = np.unique(y_, return_inverse=True)
- if len(cls) < 2:
- raise ValueError(
- "The number of classes has to be greater than one; got %d class"
- % len(cls)
- )
-
- self.classes_ = cls
-
- return np.asarray(y, dtype=np.float64, order="C")
-
- def predict_proba(self, X):
- # Check is fit had been called
- check_is_fitted(self)
-
- # Input validation
- X = check_array(X)
-
- X_full = np.concatenate([self.X_, X], axis=0)
- X_full = torch.tensor(X_full, device=self.device).float().unsqueeze(1)
- y_full = np.concatenate([self.y_, self.y_[0] + np.zeros_like(X[:, 0])], axis=0)
- y_full = torch.tensor(y_full, device=self.device).float().unsqueeze(1)
-
- eval_pos = self.X_.shape[0]
-
- prediction = transformer_predict(self.model[2], X_full, y_full, eval_pos,
- device=self.device,
- style=self.style,
- inference_mode=True,
- N_ensemble_configurations=10,
- softmax_temperature=self.temperature
- , **get_params_from_config(self.c))
- prediction_ = prediction.squeeze(0)
-
- return prediction_.detach().cpu().numpy()
-
- def predict(self, X, return_winning_probability=False):
- p = self.predict_proba(X)
- y = np.argmax(self.predict_proba(X), axis=-1)
- y = self.classes_.take(np.asarray(y, dtype=np.intp))
- if return_winning_probability:
- return y, p.max(axis=-1)
- return y
-
-def transformer_predict(model, eval_xs, eval_ys, eval_position,
- device='cpu',
- max_features=100,
- style=None,
- inference_mode=False,
- num_classes=2,
- extend_features=True,
- normalize_to_ranking=False,
- softmax_temperature=0.0,
- multiclass_decoder='permutation',
- preprocess_transform='mix',
- categorical_feats=[],
- feature_shift_decoder=True,
- N_ensemble_configurations=10,
- average_logits=True,
- normalize_with_sqrt=False, **kwargs):
- """
-
- :param model:
- :param eval_xs:
- :param eval_ys: should be classes that are 0-indexed and every class until num_classes-1 is present
- :param eval_position:
- :param rescale_features:
- :param device:
- :param max_features:
- :param style:
- :param inference_mode:
- :param num_classes:
- :param extend_features:
- :param normalize_to_ranking:
- :param softmax_temperature:
- :param multiclass_decoder:
- :param preprocess_transform:
- :param categorical_feats:
- :param feature_shift_decoder:
- :param N_ensemble_configurations:
- :param average_logits:
- :param normalize_with_sqrt:
- :param metric_used:
- :return:
- """
- num_classes = len(torch.unique(eval_ys))
-
- def predict(eval_xs, eval_ys, used_style, softmax_temperature, return_logits):
- # Initialize results array size S, B, Classes
-
- inference_mode_call = torch.inference_mode() if inference_mode else NOP()
- with inference_mode_call:
- output = model(
- (used_style.repeat(eval_xs.shape[1], 1) if used_style is not None else None, eval_xs, eval_ys.float()),
- single_eval_pos=eval_position)[:, :, 0:num_classes]
-
- output = output[:, :, 0:num_classes] / torch.exp(softmax_temperature)
- if not return_logits:
- output = torch.nn.functional.softmax(output, dim=-1)
- #else:
- # output[:, :, 1] = model((style.repeat(eval_xs.shape[1], 1) if style is not None else None, eval_xs, eval_ys.float()),
- # single_eval_pos=eval_position)
-
- # output[:, :, 1] = torch.sigmoid(output[:, :, 1]).squeeze(-1)
- # output[:, :, 0] = 1 - output[:, :, 1]
-
- #print('RESULTS', eval_ys.shape, torch.unique(eval_ys, return_counts=True), output.mean(axis=0))
-
- return output
-
- def preprocess_input(eval_xs, preprocess_transform):
- import warnings
-
- if eval_xs.shape[1] > 1:
- raise Exception("Transforms only allow one batch dim - TODO")
- if preprocess_transform != 'none':
- if preprocess_transform == 'power' or preprocess_transform == 'power_all':
- pt = PowerTransformer(standardize=True)
- elif preprocess_transform == 'quantile' or preprocess_transform == 'quantile_all':
- pt = QuantileTransformer(output_distribution='normal')
- elif preprocess_transform == 'robust' or preprocess_transform == 'robust_all':
- pt = RobustScaler(unit_variance=True)
-
- # eval_xs, eval_ys = normalize_data(eval_xs), normalize_data(eval_ys)
- eval_xs = normalize_data(eval_xs)
-
- # Removing empty features
- eval_xs = eval_xs[:, 0, :].cpu().numpy()
- sel = [len(np.unique(eval_xs[0:eval_ys.shape[0], col])) > 1 for col in range(eval_xs.shape[1])]
- eval_xs = np.array(eval_xs[:, sel])
-
- warnings.simplefilter('error')
- if preprocess_transform != 'none':
- feats = set(range(eval_xs.shape[1])) if 'all' in preprocess_transform else set(
- range(eval_xs.shape[1])) - set(categorical_feats)
- for col in feats:
- try:
- pt.fit(eval_xs[0:eval_ys.shape[0], col:col + 1])
- trans = pt.transform(eval_xs[:, col:col + 1])
- # print(scipy.stats.spearmanr(trans[~np.isnan(eval_xs[:, col:col+1])], eval_xs[:, col:col+1][~np.isnan(eval_xs[:, col:col+1])]))
- eval_xs[:, col:col + 1] = trans
- except Exception:
- pass
- warnings.simplefilter('default')
-
- eval_xs = torch.tensor(eval_xs).float().unsqueeze(1).to(device)
-
- # eval_xs = normalize_data(eval_xs)
-
- # TODO: Caution, there is information leakage when to_ranking is used; we should not use it
- eval_xs = remove_outliers(eval_xs) if not normalize_to_ranking else normalize_data(to_ranking_low_mem(eval_xs))
-
- # Rescale X
- eval_xs = normalize_by_used_features_f(eval_xs, eval_xs.shape[-1], max_features,
- normalize_with_sqrt=normalize_with_sqrt)
- return eval_xs.detach()
-
- eval_xs, eval_ys = eval_xs.to(device), eval_ys.to(device)
- eval_ys = eval_ys[:eval_position]
-
- model.to(device)
- style = style.to(device)
-
- model.eval()
-
- import itertools
- style = style.unsqueeze(0) if len(style.shape) == 1 else style
- num_styles = style.shape[0]
- styles_configurations = range(0, num_styles)
- preprocess_transform_configurations = [preprocess_transform if i % 2 == 0 else 'none' for i in range(0, num_styles)]
- if preprocess_transform == 'mix':
- def get_preprocess(i):
- if i == 0:
- return 'power_all'
- if i == 1:
- return 'robust_all'
- if i == 2:
- return 'none'
- preprocess_transform_configurations = [get_preprocess(i) for i in range(0, num_styles)]
- styles_configurations = zip(styles_configurations, preprocess_transform_configurations)
-
- feature_shift_configurations = range(0, eval_xs.shape[2]) if feature_shift_decoder else [0]
- class_shift_configurations = range(0, len(torch.unique(eval_ys))) if multiclass_decoder == 'permutation' else [0]
-
- ensemble_configurations = list(itertools.product(styles_configurations, feature_shift_configurations, class_shift_configurations))
- random.shuffle(ensemble_configurations)
- ensemble_configurations = ensemble_configurations[0:N_ensemble_configurations]
-
- output = None
-
- eval_xs_transformed = {}
- for ensemble_configuration in ensemble_configurations:
- (styles_configuration, preprocess_transform_configuration), feature_shift_configuration, class_shift_configuration = ensemble_configuration
-
- style_ = style[styles_configuration:styles_configuration+1, :]
- softmax_temperature_ = softmax_temperature[styles_configuration]
-
- eval_xs_, eval_ys_ = eval_xs.clone(), eval_ys.clone()
-
- if preprocess_transform_configuration in eval_xs_transformed:
- eval_xs_ = eval_xs_transformed[preprocess_transform_configuration].clone()  # cache hit: key by the transform name
- else:
- eval_xs_ = preprocess_input(eval_xs_, preprocess_transform=preprocess_transform_configuration)
- eval_xs_transformed[preprocess_transform_configuration] = eval_xs_  # cache the transformed inputs under the transform name
-
- eval_ys_ = ((eval_ys_ + class_shift_configuration) % num_classes).float()
-
- eval_xs_ = torch.cat([eval_xs_[..., feature_shift_configuration:],eval_xs_[..., :feature_shift_configuration]],dim=-1)
-
- # Extend X
- if extend_features:
- eval_xs_ = torch.cat(
- [eval_xs_,
- torch.zeros((eval_xs_.shape[0], eval_xs_.shape[1], max_features - eval_xs_.shape[2])).to(device)], -1)
-
- #preprocess_transform_ = preprocess_transform if styles_configuration % 2 == 0 else 'none'
- import warnings
- with warnings.catch_warnings():
- warnings.filterwarnings("ignore", message="None of the inputs have requires_grad=True. Gradients will be None")
- output_ = checkpoint(predict, eval_xs_, eval_ys_, style_, softmax_temperature_, True)
- output_ = torch.cat([output_[..., class_shift_configuration:],output_[..., :class_shift_configuration]],dim=-1)
-
- #output_ = predict(eval_xs, eval_ys, style_, preprocess_transform_)
- if not average_logits:
- output_ = torch.nn.functional.softmax(output_, dim=-1)
- output = output_ if output is None else output + output_
-
- output = output / len(ensemble_configurations)
- if average_logits:
- output = torch.nn.functional.softmax(output, dim=-1)
-
- output = torch.transpose(output, 0, 1)
-
- return output
-
-def get_params_from_config(c):
- return {'max_features': c['num_features']
- , 'rescale_features': c["normalize_by_used_features"]
- , 'normalize_to_ranking': c["normalize_to_ranking"]
- , 'normalize_with_sqrt': c.get("normalize_with_sqrt", False)
- }
\ No newline at end of file
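For reference, TabPFNClassifier above implements the scikit-learn estimator API (fit/predict/predict_proba), so it drops into a standard sklearn workflow. A minimal sketch, assuming the pretrained checkpoint under models_diff/ and prior_tuning_result.pkl are present in base_path, and that the import path matches your checkout:

```python
import numpy as np
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split

# Import path is an assumption; adjust to wherever this module lives in your checkout.
from scripts.transformer_prediction_interface import TabPFNClassifier

# 30 features and 2 classes, well within the model's max_num_features/max_num_classes.
X, y = load_breast_cancer(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

clf = TabPFNClassifier(device='cpu', base_path='.')
clf.fit(X_train, y_train)          # only stores the training set; inference is in-context
proba = clf.predict_proba(X_test)  # shape (n_test, n_classes)
pred = clf.predict(X_test)
print('accuracy:', np.mean(pred == y_test))
```

Note that fit() is cheap by design: the transformer conditions on the stored training set at prediction time, which is why predict_proba concatenates self.X_ with the query points before calling transformer_predict.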
diff --git a/spaces/santrox/phcspmedpredic/app.py b/spaces/santrox/phcspmedpredic/app.py
deleted file mode 100644
index 37a3590fae04d4071598ef2be2ced1c9fbc3a608..0000000000000000000000000000000000000000
--- a/spaces/santrox/phcspmedpredic/app.py
+++ /dev/null
@@ -1,8 +0,0 @@
-import gradio as gr
-
-# Load the pretrained pneumonia classifier hosted on the Hugging Face Hub.
-modelPneumonia = gr.Interface.load("huggingface/nickmuchi/vit-finetuned-chest-xray-pneumonia",
- title="Pneumonia Prediction",
- description="Predict Pneumonia from XRay Image"
- )
-# Close any previously running instance before launching a fresh one.
-modelPneumonia.close()
-modelPneumonia.launch()
\ No newline at end of file
diff --git a/spaces/scedlatioru/img-to-music/The-Dark-Knight-Rises-Hindi-Movie-720p-BEST.md b/spaces/scedlatioru/img-to-music/The-Dark-Knight-Rises-Hindi-Movie-720p-BEST.md
deleted file mode 100644
index eb719397c1871636e50263cb0b0ca06f6d7fb273..0000000000000000000000000000000000000000
--- a/spaces/scedlatioru/img-to-music/The-Dark-Knight-Rises-Hindi-Movie-720p-BEST.md
+++ /dev/null
@@ -1,36 +0,0 @@
-The Dark Knight Rises Hindi Movie 720p
-
-
-
-LINK >>> [https://ekporriola.blogspot.com/?c=2tvDMY](https://ekporriola.blogspot.com/?c=2tvDMY)
-
-
-
-
-
-
-
-
-
-The Dark Knight Rises: A Thrilling Conclusion to the Batman Trilogy
-The Dark Knight Rises is the third and final installment of the Batman trilogy directed by Christopher Nolan. The film stars Christian Bale as Bruce Wayne/Batman, Anne Hathaway as Selina Kyle/Catwoman, Tom Hardy as Bane, and Joseph Gordon-Levitt as John Blake. The film was released in 2012 and received critical acclaim and commercial success.
-The Dark Knight Rises follows the events of The Dark Knight, where Batman took the blame for the death of Harvey Dent and went into hiding. Eight years later, Gotham City is at peace under the Dent Act, but a new threat emerges in the form of Bane, a masked terrorist who plans to destroy Gotham with a nuclear bomb. Batman must come out of retirement and team up with Catwoman and Blake to stop Bane and his allies.
-The Dark Knight Rises is a gripping and epic conclusion to the Batman saga, featuring stunning action sequences, emotional performances, and a satisfying resolution. The film explores themes of heroism, sacrifice, redemption, and legacy. The film also pays homage to the previous films in the trilogy, such as Batman Begins and The Dark Knight.
If you are a fan of Batman or superhero movies in general, you should not miss The Dark Knight Rises. You can watch the Hindi-dubbed version in 720p quality on usamovie.ga[^1^]. Enjoy!
-
Here are some more details about The Dark Knight Rises:
-
-The film was shot in various locations around the world, such as Pittsburgh, New York, Los Angeles, London, Glasgow, and India.
-The film features several references and Easter eggs to the Batman comics and other media, such as the Batcave, the Batpod, the Batwing, the Bat-Signal, and Robin.
-The film also features several cameo appearances by actors from previous Batman films, such as Michael Caine as Alfred Pennyworth, Gary Oldman as James Gordon, Morgan Freeman as Lucius Fox, Liam Neeson as Ra's al Ghul, and Cillian Murphy as Jonathan Crane/Scarecrow.
-The film was nominated for eight Academy Awards, including Best Picture, Best Director, Best Adapted Screenplay, Best Cinematography, Best Original Score, Best Sound Editing, Best Sound Mixing, and Best Visual Effects.
-The film grossed over $1 billion worldwide, making it the seventh-highest-grossing film of all time and the second-highest-grossing film of 2012.
-
The Dark Knight Rises is a masterpiece of filmmaking that will leave you breathless and amazed. Don't miss this opportunity to watch the Hindi-dubbed version in 720p quality on usamovie.ga. You won't regret it!
-
Here are some more reasons why you should watch The Dark Knight Rises:
-
-The film has a brilliant cast of actors who deliver powerful and memorable performances. Christian Bale portrays Batman as a broken and haunted hero who must overcome his fears and doubts to save Gotham. Anne Hathaway brings charm and wit to Catwoman, a complex and mysterious character who has her own agenda. Tom Hardy is menacing and formidable as Bane, a ruthless and intelligent villain who challenges Batman physically and mentally. Joseph Gordon-Levitt is inspiring and loyal as Blake, a young police officer who believes in Batman's legacy.
-The film has a captivating and intricate plot that keeps you on the edge of your seat. The film has many twists and turns that will surprise and shock you. The film also has a lot of emotional moments that will make you laugh, cry, and cheer. The film balances action, drama, humor, and romance in a perfect way.
-The film has a spectacular and immersive visual style that creates a realistic and gritty world of Gotham. The film uses practical effects and minimal CGI to create stunning scenes of destruction, chaos, and heroism. The film also has a beautiful and haunting score by Hans Zimmer that enhances the mood and atmosphere of the film.
-
-The Dark Knight Rises is a cinematic masterpiece that you should not miss. It is the ultimate Batman movie that will leave you satisfied and amazed. Watch it now in a Hindi dubbed version in 720p quality on usamovie.ga. You will love it!
-
-
-
diff --git a/spaces/scedlatioru/img-to-music/example/Tudo O Que Voce Queria Ouvir - O Melhor Dos GNR.md b/spaces/scedlatioru/img-to-music/example/Tudo O Que Voce Queria Ouvir - O Melhor Dos GNR.md
deleted file mode 100644
index 261b273ff816e4721e87daf22b91b0510613962e..0000000000000000000000000000000000000000
--- a/spaces/scedlatioru/img-to-music/example/Tudo O Que Voce Queria Ouvir - O Melhor Dos GNR.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
-The GNR group returns to Coruche ten years after their last performance in the town. ... “I want you to warm me up this winter / And may everything else go to ...” ... a thousand people to see and hear the band, which had recently released the album ... this year GNR released “O melhor dos GNR: Continuação vol.
-
-
-
diff --git a/spaces/sczhou/CodeFormer/CodeFormer/facelib/parsing/parsenet.py b/spaces/sczhou/CodeFormer/CodeFormer/facelib/parsing/parsenet.py
deleted file mode 100644
index e178ebe43a1ef666aaea0bc0faf629485c22a24f..0000000000000000000000000000000000000000
--- a/spaces/sczhou/CodeFormer/CodeFormer/facelib/parsing/parsenet.py
+++ /dev/null
@@ -1,194 +0,0 @@
-"""Modified from https://github.com/chaofengc/PSFRGAN
-"""
-import numpy as np
-import torch.nn as nn
-from torch.nn import functional as F
-
-
-class NormLayer(nn.Module):
- """Normalization Layers.
-
- Args:
- channels: input channels, for batch norm and instance norm.
- input_size: input shape without batch size, for layer norm.
- """
-
- def __init__(self, channels, normalize_shape=None, norm_type='bn'):
- super(NormLayer, self).__init__()
- norm_type = norm_type.lower()
- self.norm_type = norm_type
- if norm_type == 'bn':
- self.norm = nn.BatchNorm2d(channels, affine=True)
- elif norm_type == 'in':
- self.norm = nn.InstanceNorm2d(channels, affine=False)
- elif norm_type == 'gn':
- self.norm = nn.GroupNorm(32, channels, affine=True)
- elif norm_type == 'pixel':
- self.norm = lambda x: F.normalize(x, p=2, dim=1)
- elif norm_type == 'layer':
- self.norm = nn.LayerNorm(normalize_shape)
- elif norm_type == 'none':
- self.norm = lambda x: x * 1.0
- else:
- raise ValueError(f'Norm type {norm_type} is not supported.')
-
- def forward(self, x, ref=None):
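- # Note: the 'spade' branch below mirrors the upstream PSFRGAN code, but
- # __init__ above never constructs a SPADE norm, so it is unreachable here.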
- if self.norm_type == 'spade':
- return self.norm(x, ref)
- else:
- return self.norm(x)
-
-
-class ReluLayer(nn.Module):
- """Relu Layer.
-
- Args:
- relu type: type of relu layer, candidates are
- - ReLU
- - LeakyReLU: default relu slope 0.2
- - PRelu
- - SELU
- - none: direct pass
- """
-
- def __init__(self, channels, relu_type='relu'):
- super(ReluLayer, self).__init__()
- relu_type = relu_type.lower()
- if relu_type == 'relu':
- self.func = nn.ReLU(True)
- elif relu_type == 'leakyrelu':
- self.func = nn.LeakyReLU(0.2, inplace=True)
- elif relu_type == 'prelu':
- self.func = nn.PReLU(channels)
- elif relu_type == 'selu':
- self.func = nn.SELU(True)
- elif relu_type == 'none':
- self.func = lambda x: x * 1.0
- else:
- raise ValueError(f'Relu type {relu_type} is not supported.')
-
- def forward(self, x):
- return self.func(x)
-
-
-class ConvLayer(nn.Module):
-
- def __init__(self,
- in_channels,
- out_channels,
- kernel_size=3,
- scale='none',
- norm_type='none',
- relu_type='none',
- use_pad=True,
- bias=True):
- super(ConvLayer, self).__init__()
- self.use_pad = use_pad
- self.norm_type = norm_type
- if norm_type in ['bn']:
- bias = False
-
- stride = 2 if scale == 'down' else 1
-
- self.scale_func = lambda x: x
- if scale == 'up':
- self.scale_func = lambda x: nn.functional.interpolate(x, scale_factor=2, mode='nearest')
-
- self.reflection_pad = nn.ReflectionPad2d(int(np.ceil((kernel_size - 1.) / 2)))
- self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size, stride, bias=bias)
-
- self.relu = ReluLayer(out_channels, relu_type)
- self.norm = NormLayer(out_channels, norm_type=norm_type)
-
- def forward(self, x):
- out = self.scale_func(x)
- if self.use_pad:
- out = self.reflection_pad(out)
- out = self.conv2d(out)
- out = self.norm(out)
- out = self.relu(out)
- return out
-
-
-class ResidualBlock(nn.Module):
- """
- Residual block recommended in: http://torch.ch/blog/2016/02/04/resnets.html
- """
-
- def __init__(self, c_in, c_out, relu_type='prelu', norm_type='bn', scale='none'):
- super(ResidualBlock, self).__init__()
-
- if scale == 'none' and c_in == c_out:
- self.shortcut_func = lambda x: x
- else:
- self.shortcut_func = ConvLayer(c_in, c_out, 3, scale)
-
- scale_config_dict = {'down': ['none', 'down'], 'up': ['up', 'none'], 'none': ['none', 'none']}
- scale_conf = scale_config_dict[scale]
-
- self.conv1 = ConvLayer(c_in, c_out, 3, scale_conf[0], norm_type=norm_type, relu_type=relu_type)
- self.conv2 = ConvLayer(c_out, c_out, 3, scale_conf[1], norm_type=norm_type, relu_type='none')
-
- def forward(self, x):
- identity = self.shortcut_func(x)
-
- res = self.conv1(x)
- res = self.conv2(res)
- return identity + res
-
-
-class ParseNet(nn.Module):
-
- def __init__(self,
- in_size=128,
- out_size=128,
- min_feat_size=32,
- base_ch=64,
- parsing_ch=19,
- res_depth=10,
- relu_type='LeakyReLU',
- norm_type='bn',
- ch_range=[32, 256]):
- super().__init__()
- self.res_depth = res_depth
- act_args = {'norm_type': norm_type, 'relu_type': relu_type}
- min_ch, max_ch = ch_range
-
- ch_clip = lambda x: max(min_ch, min(x, max_ch)) # noqa: E731
- min_feat_size = min(in_size, min_feat_size)
-
- down_steps = int(np.log2(in_size // min_feat_size))
- up_steps = int(np.log2(out_size // min_feat_size))
-
- # =============== define encoder-body-decoder ====================
- self.encoder = []
- self.encoder.append(ConvLayer(3, base_ch, 3))  # scale defaults to 'none'
- head_ch = base_ch
- for i in range(down_steps):
- cin, cout = ch_clip(head_ch), ch_clip(head_ch * 2)
- self.encoder.append(ResidualBlock(cin, cout, scale='down', **act_args))
- head_ch = head_ch * 2
-
- self.body = []
- for i in range(res_depth):
- self.body.append(ResidualBlock(ch_clip(head_ch), ch_clip(head_ch), **act_args))
-
- self.decoder = []
- for i in range(up_steps):
- cin, cout = ch_clip(head_ch), ch_clip(head_ch // 2)
- self.decoder.append(ResidualBlock(cin, cout, scale='up', **act_args))
- head_ch = head_ch // 2
-
- self.encoder = nn.Sequential(*self.encoder)
- self.body = nn.Sequential(*self.body)
- self.decoder = nn.Sequential(*self.decoder)
- self.out_img_conv = ConvLayer(ch_clip(head_ch), 3)
- self.out_mask_conv = ConvLayer(ch_clip(head_ch), parsing_ch)
-
- def forward(self, x):
- feat = self.encoder(x)
- x = feat + self.body(feat)
- x = self.decoder(x)
- out_img = self.out_img_conv(x)
- out_mask = self.out_mask_conv(x)
- return out_mask, out_img
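-
-
-# Minimal usage sketch (illustrative only, assuming the default arguments above):
-#   import torch
-#   net = ParseNet(in_size=128, out_size=128)
-#   out_mask, out_img = net(torch.randn(1, 3, 128, 128))
-#   # out_mask: (1, 19, 128, 128) parsing logits; out_img: (1, 3, 128, 128) image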
diff --git a/spaces/seanbenhur/tamilatis/tamilatis/trainer.py b/spaces/seanbenhur/tamilatis/tamilatis/trainer.py
deleted file mode 100644
index 147ab764e297e5d220a91536de67c4ed8b4f26dd..0000000000000000000000000000000000000000
--- a/spaces/seanbenhur/tamilatis/tamilatis/trainer.py
+++ /dev/null
@@ -1,284 +0,0 @@
-import copy
-import logging
-import os
-
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-import wandb
-from torchmetrics.functional import accuracy, f1_score, precision, recall
-from tqdm import tqdm, trange
-
-logger = logging.getLogger(__name__)
-
-
-class ATISTrainer:
- """A Trainer class consists of utitlity functions for training the model"""
- def __init__(
- self,
- model,
- optimizer,
- scheduler,
- criterion,
- accelerate,
- output_dir,
- num_labels,
- num_intents,
- run
- ):
- self.model = model
- self.criterion = criterion
- self.optimizer = optimizer
- self.scheduler = scheduler
- self.accelerator = accelerate
- self.output_dir = output_dir
- self.num_labels = num_labels
- self.num_intents = num_intents
-
- if not os.path.exists(self.output_dir):
- os.makedirs(self.output_dir)
-
- self.run = run
- logging.info(f"Strating Training, outputs are saved in {self.output_dir}")
-
- def train_step(self, iterator):
- training_progress_bar = tqdm(iterator, desc="training")
- for batch in training_progress_bar:
- input_ids, attention_mask, labels, intents = (
- batch["input_ids"],
- batch["attention_mask"],
- batch["labels"],
- batch["intent"],
- )
- self.optimizer.zero_grad()
- loss_dict = self.model(input_ids, attention_mask, labels)
- slot_logits, intent_logits, slot_loss = (
- loss_dict["dst_logits"],
- loss_dict["intent_loss"],
- loss_dict["dst_loss"],
- )
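- # NOTE: the intent logits are read from the "intent_loss" key; the model
- # apparently stores its intent logits under that misleading name.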
-
- # compute training accuracy for slots
- flattened_target_labels = batch["labels"].view(
- -1
- ) # [batch_size * seq_len, ]
- active_logits = slot_logits.view(
- -1, self.num_labels
- ) # [batch_size* seq_len, num_labels]
- flattened_preds = torch.argmax(
- active_logits, axis=-1
- ) # [batch_size * seq_len,]
-
- # compute accuracy at active labels
- active_accuracy = (
- batch["labels"].view(-1) != -100
- ) # [batch_size * seq_len, ]
-
- slot_labels = torch.masked_select(flattened_target_labels, active_accuracy)
- slot_preds = torch.masked_select(flattened_preds, active_accuracy)
-
- # compute the intent loss, weighted via RLW (random loss weighting)
- intent_loss = self.criterion(intent_logits, batch["intent"])
- weight = F.softmax(torch.randn(1), dim=-1) # RLW is only this!
- intent_loss = torch.sum(intent_loss*weight.cuda())
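- # NOTE: softmax over a single random scalar always yields 1.0, so the
- # weighting above is a no-op as written. A faithful RLW sketch (an
- # illustration, not this repo's code) would draw one weight per task:
- #   w = F.softmax(torch.randn(2, device=intent_loss.device), dim=-1)
- #   train_loss = w[0] * slot_loss + w[1] * intent_loss
- # The hard-coded .cuda() also bypasses accelerate's device placement.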
- intent_preds = torch.argmax(intent_logits, axis=1)
- train_loss = slot_loss + intent_loss
- self.accelerator.backward(train_loss)
- self.optimizer.step()
-
- # step the scheduler once per batch, unless the optimizer step was skipped
- if self.scheduler is not None:
- if not self.accelerator.optimizer_step_was_skipped:
- self.scheduler.step()
-
- intent_acc = accuracy(
- intent_preds, intents, num_classes=self.num_intents, average="weighted"
- )
- intent_f1 = f1_score(
- intent_preds, intents, num_classes=self.num_intents, average="weighted"
- )
- intent_rec = recall(
- intent_preds, intents, num_classes=self.num_intents, average="weighted"
- )
- intent_prec = precision(
- intent_preds, intents, num_classes=self.num_intents, average="weighted"
- )
-
- slot_acc = accuracy(
- slot_preds, slot_labels, num_classes=self.num_labels, average="weighted"
- )
- slot_f1 = f1_score(
- slot_preds, slot_labels, num_classes=self.num_labels, average="weighted"
- )
- slot_rec = recall(
- slot_preds, slot_labels, num_classes=self.num_labels, average="weighted"
- )
- slot_prec = precision(
- slot_preds, slot_labels, num_classes=self.num_labels, average="weighted"
- )
-
- self.run.log(
- {
- "train_loss_step": train_loss.cpu().detach().numpy(),
- "train_intent_acc_step": intent_acc,
- "train_intent_f1_step": intent_f1,
- "train_slot_acc_step": slot_acc,
- "train_slot_f1_step": slot_f1,
- }
- )
- # logging.info({"train_loss_step": train_loss, "train_intent_acc_step": intent_acc, "train_intent_f1_step": intent_f1, "train_slot_acc_step": slot_acc, "train_slot_f1_step": slot_f1 })
-
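- # NOTE: the epoch metrics below use values from the final batch only;
- # dividing by len(iterator) does not produce a true epoch average.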
- return {
- "train_loss_epoch": train_loss / len(iterator),
- "train_intent_f1_epoch": intent_f1 / len(iterator),
- "train_intent_acc_epoch": intent_acc / len(iterator),
- "train_slot_f1_epoch": slot_f1 / len(iterator),
- "train_slot_acc_epoch": slot_acc / len(iterator),
- }
-
- @torch.no_grad()
- def eval_step(self, iterator):
- eval_progress_bar = tqdm(iterator, desc="Evaluating")
- for batch in eval_progress_bar:
- input_ids, attention_mask, labels, intents = (
- batch["input_ids"],
- batch["attention_mask"],
- batch["labels"],
- batch["intent"],
- )
- loss_dict = self.model(input_ids, attention_mask, labels)
- slot_logits, intent_logits, slot_loss = (
- loss_dict["dst_logits"],
- loss_dict["intent_loss"],
- loss_dict["dst_loss"],
- )
- # compute training accuracy for slots
- flattened_target_labels = batch["labels"].view(
- -1
- ) # [batch_size * seq_len, ]
- active_logits = slot_logits.view(
- -1, self.num_labels
- ) # [batch_size* seq_len, num_labels]
- flattened_preds = torch.argmax(
- active_logits, axis=-1
- ) # [batch_size * seq_len,]
-
- # compute accuracy at active labels
- active_accuracy = (
- batch["labels"].view(-1) != -100
- ) # [batch_size * seq_len, ]
-
- slot_labels = torch.masked_select(flattened_target_labels, active_accuracy)
- slot_preds = torch.masked_select(flattened_preds, active_accuracy)
-
- # compute loss for intents
- intent_loss = self.criterion(intent_logits, batch["intent"])
- weight = F.softmax(torch.randn(1), dim=-1) # RLW is only this!
- intent_loss = torch.sum(intent_loss*weight.cuda())
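- # (Same caveat as in train_step: this softmax weight is always 1.0.)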
-
- intent_preds = torch.argmax(intent_logits, axis=1)
- eval_loss = slot_loss + intent_loss
-
- intent_acc = accuracy(
- intent_preds, intents, num_classes=self.num_intents, average="weighted"
- )
- intent_f1 = f1_score(
- intent_preds, intents, num_classes=self.num_intents, average="weighted"
- )
- intent_rec = recall(
- intent_preds, intents, num_classes=self.num_intents, average="weighted"
- )
- intent_prec = precision(
- intent_preds, intents, num_classes=self.num_intents, average="weighted"
- )
-
- slot_acc = accuracy(
- slot_preds, slot_labels, num_classes=self.num_labels, average="weighted"
- )
- slot_f1 = f1_score(
- slot_preds, slot_labels, num_classes=self.num_labels, average="weighted"
- )
- slot_rec = recall(
- slot_preds, slot_labels, num_classes=self.num_labels, average="weighted"
- )
- slot_prec = precision(
- slot_preds, slot_labels, num_classes=self.num_labels, average="weighted"
- )
-
- self.run.log(
- {
- "eval_loss_step": eval_loss,
- "eval_intent_acc_step": intent_acc,
- "eval_intent_f1_step": intent_f1,
- "eval_slot_acc_step": slot_acc,
- "eval_slot_f1_step": slot_f1,
- }
- )
-
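- # NOTE: as in train_step, these are final-batch values divided by the
- # number of batches, not true epoch averages.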
- return {
- "eval_loss_epoch": eval_loss / len(iterator),
- "eval_intent_f1_epoch": intent_f1 / len(iterator),
- "eval_intent_acc_epoch": intent_acc / len(iterator),
- "eval_slot_f1_epoch": slot_f1 / len(iterator),
- "eval_slot_acc_epoch": slot_acc / len(iterator),
- }
-
- def fit(self, n_epochs, train_dataloader, eval_dataloader, patience):
- best_eval_loss = float("inf")
- pbar = trange(n_epochs)
-
- for epoch in pbar:
- train_metrics_dict = self.train_step(train_dataloader)
- eval_metrics_dict = self.eval_step(eval_dataloader)
- # access all the values from the dicts
- train_loss, eval_loss = (
- train_metrics_dict["train_loss_epoch"],
- eval_metrics_dict["eval_loss_epoch"],
- )
- train_intent_f1, eval_intent_f1 = (
- train_metrics_dict["train_intent_f1_epoch"],
- eval_metrics_dict["eval_intent_f1_epoch"],
- )
- train_intent_acc, eval_intent_acc = (
- train_metrics_dict["train_intent_acc_epoch"],
- eval_metrics_dict["eval_intent_acc_epoch"],
- )
- train_slot_f1, eval_slot_f1 = (
- train_metrics_dict["train_slot_f1_epoch"],
- eval_metrics_dict["eval_slot_f1_epoch"],
- )
- train_slot_acc, eval_slot_acc = (
- train_metrics_dict["train_slot_acc_epoch"],
- eval_metrics_dict["eval_slot_acc_epoch"],
- )
-
-
- if eval_loss < best_eval_loss:
- best_model = copy.deepcopy(self.model)
- best_eval_loss = eval_loss
-
- train_logs = {
- "epoch": epoch,
- "train_loss": train_loss,
- "eval_loss": eval_loss,
- "train_intent_acc": train_intent_acc,
- "train_intent_f1": train_intent_f1,
- "eval_intent_f1": eval_intent_f1,
- "eval_intent_acc": eval_intent_acc,
- "train_slot_f1": train_slot_f1,
- "train_slot_acc": train_slot_acc,
- "lr": {self.optimizer.param_groups[0]["lr"]: 0.2},
- }
-
- train_logs["patience"] = patience
- logging.info(train_logs)
- logging.info(eval_metrics_dict)
-
- self.accelerator.wait_for_everyone()
- model = self.accelerator.unwrap_model(self.model)
- self.accelerator.save_state(self.output_dir)
- logging.info(f"Checkpoint is saved in {self.output_dir}")
-
- return best_model, best_eval_loss
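-
-
-# Minimal usage sketch (names are illustrative, not from this repo):
-#   trainer = ATISTrainer(model, optimizer, scheduler, criterion, accelerator,
-#                         "outputs/", num_labels, num_intents, wandb_run)
-#   best_model, best_loss = trainer.fit(10, train_dl, eval_dl, patience=3)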
diff --git a/spaces/shaocongma/faiss_chat/llms/embeddings.py b/spaces/shaocongma/faiss_chat/llms/embeddings.py
deleted file mode 100644
index ce1d85ea91a2ec40025f180ff531e04344dd5fbb..0000000000000000000000000000000000000000
--- a/spaces/shaocongma/faiss_chat/llms/embeddings.py
+++ /dev/null
@@ -1,30 +0,0 @@
-from langchain.embeddings.openai import OpenAIEmbeddings
-from langchain.embeddings import HuggingFaceEmbeddings
-
-model_name = "sebastian-hofstaetter/distilbert-dot-tas_b-b256-msmarco"
-model_kwargs = {'device': 'cpu'}
-encode_kwargs = {'normalize_embeddings': False}
-hf_embeddings_1 = HuggingFaceEmbeddings(
- model_name=model_name,
- model_kwargs=model_kwargs,
- encode_kwargs=encode_kwargs)
-
-openai_embedding = OpenAIEmbeddings(model="text-embedding-ada-002")
-
-
-model_name = "GanymedeNil/text2vec-large-chinese"
-hf_embeddings_2 = HuggingFaceEmbeddings(
- model_name=model_name,
- model_kwargs=model_kwargs,
- encode_kwargs=encode_kwargs)
-
-
-EMBEDDINGS_MAPPING = {"text-embedding-ada-002": openai_embedding,
- "distilbert-dot-tas_b-b256-msmarco": hf_embeddings_1,
- "text2vec-large-chinese": hf_embeddings_2}
-
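-# Example lookup (sketch; embed_query is the standard LangChain Embeddings API):
-#   emb = EMBEDDINGS_MAPPING["distilbert-dot-tas_b-b256-msmarco"]
-#   vector = emb.embed_query("what is dense retrieval?")
-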
-def main():
- pass
-
-if __name__ == "__main__":
- main()
diff --git a/spaces/simonduerr/ProteinMPNN/af_backprop/alphafold/model/tf/data_transforms.py b/spaces/simonduerr/ProteinMPNN/af_backprop/alphafold/model/tf/data_transforms.py
deleted file mode 100644
index 7af966ef4b7cb469f2b817a16ad42eea50f31e18..0000000000000000000000000000000000000000
--- a/spaces/simonduerr/ProteinMPNN/af_backprop/alphafold/model/tf/data_transforms.py
+++ /dev/null
@@ -1,625 +0,0 @@
-# Copyright 2021 DeepMind Technologies Limited
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Data for AlphaFold."""
-
-from alphafold.common import residue_constants
-from alphafold.model.tf import shape_helpers
-from alphafold.model.tf import shape_placeholders
-from alphafold.model.tf import utils
-import numpy as np
-import tensorflow.compat.v1 as tf
-
-# Pylint gets confused by the curry1 decorator because it changes the number
-# of arguments to the function.
-# pylint:disable=no-value-for-parameter
-
-
-NUM_RES = shape_placeholders.NUM_RES
-NUM_MSA_SEQ = shape_placeholders.NUM_MSA_SEQ
-NUM_EXTRA_SEQ = shape_placeholders.NUM_EXTRA_SEQ
-NUM_TEMPLATES = shape_placeholders.NUM_TEMPLATES
-
-
-def cast_64bit_ints(protein):
- for k, v in protein.items():
- if v.dtype == tf.int64:
- protein[k] = tf.cast(v, tf.int32)
- return protein
-
-
-_MSA_FEATURE_NAMES = [
- 'msa', 'deletion_matrix', 'msa_mask', 'msa_row_mask', 'bert_mask',
- 'true_msa'
-]
-
-
-def make_seq_mask(protein):
- protein['seq_mask'] = tf.ones(
- shape_helpers.shape_list(protein['aatype']), dtype=tf.float32)
- return protein
-
-
-def make_template_mask(protein):
- protein['template_mask'] = tf.ones(
- shape_helpers.shape_list(protein['template_domain_names']),
- dtype=tf.float32)
- return protein
-
-
-def curry1(f):
- """Supply all arguments but the first."""
-
- def fc(*args, **kwargs):
- return lambda x: f(x, *args, **kwargs)
-
- return fc
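-
-# Example (illustrative): curried transforms compose into a simple pipeline:
-#   transforms = [add_distillation_flag(True), sample_msa(128, keep_extra=True)]
-#   for t in transforms:
-#     protein = t(protein)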
-
-
-@curry1
-def add_distillation_flag(protein, distillation):
- protein['is_distillation'] = tf.constant(float(distillation),
- shape=[],
- dtype=tf.float32)
- return protein
-
-
-def make_all_atom_aatype(protein):
- protein['all_atom_aatype'] = protein['aatype']
- return protein
-
-
-def fix_templates_aatype(protein):
- """Fixes aatype encoding of templates."""
- # Map one-hot to indices.
- protein['template_aatype'] = tf.argmax(
- protein['template_aatype'], output_type=tf.int32, axis=-1)
- # Map hhsearch-aatype to our aatype.
- new_order_list = residue_constants.MAP_HHBLITS_AATYPE_TO_OUR_AATYPE
- new_order = tf.constant(new_order_list, dtype=tf.int32)
- protein['template_aatype'] = tf.gather(params=new_order,
- indices=protein['template_aatype'])
- return protein
-
-
-def correct_msa_restypes(protein):
- """Correct MSA restype to have the same order as residue_constants."""
- new_order_list = residue_constants.MAP_HHBLITS_AATYPE_TO_OUR_AATYPE
- new_order = tf.constant(new_order_list, dtype=protein['msa'].dtype)
- protein['msa'] = tf.gather(new_order, protein['msa'], axis=0)
-
- perm_matrix = np.zeros((22, 22), dtype=np.float32)
- perm_matrix[range(len(new_order_list)), new_order_list] = 1.
-
- for k in protein:
- if 'profile' in k: # Include both hhblits and psiblast profiles
- num_dim = protein[k].shape.as_list()[-1]
- assert num_dim in [20, 21, 22], (
- 'num_dim for %s out of expected range: %s' % (k, num_dim))
- protein[k] = tf.tensordot(protein[k], perm_matrix[:num_dim, :num_dim], 1)
- return protein
-
-
-def squeeze_features(protein):
- """Remove singleton and repeated dimensions in protein features."""
- protein['aatype'] = tf.argmax(
- protein['aatype'], axis=-1, output_type=tf.int32)
- for k in [
- 'domain_name', 'msa', 'num_alignments', 'seq_length', 'sequence',
- 'superfamily', 'deletion_matrix', 'resolution',
- 'between_segment_residues', 'residue_index', 'template_all_atom_masks']:
- if k in protein:
- final_dim = shape_helpers.shape_list(protein[k])[-1]
- if isinstance(final_dim, int) and final_dim == 1:
- protein[k] = tf.squeeze(protein[k], axis=-1)
-
- for k in ['seq_length', 'num_alignments']:
- if k in protein:
- protein[k] = protein[k][0] # Remove fake sequence dimension
- return protein
-
-
-def make_random_crop_to_size_seed(protein):
- """Random seed for cropping residues and templates."""
- protein['random_crop_to_size_seed'] = utils.make_random_seed()
- return protein
-
-
-@curry1
-def randomly_replace_msa_with_unknown(protein, replace_proportion):
- """Replace a proportion of the MSA with 'X'."""
- msa_mask = (tf.random.uniform(shape_helpers.shape_list(protein['msa'])) <
- replace_proportion)
- x_idx = 20
- gap_idx = 21
- msa_mask = tf.logical_and(msa_mask, protein['msa'] != gap_idx)
- protein['msa'] = tf.where(msa_mask,
- tf.ones_like(protein['msa']) * x_idx,
- protein['msa'])
- aatype_mask = (
- tf.random.uniform(shape_helpers.shape_list(protein['aatype'])) <
- replace_proportion)
-
- protein['aatype'] = tf.where(aatype_mask,
- tf.ones_like(protein['aatype']) * x_idx,
- protein['aatype'])
- return protein
-
-
-@curry1
-def sample_msa(protein, max_seq, keep_extra):
- """Sample MSA randomly, remaining sequences are stored as `extra_*`.
-
- Args:
- protein: batch to sample msa from.
- max_seq: number of sequences to sample.
- keep_extra: When True sequences not sampled are put into fields starting
- with 'extra_*'.
-
- Returns:
- Protein with sampled msa.
- """
- num_seq = tf.shape(protein['msa'])[0]
- shuffled = tf.random_shuffle(tf.range(1, num_seq))
- index_order = tf.concat([[0], shuffled], axis=0)
- num_sel = tf.minimum(max_seq, num_seq)
-
- sel_seq, not_sel_seq = tf.split(index_order, [num_sel, num_seq - num_sel])
-
- for k in _MSA_FEATURE_NAMES:
- if k in protein:
- if keep_extra:
- protein['extra_' + k] = tf.gather(protein[k], not_sel_seq)
- protein[k] = tf.gather(protein[k], sel_seq)
-
- return protein
-
-
-@curry1
-def crop_extra_msa(protein, max_extra_msa):
- """MSA features are cropped so only `max_extra_msa` sequences are kept."""
- num_seq = tf.shape(protein['extra_msa'])[0]
- num_sel = tf.minimum(max_extra_msa, num_seq)
- select_indices = tf.random_shuffle(tf.range(0, num_seq))[:num_sel]
- for k in _MSA_FEATURE_NAMES:
- if 'extra_' + k in protein:
- protein['extra_' + k] = tf.gather(protein['extra_' + k], select_indices)
-
- return protein
-
-
-def delete_extra_msa(protein):
- for k in _MSA_FEATURE_NAMES:
- if 'extra_' + k in protein:
- del protein['extra_' + k]
- return protein
-
-
-@curry1
-def block_delete_msa(protein, config):
- """Sample MSA by deleting contiguous blocks.
-
- Jumper et al. (2021) Suppl. Alg. 1 "MSABlockDeletion"
-
- Arguments:
- protein: batch dict containing the msa
- config: ConfigDict with parameters
-
- Returns:
- updated protein
- """
- num_seq = shape_helpers.shape_list(protein['msa'])[0]
- block_num_seq = tf.cast(
- tf.floor(tf.cast(num_seq, tf.float32) * config.msa_fraction_per_block),
- tf.int32)
-
- if config.randomize_num_blocks:
- nb = tf.random.uniform([], 0, config.num_blocks + 1, dtype=tf.int32)
- else:
- nb = config.num_blocks
-
- del_block_starts = tf.random.uniform([nb], 0, num_seq, dtype=tf.int32)
- del_blocks = del_block_starts[:, None] + tf.range(block_num_seq)
- del_blocks = tf.clip_by_value(del_blocks, 0, num_seq - 1)
- del_indices = tf.unique(tf.sort(tf.reshape(del_blocks, [-1])))[0]
-
- # Make sure we keep the original sequence
- sparse_diff = tf.sets.difference(tf.range(1, num_seq)[None],
- del_indices[None])
- keep_indices = tf.squeeze(tf.sparse.to_dense(sparse_diff), 0)
- keep_indices = tf.concat([[0], keep_indices], axis=0)
-
- for k in _MSA_FEATURE_NAMES:
- if k in protein:
- protein[k] = tf.gather(protein[k], keep_indices)
-
- return protein
-
-
-@curry1
-def nearest_neighbor_clusters(protein, gap_agreement_weight=0.):
- """Assign each extra MSA sequence to its nearest neighbor in sampled MSA."""
-
- # Determine how much weight we assign to each agreement. In theory, we could
- # use a full blosum matrix here, but right now let's just down-weight gap
- # agreement because it could be spurious.
- # Never put weight on agreeing on BERT mask
- weights = tf.concat([
- tf.ones(21),
- gap_agreement_weight * tf.ones(1),
- np.zeros(1)], 0)
-
- # Make agreement score as weighted Hamming distance
- sample_one_hot = (protein['msa_mask'][:, :, None] *
- tf.one_hot(protein['msa'], 23))
- extra_one_hot = (protein['extra_msa_mask'][:, :, None] *
- tf.one_hot(protein['extra_msa'], 23))
-
- num_seq, num_res, _ = shape_helpers.shape_list(sample_one_hot)
- extra_num_seq, _, _ = shape_helpers.shape_list(extra_one_hot)
-
- # Compute tf.einsum('mrc,nrc,c->mn', sample_one_hot, extra_one_hot, weights)
- # in an optimized fashion to avoid possible memory or computation blowup.
- agreement = tf.matmul(
- tf.reshape(extra_one_hot, [extra_num_seq, num_res * 23]),
- tf.reshape(sample_one_hot * weights, [num_seq, num_res * 23]),
- transpose_b=True)
-
- # Assign each sequence in the extra sequences to the closest MSA sample
- protein['extra_cluster_assignment'] = tf.argmax(
- agreement, axis=1, output_type=tf.int32)
-
- return protein
-
-
-@curry1
-def summarize_clusters(protein):
- """Produce profile and deletion_matrix_mean within each cluster."""
- num_seq = shape_helpers.shape_list(protein['msa'])[0]
- def csum(x):
- return tf.math.unsorted_segment_sum(
- x, protein['extra_cluster_assignment'], num_seq)
-
- mask = protein['extra_msa_mask']
- mask_counts = 1e-6 + protein['msa_mask'] + csum(mask) # Include center
-
- msa_sum = csum(mask[:, :, None] * tf.one_hot(protein['extra_msa'], 23))
- msa_sum += tf.one_hot(protein['msa'], 23) # Original sequence
- protein['cluster_profile'] = msa_sum / mask_counts[:, :, None]
-
- del msa_sum
-
- del_sum = csum(mask * protein['extra_deletion_matrix'])
- del_sum += protein['deletion_matrix'] # Original sequence
- protein['cluster_deletion_mean'] = del_sum / mask_counts
- del del_sum
-
- return protein
-
-
-def make_msa_mask(protein):
- """Mask features are all ones, but will later be zero-padded."""
- protein['msa_mask'] = tf.ones(
- shape_helpers.shape_list(protein['msa']), dtype=tf.float32)
- protein['msa_row_mask'] = tf.ones(
- shape_helpers.shape_list(protein['msa'])[0], dtype=tf.float32)
- return protein
-
-
-def pseudo_beta_fn(aatype, all_atom_positions, all_atom_masks):
- """Create pseudo beta features."""
- is_gly = tf.equal(aatype, residue_constants.restype_order['G'])
- ca_idx = residue_constants.atom_order['CA']
- cb_idx = residue_constants.atom_order['CB']
- pseudo_beta = tf.where(
- tf.tile(is_gly[..., None], [1] * len(is_gly.shape) + [3]),
- all_atom_positions[..., ca_idx, :],
- all_atom_positions[..., cb_idx, :])
-
- if all_atom_masks is not None:
- pseudo_beta_mask = tf.where(
- is_gly, all_atom_masks[..., ca_idx], all_atom_masks[..., cb_idx])
- pseudo_beta_mask = tf.cast(pseudo_beta_mask, tf.float32)
- return pseudo_beta, pseudo_beta_mask
- else:
- return pseudo_beta
-
-
-@curry1
-def make_pseudo_beta(protein, prefix=''):
- """Create pseudo-beta (alpha for glycine) position and mask."""
- assert prefix in ['', 'template_']
- protein[prefix + 'pseudo_beta'], protein[prefix + 'pseudo_beta_mask'] = (
- pseudo_beta_fn(
- protein['template_aatype' if prefix else 'all_atom_aatype'],
- protein[prefix + 'all_atom_positions'],
- protein['template_all_atom_masks' if prefix else 'all_atom_mask']))
- return protein
-
-
-@curry1
-def add_constant_field(protein, key, value):
- protein[key] = tf.convert_to_tensor(value)
- return protein
-
-
-def shaped_categorical(probs, epsilon=1e-10):
- ds = shape_helpers.shape_list(probs)
- num_classes = ds[-1]
- counts = tf.random.categorical(
- tf.reshape(tf.log(probs + epsilon), [-1, num_classes]),
- 1,
- dtype=tf.int32)
- return tf.reshape(counts, ds[:-1])
-
-
-def make_hhblits_profile(protein):
- """Compute the HHblits MSA profile if not already present."""
- if 'hhblits_profile' in protein:
- return protein
-
- # Compute the profile for every residue (over all MSA sequences).
- protein['hhblits_profile'] = tf.reduce_mean(
- tf.one_hot(protein['msa'], 22), axis=0)
- return protein
-
-
-@curry1
-def make_masked_msa(protein, config, replace_fraction):
- """Create data for BERT on raw MSA."""
- # Add a random amino acid uniformly
- random_aa = tf.constant([0.05] * 20 + [0., 0.], dtype=tf.float32)
-
- categorical_probs = (
- config.uniform_prob * random_aa +
- config.profile_prob * protein['hhblits_profile'] +
- config.same_prob * tf.one_hot(protein['msa'], 22))
-
- # Put all remaining probability on [MASK] which is a new column
- pad_shapes = [[0, 0] for _ in range(len(categorical_probs.shape))]
- pad_shapes[-1][1] = 1
- mask_prob = 1. - config.profile_prob - config.same_prob - config.uniform_prob
- assert mask_prob >= 0.
- categorical_probs = tf.pad(
- categorical_probs, pad_shapes, constant_values=mask_prob)
-
- sh = shape_helpers.shape_list(protein['msa'])
- mask_position = tf.random.uniform(sh) < replace_fraction
-
- bert_msa = shaped_categorical(categorical_probs)
- bert_msa = tf.where(mask_position, bert_msa, protein['msa'])
-
- # Mix real and masked MSA
- protein['bert_mask'] = tf.cast(mask_position, tf.float32)
- protein['true_msa'] = protein['msa']
- protein['msa'] = bert_msa
-
- return protein
-
-
-@curry1
-def make_fixed_size(protein, shape_schema, msa_cluster_size, extra_msa_size,
- num_res, num_templates=0):
- """Guess at the MSA and sequence dimensions to make fixed size."""
-
- pad_size_map = {
- NUM_RES: num_res,
- NUM_MSA_SEQ: msa_cluster_size,
- NUM_EXTRA_SEQ: extra_msa_size,
- NUM_TEMPLATES: num_templates,
- }
-
- for k, v in protein.items():
- # Don't transfer this to the accelerator.
- if k == 'extra_cluster_assignment':
- continue
- shape = v.shape.as_list()
- schema = shape_schema[k]
- assert len(shape) == len(schema), (
- f'Rank mismatch between shape and shape schema for {k}: '
- f'{shape} vs {schema}')
- pad_size = [
- pad_size_map.get(s2, None) or s1 for (s1, s2) in zip(shape, schema)
- ]
- padding = [(0, p - tf.shape(v)[i]) for i, p in enumerate(pad_size)]
- if padding:
- protein[k] = tf.pad(
- v, padding, name=f'pad_to_fixed_{k}')
- protein[k].set_shape(pad_size)
-
- return protein
-
-
-@curry1
-def make_msa_feat(protein):
- """Create and concatenate MSA features."""
- # Whether there is a domain break. Always zero for chains, but keeping
- # for compatibility with domain datasets.
- has_break = tf.clip_by_value(
- tf.cast(protein['between_segment_residues'], tf.float32),
- 0, 1)
- aatype_1hot = tf.one_hot(protein['aatype'], 21, axis=-1)
-
- target_feat = [
- tf.expand_dims(has_break, axis=-1),
- aatype_1hot, # Everyone gets the original sequence.
- ]
-
- msa_1hot = tf.one_hot(protein['msa'], 23, axis=-1)
- has_deletion = tf.clip_by_value(protein['deletion_matrix'], 0., 1.)
- deletion_value = tf.atan(protein['deletion_matrix'] / 3.) * (2. / np.pi)
-
- msa_feat = [
- msa_1hot,
- tf.expand_dims(has_deletion, axis=-1),
- tf.expand_dims(deletion_value, axis=-1),
- ]
-
- if 'cluster_profile' in protein:
- deletion_mean_value = (
- tf.atan(protein['cluster_deletion_mean'] / 3.) * (2. / np.pi))
- msa_feat.extend([
- protein['cluster_profile'],
- tf.expand_dims(deletion_mean_value, axis=-1),
- ])
-
- if 'extra_deletion_matrix' in protein:
- protein['extra_has_deletion'] = tf.clip_by_value(
- protein['extra_deletion_matrix'], 0., 1.)
- protein['extra_deletion_value'] = tf.atan(
- protein['extra_deletion_matrix'] / 3.) * (2. / np.pi)
-
- protein['msa_feat'] = tf.concat(msa_feat, axis=-1)
- protein['target_feat'] = tf.concat(target_feat, axis=-1)
- return protein
-
-
-@curry1
-def select_feat(protein, feature_list):
- return {k: v for k, v in protein.items() if k in feature_list}
-
-
-@curry1
-def crop_templates(protein, max_templates):
- for k, v in protein.items():
- if k.startswith('template_'):
- protein[k] = v[:max_templates]
- return protein
-
-
-@curry1
-def random_crop_to_size(protein, crop_size, max_templates, shape_schema,
- subsample_templates=False):
- """Crop randomly to `crop_size`, or keep as is if shorter than that."""
- seq_length = protein['seq_length']
- if 'template_mask' in protein:
- num_templates = tf.cast(
- shape_helpers.shape_list(protein['template_mask'])[0], tf.int32)
- else:
- num_templates = tf.constant(0, dtype=tf.int32)
- num_res_crop_size = tf.math.minimum(seq_length, crop_size)
-
- # Ensures that the cropping of residues and templates happens in the same way
- # across ensembling iterations.
- # Do not use for randomness that should vary in ensembling.
- seed_maker = utils.SeedMaker(initial_seed=protein['random_crop_to_size_seed'])
-
- if subsample_templates:
- templates_crop_start = tf.random.stateless_uniform(
- shape=(), minval=0, maxval=num_templates + 1, dtype=tf.int32,
- seed=seed_maker())
- else:
- templates_crop_start = 0
-
- num_templates_crop_size = tf.math.minimum(
- num_templates - templates_crop_start, max_templates)
-
- num_res_crop_start = tf.random.stateless_uniform(
- shape=(), minval=0, maxval=seq_length - num_res_crop_size + 1,
- dtype=tf.int32, seed=seed_maker())
-
- templates_select_indices = tf.argsort(tf.random.stateless_uniform(
- [num_templates], seed=seed_maker()))
-
- for k, v in protein.items():
- if k not in shape_schema or (
- 'template' not in k and NUM_RES not in shape_schema[k]):
- continue
-
- # randomly permute the templates before cropping them.
- if k.startswith('template') and subsample_templates:
- v = tf.gather(v, templates_select_indices)
-
- crop_sizes = []
- crop_starts = []
- for i, (dim_size, dim) in enumerate(zip(shape_schema[k],
- shape_helpers.shape_list(v))):
- is_num_res = (dim_size == NUM_RES)
- if i == 0 and k.startswith('template'):
- crop_size = num_templates_crop_size
- crop_start = templates_crop_start
- else:
- crop_start = num_res_crop_start if is_num_res else 0
- crop_size = (num_res_crop_size if is_num_res else
- (-1 if dim is None else dim))
- crop_sizes.append(crop_size)
- crop_starts.append(crop_start)
- protein[k] = tf.slice(v, crop_starts, crop_sizes)
-
- protein['seq_length'] = num_res_crop_size
- return protein
-
-
-def make_atom14_masks(protein):
- """Construct denser atom positions (14 dimensions instead of 37)."""
- restype_atom14_to_atom37 = [] # mapping (restype, atom14) --> atom37
- restype_atom37_to_atom14 = [] # mapping (restype, atom37) --> atom14
- restype_atom14_mask = []
-
- for rt in residue_constants.restypes:
- atom_names = residue_constants.restype_name_to_atom14_names[
- residue_constants.restype_1to3[rt]]
-
- restype_atom14_to_atom37.append([
- (residue_constants.atom_order[name] if name else 0)
- for name in atom_names
- ])
-
- atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
- restype_atom37_to_atom14.append([
- (atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0)
- for name in residue_constants.atom_types
- ])
-
- restype_atom14_mask.append([(1. if name else 0.) for name in atom_names])
-
- # Add dummy mapping for restype 'UNK'
- restype_atom14_to_atom37.append([0] * 14)
- restype_atom37_to_atom14.append([0] * 37)
- restype_atom14_mask.append([0.] * 14)
-
- restype_atom14_to_atom37 = np.array(restype_atom14_to_atom37, dtype=np.int32)
- restype_atom37_to_atom14 = np.array(restype_atom37_to_atom14, dtype=np.int32)
- restype_atom14_mask = np.array(restype_atom14_mask, dtype=np.float32)
-
- # create the mapping for (residx, atom14) --> atom37, i.e. an array
- # with shape (num_res, 14) containing the atom37 indices for this protein
- residx_atom14_to_atom37 = tf.gather(restype_atom14_to_atom37,
- protein['aatype'])
- residx_atom14_mask = tf.gather(restype_atom14_mask,
- protein['aatype'])
-
- protein['atom14_atom_exists'] = residx_atom14_mask
- protein['residx_atom14_to_atom37'] = residx_atom14_to_atom37
-
- # create the gather indices for mapping back
- residx_atom37_to_atom14 = tf.gather(restype_atom37_to_atom14,
- protein['aatype'])
- protein['residx_atom37_to_atom14'] = residx_atom37_to_atom14
-
- # create the corresponding mask
- restype_atom37_mask = np.zeros([21, 37], dtype=np.float32)
- for restype, restype_letter in enumerate(residue_constants.restypes):
- restype_name = residue_constants.restype_1to3[restype_letter]
- atom_names = residue_constants.residue_atoms[restype_name]
- for atom_name in atom_names:
- atom_type = residue_constants.atom_order[atom_name]
- restype_atom37_mask[restype, atom_type] = 1
-
- residx_atom37_mask = tf.gather(restype_atom37_mask,
- protein['aatype'])
- protein['atom37_atom_exists'] = residx_atom37_mask
-
- return protein
diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Among Us Cute APK A New Twist on the Popular Game.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Among Us Cute APK A New Twist on the Popular Game.md
deleted file mode 100644
index 5e222ec9a98a9b7696082978b721d4a0f8ca7121..0000000000000000000000000000000000000000
--- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Among Us Cute APK A New Twist on the Popular Game.md
+++ /dev/null
@@ -1,112 +0,0 @@
-
-
Among Us Cute APK: How to Download and Play the Kawaii Mod for Android
-
If you are a fan of Among Us, the popular multiplayer game where you have to find the impostor among your crewmates, you might be interested in trying out a new mod that makes the game even more fun and adorable. It's called Among Us Cute APK, or Kawaii Mod, and it's a free download for Android devices. In this article, we will tell you everything you need to know about this mod, how to download and install it, and how to play it with your friends.
-
What is Among Us Cute APK?
-
A brief introduction to Among Us and its gameplay
-
Among Us is a social deduction game that was released in 2018 by Innersloth, an indie game studio. The game is set in a spaceship, where you can play online or over local WiFi with 4-15 players. The players are divided into two roles: crewmates and impostors. The crewmates have to work together to complete tasks and prepare the spaceship for departure, while the impostors have to secretly kill them or sabotage their mission. The crewmates can win by finishing all tasks or voting out all impostors, while the impostors can win by killing enough crewmates, causing a major sabotage, or convincing the crewmates to vote out a non-impostor.
The features and benefits of the Kawaii Mod for Among Us
-
The Kawaii Mod for Among Us is one of the most famous aesthetic modifications of the game, for obvious reasons. With this mod, you can enjoy a complete revamp of the game's character aesthetics, plus a completely reworked Kawaii-themed map. The mod changes the appearance of the crewmates, impostors, hats, pets, skins, weapons, tasks, vents, and more. Everything looks cute, colorful, and kawaii in this mod. The mod also adds new sound effects, animations, and emojis that match the kawaii style.
-
The benefits of playing with this mod are that you can have more fun and variety in your gameplay, as well as express your personality and preferences through your character customization. You can also impress your friends with your cute look and make them laugh with your kawaii actions. The mod is compatible with the latest version of Among Us, so you don't have to worry about missing out on any updates or features from the original game.
-
How to Download and Install Among Us Cute APK?
-
The steps to download the Kawaii Mod from a reliable source
-
To download the Kawaii Mod for Among Us, you need to find a reliable source that offers the mod file for free and without any viruses or malware. One such source is Among-Us.net, a website that specializes in providing mods and hacks for Among Us. Here are the steps to download the mod from this website:
-
-
Go to Among-Us.net and click on "KAWAII MOD" from the menu.
-
Scroll down and click on "Download KAWAII MOD" button.
-
Wait for a few seconds until the download link appears.
-
Click on "Download" button and save the file on your device.
-
-
The instructions to install and run the mod on your Android device
-
To install and run the mod on your Android device, you need to follow these instructions:
-
-
Go to your device settings and enable the option to install apps from unknown sources.
-
Locate the downloaded mod file on your device and tap on it.
-
Follow the installation prompts and grant the necessary permissions.
-
Launch the modded game from your app drawer or home screen.
-
-
Congratulations, you have successfully installed the Kawaii Mod for Among Us on your Android device. Now you can enjoy the cute and fun gameplay with your friends.
-
How to Play Among Us Cute APK?
-
The differences and similarities between the original game and the mod
-
The Kawaii Mod for Among Us does not change the core gameplay mechanics of the original game. You can still play online or offline with other players, choose your role, customize your settings, chat with others, and vote for the impostor. However, the mod does add some new features and options that make the game more kawaii and enjoyable. For example, you can:
-
-
Select from a variety of cute hats, pets, skins, and weapons for your character.
-
Explore a new map that is designed with kawaii elements and decorations.
-
Perform new tasks that are related to kawaii themes and activities.
-
Use new sound effects, animations, and emojis that express your kawaii mood.
-
Change the color and style of your name and chat messages.
-
-
The tips and tricks to enjoy the Kawaii Mod for Among Us
-
To make the most out of the Kawaii Mod for Among Us, here are some tips and tricks that you can try:
-
-
Invite your friends to play with you and share the mod file with them.
-
Create a custom game with your preferred settings and rules.
-
Choose a character that matches your personality and style.
-
Use the kawaii emojis and sound effects to communicate with others.
-
Have fun and be respectful of other players.
-
-
Conclusion
-
A summary of the main points and a call to action
-
In conclusion, Among Us Cute APK is a kawaii mod for Among Us that allows you to enjoy a cute and fun version of the game on your Android device. You can download and install the mod for free from a reliable source, and play it online or offline with your friends. You can also customize your character, explore a new map, perform new tasks, and use new sound effects, animations, and emojis. If you are looking for a way to spice up your gameplay and express your kawaii side, you should definitely give this mod a try. You won't regret it!
-
FAQs
-
Q: Is Among Us Cute APK safe to download and use?
-
A: Yes, as long as you download it from a trustworthy source like [Among-Us.net], which offers virus-free and malware-free mod files. You should also avoid downloading any other mods or hacks that might interfere with the Kawaii Mod or harm your device.
-
Q: Can I play Among Us Cute APK with players who don't have the mod?
-
A: Yes, you can still join public games or private games hosted by other players who don't have the mod. However, they won't be able to see your kawaii appearance or hear your kawaii sound effects. They will only see you as a normal player with a default skin and hat.
-
Q: Can I play Among Us Cute APK on other platforms like PC or iOS?
-
A: No, unfortunately, this mod is only available for Android devices at the moment. There is no official version of the mod for PC or iOS devices. However, you might be able to use an Android emulator on your PC or a jailbroken iOS device to run the mod, but this is not recommended or supported by the mod developers.
-
Q: How can I update Among Us Cute APK to the latest version?
-
A: To update the mod to the latest version, you need to visit [Among-Us.net] again and download the new mod file. Then, you need to uninstall the old version of the mod from your device and install the new one. You should also check if there are any updates for the original game from Google Play Store or App Store.
-
Q: How can I contact the developers of Among Us Cute APK?
-
A: If you have any questions, feedback, or suggestions for the developers of the Kawaii Mod for Among Us, you can visit their official website at [KawaiiMod.com] or follow them on their social media accounts at [Facebook], [Twitter], and [Instagram]. You can also join their Discord server at [Discord.gg/KawaiiMod] to chat with other players and get the latest news and updates about the mod.
-
-
\ No newline at end of file
diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Ariana Grande and Nicki Minajs Side to Side Remix Tik Tok MP3 Where to Find It and How to Use It.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Ariana Grande and Nicki Minajs Side to Side Remix Tik Tok MP3 Where to Find It and How to Use It.md
deleted file mode 100644
index 3fc9fc6ccbc6a1127a9ad10c608cab3a0880b2b7..0000000000000000000000000000000000000000
--- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Ariana Grande and Nicki Minajs Side to Side Remix Tik Tok MP3 Where to Find It and How to Use It.md
+++ /dev/null
@@ -1,79 +0,0 @@
-
-
How to Download Side to Side Remix Tik Tok MP3
-
TikTok is one of the most popular social media platforms in the world, with over one billion users. It allows users to create and share short videos with various effects, filters, stickers, music, and sounds. One of the features that makes TikTok stand out is its use of catchy audio clips from songs, movies, TV shows, and other sources. Users can lip-sync, dance, act, or do anything they want with these audio clips, creating fun and creative content.
One of the songs that has been widely used on TikTok is "Side to Side" by Ariana Grande and Nicki Minaj. The song was released in 2016 as part of Ariana Grande's album Dangerous Woman. It is a reggae-infused track that describes a woman who is obsessed with a man who has a bad reputation, and despite her friends' disapproval, she pursues a physical relationship with him. The song's title and hook, "Side to Side", actually refer to difficulty walking after vigorous sex.
-
The song has been remixed by various DJs and producers, and one of them has become viral on TikTok. The remix version features a faster tempo, a heavier bass, and some additional sound effects. Many users have used this remix version to make videos of themselves dancing, working out, or doing other activities. The remix version has also been praised for its catchy and upbeat vibe.
-
How to Download the Side to Side Remix Tik Tok MP3
-
If you are a fan of this remix version and want to download it as an mp3 file, you can follow these simple steps:
-
-
Find the TikTok video with the remix audio. You can search for keywords like "side to side remix", "side to side tik tok", or "side to side dance". You can also browse through hashtags like #sidetoside, #sidetosideremix, or #sidetosidetiktok.
-
Copy the link of the video. To do this, tap on the share icon on the right side of the screen, then tap on "Copy Link". If you are using a desktop browser, you can copy the link from the address bar when you watch a single TikTok.
-
Go to a website that allows you to download TikTok videos as mp3 or mp4, such as one of the converter sites listed in the FAQs below.
-
Paste the link and select mp3 as the download format. On most websites, you will see a search bar where you can paste the link and press a button. Then you will see an option to choose the download format. Select mp3 and click on the download button.
-
Download the mp3 file. Depending on your device and browser settings, you may need to confirm the download or choose a location to save the file. Once the download is complete, you can enjoy listening to the side to side remix tik tok mp3.
-
-
Benefits of Downloading the Side to Side Remix Tik Tok MP3
-
There are many benefits of downloading the side to side remix tik tok mp3, such as:
-
-
You can listen to the song offline and without ads. This means you can play the song anytime and anywhere, without worrying about internet connection or interruptions.
-
You can use the song as a ringtone, alarm, or notification sound. This way, you can personalize your phone and make it more fun and unique.
-
You can share the song with your friends or on social media. You can send the mp3 file to your friends via messaging apps or email, or you can upload it to platforms like YouTube, Instagram, Facebook, or Twitter. You can also create your own TikTok videos with the song and join the trend.
-
-
Conclusion
-
TikTok is a great source of entertainment and creativity, and one of the ways it showcases this is through its use of music and sound. One of the songs that has become popular on TikTok is the remix version of "Side to Side" by Ariana Grande and Nicki Minaj. The remix version is catchy, upbeat, and fun, and many users have used it to make videos of themselves dancing, working out, or doing other activities.
-
-
If you want to download the side to side remix tik tok mp3, you can follow the steps we have outlined in this article. You will need to find the TikTok video with the remix audio, copy the link of the video, go to a website that allows you to download TikTok videos as mp3 or mp4, paste the link and select mp3 as the download format, and download the mp3 file. By doing this, you will be able to enjoy listening to the song offline and without ads, use it as a ringtone, alarm, or notification sound, and share it with your friends or on social media.
-
We hope you found this article helpful and informative. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading!
-
FAQs
-
What is the meaning of the song "Side to Side"?
-
The song "Side to Side" by Ariana Grande and Nicki Minaj is about a woman who is obsessed with a man who has a bad reputation, and despite her friends' disapproval, she pursues a physical relationship with him. The song's title and hook, "Side to Side", actually refer to difficulty walking after vigorous sex.
-
Who made the remix version of the song that is popular on TikTok?
-
The remix version of the song that is popular on TikTok was made by DJ Rocco & DJ Ever B. They are two DJs and producers from Colombia who specialize in urban music. They have remixed songs by artists like Justin Bieber, Ed Sheeran, Drake, Rihanna, and more.
-
How can I make my own remix of a song on TikTok?
-
To make your own remix of a song on TikTok, you will need to use a third-party app or software that allows you to edit audio files. Some examples are GarageBand, Audacity, FL Studio, or Soundtrap. You can import the original song into the app or software and add effects, filters, loops, samples, or other sounds to create your own remix. Then you can export the remix as an mp3 file and upload it to TikTok.
-
Is it legal to download TikTok videos as mp3 or mp4?
-
It depends on the content and purpose of the download. Generally speaking, downloading TikTok videos as mp3 or mp4 for personal use is not illegal, as long as you do not distribute them commercially or infringe on any copyrights or trademarks. However, you should always respect the rights and wishes of the original creators and owners of the videos. If they do not want their videos to be downloaded or used by others, you should not do so. You should also avoid downloading or using videos that contain illegal, harmful, or offensive content.
-
What are some other websites that allow me to download TikTok videos as mp3 or mp4?
-
Some other websites that allow you to download TikTok videos as mp3 or mp4 are ttdownloader.com, musicallydown.com, snaptik.app, tiktokvideodownloader.com, or tiktokdownload.online. However, you should always be careful when using these websites, as some of them may contain ads, pop-ups, malware, or viruses. You should also check the quality and size of the files before downloading them.
-
-
\ No newline at end of file
diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Car Simulator 2 The Ultimate Simulation Game with Unlimited Money and All Cars.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Car Simulator 2 The Ultimate Simulation Game with Unlimited Money and All Cars.md
deleted file mode 100644
index 9f8ad9b14c1d44927efab0834719afff207e46e7..0000000000000000000000000000000000000000
--- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Car Simulator 2 The Ultimate Simulation Game with Unlimited Money and All Cars.md
+++ /dev/null
@@ -1,84 +0,0 @@
-
-
Car Simulator 2: How to Download and Enjoy Unlimited Money and Cars
-
Do you love driving cars and exploring different places? Do you want to experience realistic car physics and graphics in a mobile game? If yes, then you should try Car Simulator 2, a simulation game where you can drive various cars, complete missions, race with other players, and customize your vehicles. In this article, we will tell you what Car Simulator 2 is, why you should download the modded version of the game, and how to do it easily.
-
What is Car Simulator 2?
-
Car Simulator 2 is a simulation game developed by Oppana Games, a studio that specializes in creating realistic car games for mobile devices. The game was released in 2019 and has been downloaded over 50 million times on Google Play Store. Car Simulator 2 is the sequel to the popular Car Simulator OG, which was also developed by Oppana Games.
Car Simulator 2 has many features that make it one of the best car games on the market. Some of these features are:
-
-
Realistic car physics and graphics: The game uses advanced physics and graphics engines to create realistic car movements, sounds, and visuals. You can feel the difference between different car models, such as sports cars, SUVs, trucks, etc. You can also see the damage effects on your car when you crash or hit something.
-
Open world map: The game has a large open world map that you can explore freely. You can drive around the city, the countryside, the airport, the desert, and more. You can also interact with various objects and people in the environment, such as traffic lights, pedestrians, animals, etc.
-
Online multiplayer mode: The game has an online multiplayer mode where you can race with other players from around the world. You can join different servers and compete in various modes, such as drag racing, free ride, police chase, etc. You can also chat with other players and make friends.
-
Car customization and garage: The game allows you to customize your cars with different parts and accessories. You can change the color, wheels, spoilers, bumpers, lights, etc. of your car. You can also upgrade your car's performance by improving the engine, transmission, brakes, suspension, etc. You can store your cars in your own garage and switch between them anytime.
-
Missions and quests: The game has many missions and quests that you can complete to earn money and experience points. You can accept missions from different characters in the game, such as taxi drivers, mechanics, police officers, etc. You can also find hidden quests and secrets in the map.
-
-
Gameplay of Car Simulator 2
-
The gameplay of Car Simulator 2 is simple and fun. You can choose between single-player and multiplayer modes, and between difficulty levels such as easy or hard. You can control your car with a steering wheel, buttons, or tilt controls, and you can adjust the camera angle to view your car from different perspectives.
-
Why Download Car Simulator 2 Mod APK?
-
Car Simulator 2 is a free-to-play game that you can download from Google Play Store or App Store. However, the game has some limitations that may affect your enjoyment of the game. For example:
-
-
The game has ads that may interrupt your gameplay or consume your data.
-
The game has in-app purchases that may require you to spend real money to unlock some features or items.
-
The game has limited resources that may restrict your progress or options in the game.
-
-
If you want to avoid these limitations and enjoy the game to the fullest, you should download the modded version of the game, also known as Car Simulator 2 Mod APK. This is a modified version of the game that has some advantages over the original version. Some of these advantages are:
-
Benefits of Car Simulator 2 Mod APK
-
Car Simulator 2 Mod APK has many benefits that make it worth downloading. Some of these benefits are:
-
-
Unlimited money: The modded version of the game gives you unlimited money that you can use to buy and upgrade any car you want. You can also buy any item or feature in the game without worrying about the cost.
-
Unlocked cars: The modded version of the game unlocks all the cars in the game, including the premium and exclusive ones. You can choose from over 50 cars of different types and brands, such as Ferrari, Lamborghini, BMW, etc.
-
No ads: The modded version of the game removes all the ads from the game, so you can enjoy the game without any interruption or distraction.
-
No root required: The modded version of the game does not require you to root your device, which means you can install and play it without any risk or hassle.
-
-
How to Download and Install Car Simulator 2 Mod APK
-
If you are interested in downloading and installing Car Simulator 2 Mod APK, you can follow these simple steps:
-
-
Download the APK file: The first step is to download the APK file of Car Simulator 2 Mod from a reliable source. You can use this link to download it: Car Simulator 2 Mod APK Download.
-
Enable unknown sources: The next step is to enable unknown sources on your device, which will allow you to install apps from sources other than Google Play Store. You can do this by going to Settings > Security > Unknown Sources and turning it on.
-
Install the APK file: The final step is to install the APK file on your device. You can do this by locating the downloaded file in your file manager and tapping on it. Then, follow the instructions on the screen to complete the installation.
-
-
Conclusion
-
Car Simulator 2 is a simulation game that lets you drive various cars, complete missions, race with other players, and customize your vehicles. It has realistic car physics and graphics, a large open world map, an online multiplayer mode, a car customization and garage system, and many missions and quests. However, the game also has some limitations that may affect your enjoyment, such as ads, in-app purchases, and limited resources. If you want to avoid these limitations and enjoy the game to the fullest, you can download Car Simulator 2 Mod APK, which gives you unlimited money, unlocks all the cars, removes all the ads, and does not require root access. You can get it by following the steps mentioned above.
-
-
FAQs
-
Here are some frequently asked questions about Car Simulator 2 and Car Simulator 2 Mod APK:
-
-
Q: Is Car Simulator 2 Mod APK safe to use?
-
A: Yes, Car Simulator 2 Mod APK is safe to use as long as you download it from a trusted source. However, you should always be careful when downloading and installing any app from unknown sources and scan it for viruses or malware before using it.
-
Q: Can I play Car Simulator 2 Mod APK offline?
-
A: Yes, you can play Car Simulator 2 Mod APK offline without any internet connection. However, some features of the game may not work properly offline, such as online multiplayer mode or cloud save.
-
Q: Can I update Car Simulator 2 Mod APK?
-
A: Yes, you can update Car Simulator 2 Mod APK whenever there is a new version available. However, you may lose some of your progress or data if you update it without backing it up first.
-
Q: How can I contact the developers of Car Simulator 2?
-
A: You can contact the developers of Car Simulator 2 by visiting their official website or social media pages. You can also send them an email at support@oppanagames.com or leave a review on Google Play Store or App Store.
-
Q: What are some similar games to Car Simulator 2?
-
A: Some similar games to Car Simulator 2 are Real Driving Sim, Extreme Car Driving Simulator, Car Parking Multiplayer, and Driving School 2017. You can find these games on Google Play Store or App Store and download them for free.
-
-
I hope you enjoyed this article and learned something new about Car Simulator 2 and Car Simulator 2 Mod APK. If you have any questions or feedback, feel free to leave a comment below. Thank you for reading and happy gaming!
-
-
\ No newline at end of file
diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Game Raft Final Chapter PC and Enjoy the Culmination of More Than Five Years of Development.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Game Raft Final Chapter PC and Enjoy the Culmination of More Than Five Years of Development.md
deleted file mode 100644
index 258da145a9a4a78ab9c8dec9f5fc8a13d52eb7bd..0000000000000000000000000000000000000000
--- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Game Raft Final Chapter PC and Enjoy the Culmination of More Than Five Years of Development.md
+++ /dev/null
@@ -1,217 +0,0 @@
-
-
Download Game Raft Final Chapter PC: A Guide to the Ultimate Survival Adventure
-
Have you ever dreamed of surviving on a raft in the middle of the ocean, surrounded by sharks, debris, and islands? If so, then you might want to check out Raft, a game that lets you do just that. Raft is a survival sandbox game that challenges you to build and expand your raft, explore new locations, craft items, fight enemies, and uncover the secrets of a post-apocalyptic world. In this article, we will show you how to download Raft: The Final Chapter for PC, what's new in the latest update, and some tips and tricks for playing Raft effectively. Let's get started!
Raft is a game developed by Redbeet Interactive and published by Axolot Games. It was released in early access on Steam in May 2018, and left early access on June 20, 2021, with The Final Chapter update. The game is available for Windows, Mac OS, and Linux platforms.
-
Raft belongs to the survival sandbox genre, which means that you have to gather resources, craft items, build structures, and manage your hunger, thirst, health, and oxygen levels. The game also has a sandbox mode where you can play without any limitations or threats.
-
The game's premise is simple: you are stranded on a small raft in the middle of the ocean, with nothing but a hook and some debris. You have to use your hook to collect floating materials, such as wood, plastic, metal, leaves, barrels, crates, etc. You can then use these materials to craft various items, such as tools, weapons, furniture, equipment, etc. You can also expand your raft by adding more foundations, floors, walls, roofs, stairs, etc.
-
However, surviving on a raft is not easy. You have to deal with various dangers and challenges: sharks that can attack your raft or you; storms that can damage or sink your raft; hunger and thirst that deplete your health; islands that offer both resources and dangers; and other survivors that can be friendly or hostile.
-
But surviving on a raft is also rewarding. You can explore the vast ocean and discover new places, such as abandoned ships, underwater caves, research stations, etc. You can also find clues and signals that lead you to the main story of the game, which involves a mysterious organization called Tangaroa and a hidden city in the sky. You can also meet and interact with new characters, such as animals, traders, and allies.
-
-
Raft is a game that offers a lot of freedom and creativity. You can design your raft however you want, from a simple platform to a floating mansion. You can also customize your character's appearance, clothes, and accessories. You can also play with your friends online, and cooperate or compete with them.
-
Why You Should Play Raft
-
Raft is a game that has many benefits for players of all ages and preferences. Here are some of the reasons why you should play Raft:
-
-
It stimulates your creativity and imagination. You can create your own raft and items, and express your personality and style.
-
It enhances your problem-solving and decision-making skills. You have to think fast and plan ahead, and deal with various challenges and dilemmas.
-
It improves your resourcefulness and adaptability. You have to make use of what you have, and adjust to different situations and environments.
-
It provides you with a sense of accomplishment and satisfaction. You can see your progress and achievements, and feel proud of your work.
-
It entertains you and relaxes you. You can have fun and enjoy the game's humor, graphics, sound, and gameplay.
-
It educates you and inspires you. You can learn about the ocean, the environment, the history, and the culture of the game's world.
-
-
Raft is a game that has something for everyone. Whether you like adventure, exploration, building, crafting, fighting, or storytelling, you will find something to love in Raft.
-
How to Download Game Raft Final Chapter PC
-
If you are interested in playing Raft on your PC, you will need to follow these steps:
-
Step 1: Get Steam
-
Steam is a digital distribution platform that allows you to buy, download, install, update, and play games on your PC. To get Steam, you will need to:
-
-
Go to the Steam website and click on the "Install Steam" button.
-
Download the Steam installer file and run it on your PC.
-
Follow the instructions on the screen to install Steam on your PC.
-
Create a Steam account or log in with an existing one.
-
-
Step 2: Buy Raft
-
Raft is available for purchase on the Steam store. To buy Raft, you will need to:
Search for Raft on the Steam store and open its store page, then click on the "Add to Cart" button.
-
Proceed to checkout and pay with your preferred method.
-
Raft will then be added to your Steam library.
-
-
Step 3: Install Raft
-
To install Raft on your PC, you will need to:
-
-
Go to your Steam library and find Raft in your list of games.
-
Select Raft and click on the "Install" button.
-
Wait for the download and installation process to finish.
-
-
Step 4: Launch Raft
-
To launch Raft on your PC, you will need to:
-
-
Go to your Steam library and find Raft in your list of games.
-
Select Raft and click on the "Play" button.
-
Adjust the settings according to your preferences and system requirements.
-
Choose between single-player or multiplayer mode.
-
Create a new world or load an existing one.
-
Enjoy playing Raft!
-
-
What's New in The Final Chapter?
-
The Final Chapter is the latest and final update for Raft that was released on June 20, 2021. It adds new content and features to the game, such as:
-
New Destinations
-
The Final Chapter adds three new destinations to explore in the game, each with its own unique environment, resources, puzzles, and secrets. They are:
-
-
Tangaroa: The hidden city in the sky that is the ultimate goal of the game's story. It is a futuristic metropolis that floats above the clouds, powered by a giant tower. It is full of advanced technology, but also dark secrets and dangers.
-
Caravan Island: A large island that is home to a community of survivors who live in caravans and tents. It is a friendly and peaceful place, where you can trade, rest, and interact with other characters.
-
Oil Rig: An abandoned oil rig that is infested with mutated rats. It is a hostile and hazardous place, where you have to fight your way through the enemies and traps.
-
-
New Enemies
-
The Final Chapter also introduces new enemies to encounter and fight in the game, such as:
-
-
Mutated Rats: Large and aggressive rodents that have been affected by the radiation from the oil rig. They can attack you with their teeth and claws, and can also spit acid at you.
-
Security Bots: Robotic guards that patrol the Tangaroa city. They can detect you with their sensors, and can shoot you with their lasers or electrocute you with their tasers.
-
Titanic Eel: A massive and mysterious creature that lurks in the depths of the ocean. It can emerge from the water and swallow your raft whole, or strike you with its electric shocks.
-
-
New Items
-
The Final Chapter also adds new items to craft and use in the game, such as:
-
-
Zip Line Tool: A device that allows you to travel quickly and easily between high places. You can use it to zip across the Tangaroa city, or to create your own zip lines on your raft or islands.
-
Light Bulb: A component that allows you to create various light sources. You can use it to craft lamps, spotlights, neon signs, etc.
-
Titanium Ore: A rare and valuable resource that can be found on the oil rig or in underwater caves. You can use it to craft titanium ingots, which are needed for advanced items.
-
Mayor's Outfit: A special outfit that can be obtained by completing the game's story. It consists of a suit, a hat, a monocle, and a cane. It gives you a stylish and elegant look.
-
-
New Characters
-
The Final Chapter also features new characters to meet and interact with in the game, such as:
-
-
Emilie: A young girl who lives on Caravan Island with her grandfather. She is curious and adventurous, and likes to explore the island and collect things.
-
René: Emilie's grandfather and the leader of Caravan Island. He is wise and kind, and knows a lot about the world before the flood.
-
Taylor: A survivor who lives on Tangaroa city. He is friendly and helpful, and can guide you through the city and its secrets.
-
Mayor Lee: The former mayor of Tangaroa city. He is arrogant and greedy, and has a sinister plan for the city and its inhabitants.
-
-
New Story
-
The Final Chapter also concludes the game's story, which involves uncovering the mystery of Tangaroa city and its connection to the flood that destroyed the world. The story features:
-
-
Voice Acting: The new characters are voiced by professional actors, who bring them to life with their emotions and expressions.
-
Cutscenes: The story is told through cinematic cutscenes, which show the events and interactions between the characters.
-
Choices: The story has multiple endings, depending on your choices and actions throughout the game.
-
-
Tips and Tricks for Playing Raft
-
Raft is a game that requires skill, strategy, and patience. Here are some tips and tricks for playing Raft effectively:
-
Tip 1: Manage Your Resources Wisely
Resources are the key to survival in Raft. You need them to craft items, build your raft, and sustain your needs. However, resources are also limited and scarce, so you have to manage them wisely. Here are some ways to do that:
-
-
Collect as much as you can: Use your hook to grab any floating materials that you see, and don't let them go to waste. You can also use a net launcher or a collection net to catch more materials automatically.
-
Store them properly: Use storage boxes or chests to store your resources and items, and organize them by categories. This will help you find what you need quickly and easily.
-
Use them efficiently: Don't craft or build more than you need, and don't waste your resources on unnecessary things. For example, don't cook more food than you can eat, and don't make more weapons than you can use.
-
Recycle them when possible: Some items can be recycled or reused for other purposes. For example, you can melt plastic into plastic ingots, or turn metal into nails.
-
Trade them when needed: Some resources are rare or hard to find, but you can trade them with other survivors or traders. For example, you can trade wood for metal, or seeds for eggs.
-
-
Tip 2: Expand Your Raft Gradually
-
Your raft is your home and your base in Raft. You want it to be as big and as comfortable as possible, but you also want it to be stable and secure. Here are some ways to expand your raft gradually:
-
-
Start with the basics: Before you add any fancy or decorative items to your raft, make sure you have the essential items first. These include a water purifier, a grill, a crop plot, a bed, a research table, etc.
-
Add more foundations: To make your raft bigger, you need to add more foundations to it. You can use wood, plastic, metal, or brick as foundations. However, be careful not to make your raft too wide or too long, as this will make it harder to maneuver and more vulnerable to shark attacks.
-
Add more floors: To make your raft taller, you need to add more floors to it. You can use wood or metal as floors. However, be careful not to make your raft too high, as this will make it less stable and more prone to storms.
-
Add more walls and roofs: To make your raft more enclosed and protected, you need to add more walls and roofs to it. You can use wood, metal, glass, or thatch as walls and roofs. However, be careful not to make your raft too dark or too stuffy, as this will affect your visibility and ventilation.
-
Add more features: To make your raft more functional and enjoyable, you need to add more features to it. You can use various items to craft different features, such as a sail, an anchor, a steering wheel, a battery charger, a radio, etc.
-
-
Tip 3: Research New Technologies
-
Raft is a game that has a lot of technology and innovation. You can research new technologies and unlock new recipes and items by using the research table. Here are some ways to research new technologies:
-
-
Gather blueprints: Blueprints are special items that contain information about new technologies. You can find blueprints in various places, such as barrels, crates, ships, islands, etc. You can also buy blueprints from traders.
-
Gather materials: Materials are the basic components that you need to research new technologies. You can find materials by collecting debris, harvesting crops, fishing, hunting, etc. You can also trade materials with other survivors or traders.
-
Use the research table: The research table is the device that allows you to research new technologies. You can craft a research table with wood, plastic, and scrap. To use the research table, you need to:
-
-
Place the blueprint or material that you want to research on the research table.
-
Click on the "Research" button to unlock the new technology.
-
Click on the "Learn" button to learn the new recipe or item.
-
-
Craft the new item: Once you have learned the new recipe or item, you can craft it with the required materials. You can craft items by using your inventory, a crafting menu, or a specific crafting station.
-
-
Tip 4: Follow the Signals
-
Raft is a game that has a lot of exploration and discovery. You can follow the signals and clues that lead you to new destinations and secrets. Here are some ways to follow the signals:
-
-
Gather notes: Notes are special items that contain information about the game's world and story. You can find notes in various places, such as barrels, crates, ships, islands, etc. You can also buy notes from traders.
-
Gather coordinates: Coordinates are special items that contain the location of a specific destination. You can find coordinates in various places, such as notes, signs, maps, etc. You can also buy coordinates from traders.
-
Use the receiver and antennas: The receiver and antennas are devices that allow you to track and locate signals and coordinates. You can craft a receiver and antennas with circuit boards, plastic, scrap, bolts, etc. To use the receiver and antennas, you need to:
-
-
Place the receiver and three antennas on your raft, and connect them with wires.
-
Turn on the receiver and select a signal or coordinate that you want to follow.
-
Adjust the direction and distance of your raft until you reach the destination.
-
-
Explore the destination: Once you have reached the destination, you can explore it and find new resources, enemies, puzzles, secrets, etc.
-
-
Tip 5: Cooperate with Your Friends
-
Raft is a game that has a lot of cooperation and competition. You can play with your friends online, and share the tasks and challenges of surviving on a raft. Here are some ways to cooperate with your friends:
-
-
Create or join a world: To play with your friends online, you need to create or join a world. You can create a world by choosing "New World" in the main menu, and setting a name, password, difficulty, mode, etc. You can join a world by choosing "Join World" in the main menu, and entering the name or IP address of your friend's world.
-
Communicate with your friends: To communicate with your friends online, you need to use voice chat or text chat. You can use voice chat by pressing and holding the "V" key on your keyboard, and speaking into your microphone. You can use text chat by pressing the "T" key on your keyboard, and typing your message.
-
Share your resources and items: To share your resources and items with your friends online, you need to use storage boxes or chests. You can place your resources and items in storage boxes or chests, and allow your friends to access them. You can also drop your resources and items on the ground, and let your friends pick them up.
-
Divide your roles and tasks: To divide your roles and tasks with your friends online, you need to assign different roles and tasks to each player according to their skills and preferences. For example, you can have one player focus on collecting resources, another on crafting items, another on building the raft, and another on exploring the destinations.
-
Help each other out: To help each other out with your friends online, you need to support and assist each other in various situations. For example, you can give each other food and water, heal each other's wounds, rescue each other from shark attacks, defend each other from enemies, etc.
-
-
Conclusion
-
Raft is a game that offers a unique and exciting survival adventure on a raft in the ocean. You can download Raft: The Final Chapter for PC by following the steps in this article and enjoy the new content and features that the update brings. You can also use the tips and tricks above to play Raft effectively and have more fun. Raft is a game you can play solo or with your friends online, creating your own stories and memories. So what are you waiting for? Download Raft: The Final Chapter for PC today and start your rafting journey!
-
FAQs
-
Here are some frequently asked questions about Raft:
-
-
How much does Raft cost?
-
Raft costs $19.99 USD on Steam. However, you can also get it for a lower price during sales or discounts.
-
How many players can play Raft online?
-
Raft supports up to 10 players online. You can either create or join a public or private world, and invite or join your friends.
-
How long does it take to finish Raft?
-
Raft does not have a fixed length or ending. You can play it as long as you want, and create your own goals and challenges. However, if you want to complete the main story of the game, it will take you about 20 to 30 hours.
-
Is Raft suitable for children?
-
Raft is rated E10+ by ESRB, which means that it is suitable for everyone 10 years and older. The game contains mild violence, blood, and language, but nothing too graphic or explicit.
-
Is Raft based on a true story?
-
Raft is not based on a true story, but it is inspired by real-life events and phenomena. The game's developers were influenced by the Great Pacific Garbage Patch, which is a large accumulation of plastic and debris in the Pacific Ocean. They also drew inspiration from various survival stories and movies, such as Life of Pi, Cast Away, etc.
-
-
\ No newline at end of file
diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Ludo Joker APK and Enjoy the Fun of Ludo Time.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Ludo Joker APK and Enjoy the Fun of Ludo Time.md
deleted file mode 100644
index 11eafc285fc26d40e8417e5282465d16a742e3ec..0000000000000000000000000000000000000000
--- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Ludo Joker APK and Enjoy the Fun of Ludo Time.md
+++ /dev/null
@@ -1,121 +0,0 @@
-
-
Ludo Joker APK Download: How to Play Ludo Online with Friends
-
Ludo is one of the most popular board games in the world. It is a game of strategy, luck, and fun that can be enjoyed by people of all ages. But what if you want to play Ludo online with your friends or other players from around the world? That's where Ludo Joker comes in.
-
Ludo Joker is an online multiplayer game that lets you play Ludo on your Android device. You can join or create a game room, invite your friends or play with random players, chat and voice chat with them, and compete for coins and glory. Ludo Joker is a free game that you can download from the Google Play Store or from the official website. In this article, we will show you how to download and install Ludo Joker APK on your Android device, how to create an account and join a game room, how to play Ludo Joker online with your friends or other players, and some tips and tricks to win more games and earn more coins. Let's get started!
Click on the "Download APK" button and wait for the file to be downloaded on your device.
-
Once the file is downloaded, open it and tap on "Install". You may need to enable "Unknown Sources" in your device settings if you haven't done so before.
-
Wait for the installation process to finish and then launch the game.
-
-
Congratulations! You have successfully downloaded and installed Ludo Joker APK on your Android device. Now you are ready to create an account and join a game room.
-
How to Create an Account and Join a Game Room
-
Creating an account and joining a game room in Ludo Joker is very simple. Just follow these easy steps:
-
-
When you launch the game, you will see a screen where you can choose your username, avatar, country, and language. You can also sign in with your Facebook account if you want.
-
After choosing your details, tap on "Play Now" and you will be taken to the main menu of the game.
-
On the main menu, you will see four options: Play Online, Play Offline, Play With Friends, and Settings. Tap on "Play Online" if you want to play with random players from around the world or tap on "Play With Friends" if you want to invite your friends or join a private game room.
-
If you choose "Play Online", you will see a list of game rooms that are available. You can filter them by game mode (Classic, Quick, Master), number of players (2, 4), and bet amount (100, 500, 1000, 5000, 10000). You can also create your own game room by tapping on the "+" icon at the bottom right corner of the screen.
-
If you choose "Play With Friends", you will see two options: Create Room and Join Room. Tap on "Create Room" if you want to create a private game room and invite your friends or tap on "Join Room" if you want to join a game room that your friend has created. You will need to enter a room code to join a private game room.
-
Once you join or create a game room, you will see the game board and the players. You can chat and voice chat with them by tapping on the chat icon at the bottom left corner of the screen. You can also change your avatar, country, and language by tapping on your profile picture at the top left corner of the screen.
-
When everyone is ready, tap on "Start Game" and the game will begin. You will see a dice on the screen that you can roll by tapping on it. You can move your tokens according to the number on the dice. The objective of the game is to move all your tokens to the center of the board before your opponents do. You can also capture your opponents' tokens by landing on their squares.
-
-
That's it! You have learned how to create an account and join a game room in Ludo Joker. Now let's see how to play Ludo Joker online with your friends or other players.
-
How to Play Ludo Joker Online with Your Friends or Other Players
-
Playing Ludo Joker online with your friends or other players is very fun and easy. Just follow these simple tips and tricks:
-
-
Choose your game mode wisely. There are three game modes in Ludo Joker: Classic, Quick, and Master. Classic mode is the traditional Ludo game where you need to move all four tokens to the center of the board. Quick mode is a faster version of Ludo where you only need to move one token to the center of the board. Master mode is a challenging version of Ludo where you need to move all four tokens to the center of the board but you can only roll the dice once per turn.
-
Use your coins wisely. You can use your coins to bet on each game and win more coins if you win. You can also use your coins to buy power-ups that can help you in the game. There are four power-ups in Ludo Joker: Double Dice, Shield, Swap, and Extra Turn. Double Dice lets you roll two dice instead of one. Shield protects your token from being captured by your opponents. Swap lets you swap your token with any other token on the board. Extra Turn lets you roll the dice again after your turn.
-
Be strategic and lucky. Ludo is a game of strategy and luck, so you need to use both to win. Plan your moves ahead, anticipate your opponents' moves, and hope the dice cooperate. Sometimes you may need to take risks, and sometimes you may need to play safe. The key is to balance both and have fun (see the quick probability sketch after this list).
-
-
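To put the luck half of that advice in perspective, here is a quick back-of-the-envelope calculation of how likely you are to roll at least one six over several turns, assuming a fair die and the common Ludo rule that a six is needed to bring a token out.

```python
# P(at least one six in n turns) = 1 - (5/6) ** n, for a fair six-sided die
for n in (1, 3, 6):
    print(f"{n} turn(s): {1 - (5 / 6) ** n:.0%}")
# Output: 1 turn(s): 17%  |  3 turn(s): 42%  |  6 turn(s): 67%
```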
These are some of the tips and tricks that can help you play Ludo Joker online with your friends or other players. Now let's see what are the features and benefits of Ludo Joker APK.
-
Features and Benefits of Ludo Joker APK
-
Ludo Joker APK is a great game that offers many features and benefits for its players. Here are some of them:
-
-
-
High-quality graphics and sound effects. Ludo Joker APK has high-quality graphics and sound effects that make the game more realistic and enjoyable. You can see the details of the game board, the tokens, and the dice. You can also hear the sounds of the dice rolling, the tokens moving, and the players cheering.
-
Multiple game modes and levels of difficulty. Ludo Joker APK has multiple game modes and levels of difficulty that suit different preferences and skills. You can choose from Classic, Quick, or Master mode depending on how long or how challenging you want the game to be. You can also choose from 2, 3, or 4 players depending on how many people you want to play with.
-
Chat and voice chat options to communicate with other players. Ludo Joker APK has chat and voice chat options that let you communicate with other players in real time. You can send text messages or voice messages to your friends or opponents by tapping on the chat icon at the bottom left corner of the screen. You can also use emojis and stickers to express your emotions and have fun.
-
Daily rewards and bonuses for active players. Ludo Joker APK has daily rewards and bonuses for active players that keep them motivated and loyal. You can get free coins, power-ups, and other gifts every day by logging in, playing games, and completing tasks. You can also get more rewards and bonuses by inviting your friends to play with you.
-
Leaderboards and achievements to show off your skills. Ludo Joker APK has leaderboards and achievements that show off your skills and progress. You can see your rank and score on the global, country, and friends leaderboards. You can also see your achievements and badges that you have earned by playing games and winning coins. You can share your leaderboards and achievements with your friends and challenge them to beat you.
-
-
These are some of the features and benefits of Ludo Joker APK that make it a great game to play online with your friends or other players. Now let's see what are the pros and cons of Ludo Joker APK.
-
Pros and Cons of Ludo Joker APK
-
Ludo Joker APK is a game that has many pros and cons that you should consider before downloading and playing it. Here are some of them:
-
-
-
| Pros | Cons |
| --- | --- |
| Fun, easy, and addictive gameplay | Requires internet connection |
| Social and interactive features | May contain ads and in-app purchases |
| Free to download and play | |
-
-
-
-
As you can see, Ludo Joker APK has more pros than cons, which means that it is a game that is worth trying. However, you should also be aware of the cons and decide if they are acceptable for you or not. Now let's conclude this article and answer some FAQs about Ludo Joker APK.
-
Conclusion and FAQs
-
Ludo Joker APK is an online multiplayer game that lets you play Ludo on your Android device with your friends or other players from around the world. You can download and install Ludo Joker APK from the Google Play Store or from the official website. You can create an account and join a game room, chat and voice chat with other players, use coins and power-ups to enhance your gameplay, earn rewards and bonuses, and show off your skills on the leaderboards and achievements. Ludo Joker APK is a fun, easy, and addictive game that has many features and benefits for its players. It also has some pros and cons that you should consider before playing it. If you are looking for a game that can entertain you, challenge you, and connect you with other people, then Ludo Joker APK is the game for you.
-
Here are some FAQs about Ludo Joker APK that you may have:
-
-
Is Ludo Joker APK safe to download and play?
-
Yes, Ludo Joker APK is safe to download and play. It is verified by Google Play Protect and does not contain any viruses or malware. However, you should always download it from the official website or the Google Play Store to avoid any fake or harmful versions.
-
How can I get more coins in Ludo Joker APK?
-
You can get more coins in Ludo Joker APK by winning games, completing tasks, inviting friends, logging in daily, watching ads, or buying them with real money.
-
Can I play Ludo Joker APK offline?
-
No, you cannot play Ludo Joker APK offline. You need an internet connection to play online with other players or with your friends.
-
Can I play Ludo Joker APK on PC?
-
No, you cannot play Ludo Joker APK on PC directly. However, you can use an Android emulator such as BlueStacks or NoxPlayer to run it on your PC.
-
-
\ No newline at end of file
diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Experience the Thrill of Hungry Shark Evolution with MOD Menu APK.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Experience the Thrill of Hungry Shark Evolution with MOD Menu APK.md
deleted file mode 100644
index ece667c59d016bf2e53b65fbbabfbb2a03d57d9f..0000000000000000000000000000000000000000
--- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Experience the Thrill of Hungry Shark Evolution with MOD Menu APK.md
+++ /dev/null
@@ -1,145 +0,0 @@
-
-
Download Hungry Shark Evolution Mod Menu Apk
-
If you are a fan of aquatic adventure games, you might have heard of Hungry Shark Evolution, a popular game where you take control of a hungry shark and devour everything in your way. But did you know that there is a way to enhance your gaming experience with a mod menu apk? In this article, we will tell you everything you need to know about Hungry Shark Evolution Mod Menu Apk, including what it is, how to download and install it, what are its benefits and risks, what are some alternatives, and what are some reviews and screenshots of it. Read on to find out more!
-
What is Hungry Shark Evolution?
-
Hungry Shark Evolution is an action-packed aquatic adventure game developed by Ubisoft London and released in 2012. It is the fifth installment in the Hungry Shark series and the sequel to Hungry Shark: Part 3. The game is available for Android and iOS devices and has been downloaded over 100 million times on Google Play Store.
Hungry Shark Evolution has many features that make it an exciting and addictive game. Some of them are:
-
-
More than a dozen unique sharks and other creatures to evolve: You can choose from different types of sharks, such as the Great White, the Hammerhead, the Megalodon, and even the T-Rex shark. You can also unlock other creatures, such as the Crab Boss, the Giant Crab, the Big Daddy, and the Zombie Sharks. Each shark has its own abilities and stats that you can upgrade by feeding them.
-
Discover and devour wild animals from the deep: You can explore a vast underwater world full of fish, whales, dolphins, turtles, humans, and other prey. You can also find hidden treasures, sunken ships, volcanoes, and other secrets. The more you eat, the more points you earn and the longer you survive.
-
Recruit Baby Sharks and grow them to boost your predatory powers: You can buy Baby Sharks from the shop or find them in the ocean. They will follow you around and help you eat more. You can also grow them by feeding them or using gems.
-
Equip yourself with awesome accessories like Lasers, Jetpacks, and Top Hats: You can customize your shark with various items that give you extra abilities or bonuses. For example, you can use a Laser to zap your enemies, a Jetpack to fly above the water, or a Top Hat to look classy.
-
Game modes
-
Hungry Shark Evolution has two main game modes: Survival Mode and Gold Rush Mode.
-
-
Survival Mode: This is the default mode where you have to survive as long as possible by eating and avoiding dangers. You have a health bar that decreases over time and when you get hurt by enemies or obstacles. You can replenish your health by eating prey or collecting health items. You also have a hunger bar that increases when you eat and decreases when you don't. If your hunger bar reaches zero, you will starve and die. You can earn coins and gems by eating certain prey, completing missions, or finding treasure chests.
-
Gold Rush Mode: This is a special mode that activates when you fill up your gold rush meter by eating gold creatures or collecting gold items. In this mode, you become invincible and can eat anything, including mines, jellyfish, and submarines. You also earn extra coins and gems for everything you eat. The gold rush meter drains over time, so you have to keep eating to extend the duration of the mode.
-
-
Game graphics and sound
-
Hungry Shark Evolution has impressive 3D graphics that create a realistic and immersive underwater environment. The game features detailed and diverse shark models, animations, and effects. The game also has dynamic lighting and shadows, water reflections, and bubbles. The game runs smoothly on most devices, but you can adjust the graphics quality in the settings if you experience lag or battery drain.
-
The game also has a fitting and catchy soundtrack that matches the mood and theme of the game. The game features various sound effects that enhance the gameplay, such as the shark's roar, the prey's screams, the explosions, and the water splashes. The game also has voice-overs that narrate the missions, achievements, and tips.
-
What is Hungry Shark Evolution Mod Menu Apk?
-
A Hungry Shark Evolution mod menu apk is a modified version of the original game that offers additional features and benefits that are not available in the official version. A mod menu apk is usually created by third-party developers or hackers who modify the game's code or data to change its behavior or appearance.
-
Mod menu features
-
A Hungry Shark Evolution mod menu apk typically has a mod menu that allows you to access and activate various cheats and hacks in the game. Some of the common features of a mod menu apk are:
-
-
Unlimited coins and gems: You can get unlimited amounts of coins and gems in the game, which are the main currencies used to buy and upgrade sharks and accessories. You can also use coins and gems to revive your shark or skip missions.
-
Increased swimming speed: You can increase your shark's swimming speed to move faster and catch more prey. You can also outrun your enemies and avoid obstacles more easily.
-
Invincibility: You can make your shark invincible to any damage or attack from enemies or obstacles. You can also ignore your health and hunger bars, as they will never decrease.
-
All sharks unlocked: You can unlock all the sharks in the game without having to meet the requirements or pay for them. You can also switch between different sharks at any time.
-
All accessories unlocked: You can unlock all the accessories in the game without having to buy them with coins or gems. You can also equip any accessory to any shark without any restrictions.
-
All missions completed: You can complete all the missions in the game without having to do them yourself. You can also get all the rewards for completing them.
-
No ads: You can remove all the ads in the game that may interrupt your gameplay or consume your data.
-
-
Mod menu benefits
-
A Hungry Shark Evolution mod menu apk can provide you with several benefits that can enhance your gaming experience and enjoyment. Some of them are:
-
-
You can save time and money: You don't have to spend hours or real money to earn coins and gems in the game. You can get unlimited amounts of them for free with a mod menu apk. You can also skip missions or revive your shark without paying anything.
-
You can explore more content: You don't have to wait or grind to unlock new sharks and accessories in the game. You can access all of them with a mod menu apk. You can also try different combinations of sharks and accessories to see how they affect your gameplay.
-
You can have more fun: You don't have to worry about dying or running out of food in the game. You can eat anything and everything with a mod menu apk. You can also experiment with different cheats and hacks to see how they affect your gameplay. You can also challenge yourself by increasing the difficulty or setting your own goals.
-
Mod menu risks
-
A Hungry Shark Evolution mod menu apk can also pose some risks that you should be aware of before using it. Some of them are:
-
-
You can get banned or suspended: Using a mod menu apk is considered cheating and violating the game's terms of service. If you use it online or connect it to your social media accounts, you may get detected and banned or suspended by the game developers or moderators. You may also lose your progress and data if you get banned.
-
You can get viruses or malware: Downloading a mod menu apk from an unknown or untrusted source can expose your device to viruses or malware that can harm your device or steal your personal information. You should always scan the apk file with antivirus software before installing it, and verify that the file matches the checksum published by the source, if one is available (see the sketch after this list).
-
You can lose interest in the game: Using a mod menu apk can make the game too easy or boring for you. You may lose the sense of challenge or achievement that comes from playing the game normally. You may also miss out on some of the game's features or updates that are not compatible with the mod menu apk.
-
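Beyond antivirus scanning, one basic precaution when you download any apk is to verify its SHA-256 checksum against one published by a source you trust. A minimal Python sketch of that check follows; the file name is a placeholder.

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 8192) -> str:
    """Return the SHA-256 hex digest of a file, read in chunks."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Compare this digest with the one the download page publishes (if any)
print(sha256_of("downloaded.apk"))  # placeholder file name
```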
-
How to download and install Hungry Shark Evolution Mod Menu Apk?
-
If you want to try Hungry Shark Evolution Mod Menu Apk, you need to follow these steps to download and install it on your device:
-
Download link
-
You can download Hungry Shark Evolution Mod Menu Apk from this link: Hungry Shark Evolution Mod Menu Apk Download. This is a trusted and verified source that provides the latest version of the mod menu apk. The file size is about 100 MB and it requires Android 4.1 or higher to run.
-
Installation guide
-
After downloading the apk file, you need to install it on your device. Here is how:
-
-
Enable unknown sources: Go to your device's settings and look for security or privacy options. Find the option that says "unknown sources" or "allow installation from unknown sources" and enable it. This will allow you to install apps that are not from Google Play Store.
-
Locate the apk file: Go to your device's file manager and look for the folder where you downloaded the apk file. Tap on the file to open it.
-
Install the apk file: Follow the instructions on the screen to install the apk file. It may take a few seconds or minutes depending on your device's speed and memory.
-
Launch the game: After the installation is complete, you can launch the game from your app drawer or home screen. You will see a mod menu icon on the top left corner of the screen. Tap on it to access and activate the mod menu features.
-
-
Permissions required
-
Hungry Shark Evolution Mod Menu Apk requires some permissions from your device to function properly. Some of them are:
-
-
Storage: This permission allows the app to read and write data on your device's storage, such as saving your progress, downloading additional files, or caching data.
-
Wi-Fi connection information: This permission allows the app to view information about your Wi-Fi network, such as whether it is connected or not, or what devices are connected to it.
-
Full network access: This permission allows the app to access the internet, such as downloading updates, connecting to servers, or sending data.
-
View network connections: This permission allows the app to view information about your network connections, such as which networks are available or not, or what type of network it is.
-
Prevent device from sleeping: This permission allows the app to prevent your device from going into sleep mode while you are playing the game.
-
Control vibration: This permission allows the app to control your device's vibration feature, such as when you get hit by an enemy or complete a mission.
-
-
Alternatives to Hungry Shark Evolution Mod Menu Apk
-
If you are looking for some alternatives to Hungry Shark Evolution Mod Menu Apk, you can try these apps that offer similar features and benefits:
-
Hungry Shark World
-
Hungry Shark World is the sequel to Hungry Shark Evolution and the sixth installment in the Hungry Shark series. It was released in 2016 and has over 50 million downloads on Google Play Store. The game features more than 30 different sharks and other creatures to collect and evolve, as well as four different worlds to explore: Pacific Islands, Arctic Ocean, Arabian Sea, and South China Sea. The game also has improved graphics, sound, and gameplay, as well as new enemies, bosses, missions, and events.
-
You can download Hungry Shark World from Google Play Store or from this link: Hungry Shark World Download. You can also download Hungry Shark World Mod Apk from this link: Hungry Shark World Mod Apk Download. The mod apk offers unlimited coins and gems, unlocked sharks and accessories, increased speed and damage, and no ads.
-
Gems of Hungry Shark Evolution
-
Gems of Hungry Shark Evolution is a puzzle game that is based on the Hungry Shark Evolution universe. It was released in 2020 and has over 1 million downloads on Google Play Store. The game features hundreds of levels where you have to match three or more gems of the same color to clear them and earn points. You can also use special gems and power-ups to create combos and boost your score. The game also has a story mode where you can unlock new sharks and accessories, as well as a multiplayer mode where you can compete with other players online.
-
You can download Gems of Hungry Shark Evolution from Google Play Store or from this link: Gems of Hungry Shark Evolution Download. You can also download Gems of Hungry Shark Evolution Mod Apk from this link: Gems of Hungry Shark Evolution Mod Apk Download. The mod apk offers unlimited coins and gems, unlocked sharks and accessories, unlimited lives and moves, and no ads.
-
Diamond for hungry shark Evo
-
Diamond for hungry shark Evo is a simulation game that is inspired by the Hungry Shark Evolution game. It was released in 2021 and has over 100 thousand downloads on Google Play Store. The game features a realistic 3D shark simulator where you can control a hungry shark and eat everything in your way. You can also customize your shark with different skins, accessories, and weapons. The game also has various missions, challenges, and achievements to complete.
-
You can download Diamond for hungry shark Evo from Google Play Store or from this link: Diamond for hungry shark Evo Download. You can also download Diamond for hungry shark Evo Mod Apk from this link: Diamond for hungry shark Evo Mod Apk Download. The mod apk offers unlimited coins and diamonds, unlocked sharks and accessories, increased speed and damage, and no ads.
-
Reviews of Hungry Shark Evolution Mod Menu Apk
-
Hungry Shark Evolution Mod Menu Apk has received mixed reviews from users who have tried it. Some of them are positive, while some of them are negative. Here are some examples of the reviews:
-
Positive reviews
-
-
"This mod menu apk is awesome! I can get unlimited coins and gems, unlock all the sharks and accessories, and have fun with the cheats. I love this game so much!"
-
"I recommend this mod menu apk to anyone who loves Hungry Shark Evolution. It makes the game more enjoyable and easy. You can explore more content and have more fun with the mod menu features."
-
"This mod menu apk is the best thing ever! I can play with any shark I want, equip any accessory I want, and eat anything I want. I can also complete all the missions without any hassle. This is a great mod menu apk!"
-
-
Negative reviews
-
-
"This mod menu apk is terrible! I got banned from the game after using it online. I lost all my progress and data. This is a scam mod menu apk!"
-
"I don't like this mod menu apk. It makes the game too easy and boring. There is no challenge or achievement in playing with cheats. It ruins the fun of the game."
-
"This mod menu apk is not working for me. It crashes every time I try to launch it. It also has viruses that infected my device. This is a bad mod menu apk!"
-
-
Screenshots of Hungry Shark Evolution Mod Menu Apk
-
To give you a better idea of how Hungry Shark Evolution Mod Menu Apk looks and works, here are some screenshots of the game with the mod menu features enabled:
-
Image 1
-
This image shows the mod menu icon on the top left corner of the screen. You can tap on it to open the mod menu and activate or deactivate the cheats and hacks. You can also see the unlimited coins and gems on the top right corner of the screen.
-
-
Image 2
-
This image shows the shark selection screen with all the sharks unlocked. You can choose any shark you want without having to pay for them or meet the requirements. You can also switch between different sharks at any time during the game.
-
-
Image 3
-
This image shows the game play screen with the invincibility and increased speed features enabled. You can see that the shark is not affected by any damage or attack from enemies or obstacles. You can also see that the shark is moving faster than normal and catching more prey.
-
-
FAQs about Hungry Shark Evolution Mod Menu Apk
-
Here are some frequently asked questions and answers about Hungry Shark Evolution Mod Menu Apk that you may find useful:
-
Q1: Is Hungry Shark Evolution Mod Menu Apk safe to use?
-
A1: Hungry Shark Evolution Mod Menu Apk is not an official version of the game and it is not endorsed or supported by the game developers or publishers. It is a modified version of the game that may contain viruses or malware that can harm your device or steal your personal information. It may also cause your account to get banned or suspended from the game. Therefore, it is not safe to use and we do not recommend it.
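One basic precaution before installing any file from an unofficial source is to verify its checksum against a value published by the distributor. Here is a minimal sketch using only the Python standard library; the file name and expected hash are placeholders:

```python
import hashlib

APK_PATH = "hungry-shark-mod.apk"                  # placeholder file name
EXPECTED_SHA256 = "<hash from the download page>"  # placeholder value

# Hash the file in chunks so large APKs need not fit in memory at once.
sha256 = hashlib.sha256()
with open(APK_PATH, "rb") as f:
    for chunk in iter(lambda: f.read(8192), b""):
        sha256.update(chunk)

print("match" if sha256.hexdigest() == EXPECTED_SHA256 else "MISMATCH - do not install")
```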
-
Q2: Do I need to root my device to use Hungry Shark Evolution Mod Menu Apk?
-
A2: No, you do not need to root your device to use Hungry Shark Evolution Mod Menu Apk. However, you do need to enable unknown sources in your device's settings to install it. You also need to disable any antivirus software or firewall that may prevent it from running.
-
Q3: Can I play Hungry Shark Evolution Mod Menu Apk offline?
-
A3: Yes, you can play Hungry Shark Evolution Mod Menu Apk offline without any internet connection. However, you may not be able to access some of the game's features or updates that require an online connection, such as leaderboards, events, or cloud saves.
-
Q4: How can I update Hungry Shark Evolution Mod Menu Apk?
-
A4: You can update Hungry Shark Evolution Mod Menu Apk by downloading and installing the latest version of the mod menu apk from the same source where you got it. However, you may lose your progress and data if you update it, as it may not be compatible with the previous version. You may also encounter errors or bugs if you update it, as it may not be compatible with the official version of the game.
-
Q5: How can I contact the developer of Hungry Shark Evolution Mod Menu Apk?
-
A5: You can contact the developer of Hungry Shark Evolution Mod Menu Apk by visiting their website or social media pages, if they have any. However, you may not get a response or support from them, as they are not affiliated with the game developers or publishers. You may also get scammed or spammed by them, as they may not be trustworthy or reliable.
Autoturn 6.0.1 is a third-party CAD application that allows you to perform swept path analysis and turning simulation for various vehicles. It is compatible with AutoCAD, MicroStation, BricsCAD, IntelliCAD and other platforms. To install and activate Autoturn 6.0.1, you need to have a license number and a CD key.
A license number is a unique code that identifies your product and allows you to use it on a specific computer. A CD key is a security code that verifies your product and prevents unauthorized copying. You can obtain both of these codes from Transoft Solutions Inc., the developer and seller of Autoturn software.
-
To install Autoturn 6.0.1, follow these steps:
-
-
Insert the Autoturn CD into your computer's CD-ROM drive.
-
Run the setup.exe file from the CD.
-
Follow the instructions on the screen to complete the installation process.
-
When prompted, enter your license number and CD key.
-
Click Next to continue.
-
Choose the CAD platform that you want to use Autoturn with.
-
Select the vehicle libraries that you want to install.
-
Click Finish to complete the installation.
-
-
To activate Autoturn 6.0.1, follow these steps:
-
-
Launch your CAD platform and load Autoturn.
-
Click on the Autoturn menu and select Activate Product.
-
Enter your license number and CD key again.
-
Click OK to activate your product.
-
-
You can now use Autoturn 6.0.1 for your swept path analysis and turning simulation projects. For more information, visit the official website of Autoturn or contact Transoft Solutions Inc.[^5^]
-
-
-
Autoturn 6.0.1 is a powerful and versatile tool that can help you design and evaluate various road and site scenarios involving vehicle movements. You can use Autoturn to analyze intersections, roundabouts, bus terminals, loading bays, parking lots and more. You can also create custom vehicles or modify existing ones to suit your specific needs.
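To give a feel for the geometry behind a turning simulation, the simple bicycle-model approximation relates turning radius to wheelbase and steering angle. This sketch is only an illustration of that relationship, not Autoturn's actual algorithm, and the vehicle numbers are made up:

```python
import math

# Illustrative vehicle, loosely resembling a single-unit truck.
wheelbase_m = 6.1        # distance between front and rear axles
steer_angle_deg = 30.0   # steering angle of the front wheels

# Bicycle-model approximation: R = L / tan(delta), measured at the rear axle.
radius_m = wheelbase_m / math.tan(math.radians(steer_angle_deg))
print(f"Approximate turning radius: {radius_m:.1f} m")  # about 10.6 m
```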
-
Some of the features of Autoturn 6.0.1 are:
-
-
It supports multiple CAD platforms and operating systems.
-
It includes over 1000 standard vehicles from around the world.
-
It allows you to define individual wheel axles on all vehicles.
-
It enables you to run simultaneous simulations of two vehicles in forward and reverse directions.
-
It reports conflicts with geometry in multiple layers or levels.
-
It provides a search function to quickly find the vehicles you are looking for.
-
-
To learn more about Autoturn 6.0.1 and how to use it effectively, you can refer to the user manual, the online help, the tutorial videos or the case studies available on the official website of Autoturn. You can also contact Transoft Solutions Inc. for technical support or feedback.
EPLAN Fluid P8: A Powerful Tool for Fluid Power Systems Design
-
EPLAN Fluid P8 is a software solution that helps engineers design and document fluid power systems, such as hydraulics, pneumatics, cooling and lubrication. EPLAN Fluid P8 supports a variety of engineering methods, from manual creation to standardized and template-based work. With EPLAN Fluid P8, engineers can benefit from features such as:
-
-
Automatic generation of schematics and reports based on component data and logic
-
Integration with EPLAN Electric P8 for cross-disciplinary engineering
-
Access to a comprehensive database of fluid power components from leading manufacturers
-
Simulation and validation of fluid power circuits
-
Import and export of data in various formats, such as DXF, DWG, PDF, XML and STEP (a small DXF-reading sketch follows this list)
-
-
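As an illustration of what DXF interchange looks like on the receiving side, the sketch below reads a drawing and tallies its entity types. It uses the third-party `ezdxf` package, which is unrelated to EPLAN and merely stands in for whatever CAD tooling you exchange files with; the file name is a placeholder:

```python
import ezdxf  # third-party: pip install ezdxf

# Placeholder name; in practice this would be a drawing exported from EPLAN.
doc = ezdxf.readfile("fluid_schematic.dxf")
msp = doc.modelspace()

# Tally entity types (LINE, TEXT, INSERT, ...) found in the model space.
counts = {}
for entity in msp:
    counts[entity.dxftype()] = counts.get(entity.dxftype(), 0) + 1
print(counts)
```
-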
EPLAN Fluid P8 runs on Windows and is available in 32-bit and 64-bit editions. The latest version of EPLAN Fluid P8 is V2.2.5.6338, which was released in October 2020. To download EPLAN Fluid P8, visit the official website of EPLAN Software[^1^].
Fluid power systems are widely used in various industries and applications, such as aerospace, automotive, construction, manufacturing, mining and agriculture. Fluid power systems can provide high power and high forces in small volumes, compared with electrically-driven systems. The forces that are exerted can be easily monitored and controlled within a system by gauges, meters and valves.
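The "high forces in small volumes" point follows directly from F = P·A: the force a cylinder exerts is the working pressure times the piston area. A worked example with illustrative numbers, not taken from any particular system:

```python
import math

pressure_pa = 200e5        # 200 bar, a common industrial hydraulic pressure
piston_diameter_m = 0.10   # 100 mm bore

area_m2 = math.pi * (piston_diameter_m / 2) ** 2   # about 0.00785 m^2
force_n = pressure_pa * area_m2                    # F = P * A
print(f"Cylinder force: {force_n / 1000:.0f} kN")  # about 157 kN
```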
-
Some examples of fluid power systems are:
-
-
Aircraft: Modern aircraft use hydraulic systems to activate their controls and to operate landing gears and brakes[^3^].
-
Missiles: Nearly all missiles, as well as their ground-support equipment, use fluid power[^3^].
-
Automobiles: Automobiles use hydraulic power systems in their transmissions, brakes and steering mechanisms[^3^].
-
Earthmoving equipment: Earthmoving equipment such as bulldozers, backhoes and others use powerful hydraulic systems for digging and also for propulsion[^2^].
-
Manufacturing: Manufacturing processes such as metal forming, plastic injection molding, stamping and assembly use fluid power systems to provide precise and reliable motion control[^1^].
-
-
Fluid power systems have many advantages over other types of power transmission, such as:
-
-
High power-to-weight ratio: Fluid power systems can deliver high power with relatively low weight and size.
-
High efficiency: Fluid power systems can have high volumetric and mechanical efficiency, minimizing energy losses.
-
High flexibility: Fluid power systems can be easily adapted to different configurations and layouts, as well as variable speed and force requirements.
-
High reliability: Fluid power systems can operate in harsh environments and withstand shock loads and vibrations.
-
High safety: Fluid power systems can be designed with fail-safe features and overload protection.
-
cec2833e83
-
-
\ No newline at end of file
diff --git a/spaces/stomexserde/gpt4-ui/Examples/Fpwin Gr 7 Serial 64l.md b/spaces/stomexserde/gpt4-ui/Examples/Fpwin Gr 7 Serial 64l.md
deleted file mode 100644
index fd9a0b98f055746756f2a6ced4e30a435cb0aa1e..0000000000000000000000000000000000000000
--- a/spaces/stomexserde/gpt4-ui/Examples/Fpwin Gr 7 Serial 64l.md
+++ /dev/null
@@ -1,29 +0,0 @@
-
-
How to Download and Install Control FPWIN GR7 Software for Panasonic PLCs
-
Control FPWIN GR7 is programming software for Panasonic PLCs that supports the IEC 61131-3 standard. It allows you to create programs using various languages, such as IL, LD, FBD, SFC and ST. You can also use the encryption function to protect your programs from unauthorized access.
In this article, we will show you how to download and install Control FPWIN GR7 software on your Windows PC. You will need a serial number to activate the software after installation.
Step 1: Download Control FPWIN GR7 Software
-
On the download page of the Panasonic website, you will see two versions of Control FPWIN GR7 software: one with encryption function and one without. Choose the version that suits your needs and click on the "Download" button. You will need to register an account or log in with your existing account to access the download page.
-
The file size of Control FPWIN GR7 software is about 193 MB. It may take some time to download depending on your internet speed.
-
Step 2: Install Control FPWIN GR7 Software
-
After downloading Control FPWIN GR7 software, you need to unzip the file and run the setup.exe file. Follow the instructions on the screen to install the software on your PC. You may need to restart your PC after installation.
-
When you launch Control FPWIN GR7 software for the first time, you will be asked to enter a serial number to activate the software. You can find the serial number on the CD case or on the email confirmation that you received after purchasing the software. Enter the serial number and click on "OK".
-
-
Congratulations! You have successfully installed Control FPWIN GR7 software on your PC. You can now start creating programs for your Panasonic PLCs using Control FPWIN GR7 software.
-
-
Step 3: Create a New Project
-
To create a new project in Control FPWIN GR7 software, you need to click on the "File" menu and select "New Project". You will see a dialog box where you can enter the name and location of your project. You can also choose the PLC model and type that you want to use for your project.
-
After creating a new project, you will see the main window of Control FPWIN GR7 software. On the left side, you will see the project explorer where you can manage your programs, variables, libraries and other resources. On the right side, you will see the editor where you can write your programs using different languages.
-
Step 4: Write Your Program
-
To write your program in Control FPWIN GR7 software, you need to select the language that you want to use from the toolbar. You can choose from IL, LD, FBD, SFC and ST. Each language has its own syntax and features that you need to learn and follow.
-
You can also use the built-in functions and libraries that Control FPWIN GR7 software provides. You can access them from the "Insert" menu or from the toolbox on the right side. You can drag and drop the functions and libraries to your editor and configure their parameters.
-
When you write your program, you need to follow the IEC 61131-3 standard and the rules of Control FPWIN GR7 software. You can check the syntax and errors of your program by clicking on the "Check" button on the toolbar. You can also debug your program by using breakpoints, watchpoints and other tools.
-
Step 5: Transfer Your Program to PLC
-
To transfer your program to PLC in Control FPWIN GR7 software, you need to connect your PLC to your PC using a communication cable or a network. You can check the connection status by clicking on the "Online" button on the toolbar.
-
Before transferring your program to PLC, you need to compile your program by clicking on the "Compile" button on the toolbar. This will generate a binary file that can be executed by your PLC. You can also optimize your program by selecting different options in the compile settings.
-
After compiling your program, you need to click on the "Transfer" button on the toolbar. This will open a dialog box where you can select the destination PLC and other options. You can also choose to transfer only selected parts of your program or compare your program with the existing one in PLC.
-
When you transfer your program to PLC, you need to make sure that your PLC is in stop mode and that there are no errors or conflicts. You can also verify your program by clicking on the "Verify" button on the toolbar. This will compare your program with the one in PLC and show any differences.
cec2833e83
-
-
\ No newline at end of file
diff --git a/spaces/stratussox/yolov5_inference/data/scripts/get_coco.sh b/spaces/stratussox/yolov5_inference/data/scripts/get_coco.sh
deleted file mode 100644
index 0d388b0a12a84c504a2b12e85e3edcac5d78530c..0000000000000000000000000000000000000000
--- a/spaces/stratussox/yolov5_inference/data/scripts/get_coco.sh
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/bin/bash
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
-# Download COCO 2017 dataset http://cocodataset.org
-# Example usage: bash data/scripts/get_coco.sh
-# parent
-# ├── yolov5
-# └── datasets
-# └── coco ← downloads here
-
-# Arguments (optional) Usage: bash data/scripts/get_coco.sh --train --val --test --segments
-if [ "$#" -gt 0 ]; then
- for opt in "$@"; do
- case "${opt}" in
- --train) train=true ;;
- --val) val=true ;;
- --test) test=true ;;
- --segments) segments=true ;;
- esac
- done
-else
- train=true
- val=true
- test=false
- segments=false
-fi
-
-# Download/unzip labels
-d='../datasets' # unzip directory
-url=https://github.com/ultralytics/yolov5/releases/download/v1.0/
-if [ "$segments" == "true" ]; then
- f='coco2017labels-segments.zip' # 168 MB
-else
- f='coco2017labels.zip' # 46 MB
-fi
-echo 'Downloading' $url$f ' ...'
-curl -L $url$f -o $f -# && unzip -q $f -d $d && rm $f &
-
-# Download/unzip images
-d='../datasets/coco/images' # unzip directory
-url=http://images.cocodataset.org/zips/
-if [ "$train" == "true" ]; then
- f='train2017.zip' # 19G, 118k images
- echo 'Downloading' $url$f '...'
- curl -L $url$f -o $f -# && unzip -q $f -d $d && rm $f &
-fi
-if [ "$val" == "true" ]; then
- f='val2017.zip' # 1G, 5k images
- echo 'Downloading' $url$f '...'
- curl -L $url$f -o $f -# && unzip -q $f -d $d && rm $f &
-fi
-if [ "$test" == "true" ]; then
- f='test2017.zip' # 7G, 41k images (optional)
- echo 'Downloading' $url$f '...'
- curl -L $url$f -o $f -# && unzip -q $f -d $d && rm $f &
-fi
-wait # finish background tasks
diff --git a/spaces/sub314xxl/MetaGPT/metagpt/actions/design_api_review.py b/spaces/sub314xxl/MetaGPT/metagpt/actions/design_api_review.py
deleted file mode 100644
index 687a33652c119006558ddfef5b6150f5599f2947..0000000000000000000000000000000000000000
--- a/spaces/sub314xxl/MetaGPT/metagpt/actions/design_api_review.py
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-"""
-@Time : 2023/5/11 19:31
-@Author : alexanderwu
-@File : design_api_review.py
-"""
-from metagpt.actions.action import Action
-
-
-class DesignReview(Action):
- def __init__(self, name, context=None, llm=None):
- super().__init__(name, context, llm)
-
- async def run(self, prd, api_design):
- prompt = f"Here is the Product Requirement Document (PRD):\n\n{prd}\n\nHere is the list of APIs designed " \
- f"based on this PRD:\n\n{api_design}\n\nPlease review whether this API design meets the requirements" \
- f" of the PRD, and whether it complies with good design practices."
-
- api_review = await self._aask(prompt)
- return api_review
diff --git a/spaces/sub314xxl/MetaGPT/tests/metagpt/actions/test_debug_error.py b/spaces/sub314xxl/MetaGPT/tests/metagpt/actions/test_debug_error.py
deleted file mode 100644
index 555c84e4e21286f369a218161ec8f6a3581d5751..0000000000000000000000000000000000000000
--- a/spaces/sub314xxl/MetaGPT/tests/metagpt/actions/test_debug_error.py
+++ /dev/null
@@ -1,155 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-"""
-@Time : 2023/5/11 17:46
-@Author : alexanderwu
-@File : test_debug_error.py
-"""
-import pytest
-
-from metagpt.actions.debug_error import DebugError
-
-EXAMPLE_MSG_CONTENT = '''
----
-## Development Code File Name
-player.py
-## Development Code
-```python
-from typing import List
-from deck import Deck
-from card import Card
-
-class Player:
- """
- A class representing a player in the Black Jack game.
- """
-
- def __init__(self, name: str):
- """
- Initialize a Player object.
-
- Args:
- name (str): The name of the player.
- """
- self.name = name
- self.hand: List[Card] = []
- self.score = 0
-
- def draw(self, deck: Deck):
- """
- Draw a card from the deck and add it to the player's hand.
-
- Args:
- deck (Deck): The deck of cards.
- """
- card = deck.draw_card()
- self.hand.append(card)
- self.calculate_score()
-
- def calculate_score(self) -> int:
- """
- Calculate the score of the player's hand.
-
- Returns:
- int: The score of the player's hand.
- """
- self.score = sum(card.value for card in self.hand)
- # Handle the case where Ace is counted as 11 and causes the score to exceed 21
- if self.score > 21 and any(card.rank == 'A' for card in self.hand):
- self.score -= 10
- return self.score
-
-```
-## Test File Name
-test_player.py
-## Test Code
-```python
-import unittest
-from blackjack_game.player import Player
-from blackjack_game.deck import Deck
-from blackjack_game.card import Card
-
-class TestPlayer(unittest.TestCase):
- ## Test the Player's initialization
- def test_player_initialization(self):
- player = Player("Test Player")
- self.assertEqual(player.name, "Test Player")
- self.assertEqual(player.hand, [])
- self.assertEqual(player.score, 0)
-
- ## Test the Player's draw method
- def test_player_draw(self):
- deck = Deck()
- player = Player("Test Player")
- player.draw(deck)
- self.assertEqual(len(player.hand), 1)
- self.assertEqual(player.score, player.hand[0].value)
-
- ## Test the Player's calculate_score method
- def test_player_calculate_score(self):
- deck = Deck()
- player = Player("Test Player")
- player.draw(deck)
- player.draw(deck)
- self.assertEqual(player.score, sum(card.value for card in player.hand))
-
- ## Test the Player's calculate_score method with Ace card
- def test_player_calculate_score_with_ace(self):
- deck = Deck()
- player = Player("Test Player")
- player.hand.append(Card('A', 'Hearts', 11))
- player.hand.append(Card('K', 'Hearts', 10))
- player.calculate_score()
- self.assertEqual(player.score, 21)
-
- ## Test the Player's calculate_score method with multiple Aces
- def test_player_calculate_score_with_multiple_aces(self):
- deck = Deck()
- player = Player("Test Player")
- player.hand.append(Card('A', 'Hearts', 11))
- player.hand.append(Card('A', 'Diamonds', 11))
- player.calculate_score()
- self.assertEqual(player.score, 12)
-
-if __name__ == '__main__':
- unittest.main()
-
-```
-## Running Command
-python tests/test_player.py
-## Running Output
-standard output: ;
-standard errors: ..F..
-======================================================================
-FAIL: test_player_calculate_score_with_multiple_aces (__main__.TestPlayer)
-----------------------------------------------------------------------
-Traceback (most recent call last):
- File "tests/test_player.py", line 46, in test_player_calculate_score_with_multiple_aces
- self.assertEqual(player.score, 12)
-AssertionError: 22 != 12
-
-----------------------------------------------------------------------
-Ran 5 tests in 0.007s
-
-FAILED (failures=1)
-;
-## instruction:
-The error is in the development code, specifically in the calculate_score method of the Player class. The method is not correctly handling the case where there are multiple Aces in the player's hand. The current implementation only subtracts 10 from the score once if the score is over 21 and there's an Ace in the hand. However, in the case of multiple Aces, it should subtract 10 for each Ace until the score is 21 or less.
-## File To Rewrite:
-player.py
-## Status:
-FAIL
-## Send To:
-Engineer
----
-'''
-
-@pytest.mark.asyncio
-async def test_debug_error():
-
- debug_error = DebugError("debug_error")
-
- file_name, rewritten_code = await debug_error.run(context=EXAMPLE_MSG_CONTENT)
-
- assert "class Player" in rewritten_code # rewrite the same class
- assert "while self.score > 21" in rewritten_code # a key logic to rewrite to (original one is "if self.score > 12")
diff --git a/spaces/sudokush/goofyai-3d_render_style_xl__generator/app.py b/spaces/sudokush/goofyai-3d_render_style_xl__generator/app.py
deleted file mode 100644
index 4f2d3011c603b276c7800e5d1e9de8bf628eeda2..0000000000000000000000000000000000000000
--- a/spaces/sudokush/goofyai-3d_render_style_xl__generator/app.py
+++ /dev/null
@@ -1,3 +0,0 @@
-import gradio as gr
-
-gr.Interface.load("models/goofyai/3d_render_style_xl").launch()
\ No newline at end of file
diff --git a/spaces/sunil448832/retrieval-augment-generation/app.py b/spaces/sunil448832/retrieval-augment-generation/app.py
deleted file mode 100644
index 592ca177378ce185539b67ee9f70e6c74c2f3df6..0000000000000000000000000000000000000000
--- a/spaces/sunil448832/retrieval-augment-generation/app.py
+++ /dev/null
@@ -1,32 +0,0 @@
-import gradio as gr
-from models import EmbeddingModel, LLM
-from utils import MistralPrompts
-from vector_store import FaissVectorStore
-from chat import ChatBot
-
-VECTOR_DATABASE_PATH = 'vector_db'
-# Initialize models and vector store
-embedding_model = EmbeddingModel(model_name='sentence-transformers/all-MiniLM-L6-v2')
-llm = LLM("mistralai/Mistral-7B-Instruct-v0.1")
-vector_store = FaissVectorStore.as_retriever(database_path=VECTOR_DATABASE_PATH)
-
-# Create a ChatBot instance
-chat_bot = ChatBot(llm, embedding_model, vector_store)
-
-# Function to handle the user's input and generate a response.
-# Named chat_bot_fn so it does not shadow the ChatBot instance above;
-# a function named chat_bot would make chat_bot.chat() call the function itself.
-def chat_bot_fn(input_text):
- response = chat_bot.chat(input_text)
-
- return response
-
-# Create a Gradio interface
-chatbot_interface = gr.Interface(
- fn=chat_bot_fn,
- inputs=gr.inputs.Textbox(label="User:"),
- outputs=gr.outputs.Textbox(label="Bot:"),
- title="Chatbot Assistant for PAN card related queries",
- theme="compact"
-)
-
-# Launch the Gradio interface
-chatbot_interface.launch()
diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Download Xforce BEST Keygen AutoCAD Electrical 2019 64 Bit Patch.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Download Xforce BEST Keygen AutoCAD Electrical 2019 64 Bit Patch.md
deleted file mode 100644
index e06c8f4b366195e4a106bec5a62734b9e6b96cc0..0000000000000000000000000000000000000000
--- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Download Xforce BEST Keygen AutoCAD Electrical 2019 64 Bit Patch.md
+++ /dev/null
@@ -1,123 +0,0 @@
-
-
How to Download X-force Keygen AutoCAD Electrical 2019 64 Bit Patch
-
-
If you are looking for a way to activate AutoCAD Electrical 2019, one of the best products of Autodesk for electrical design and engineering, you might want to try X-force Keygen. This is a tool that can generate keys for any Autodesk product, including AutoCAD Electrical 2019. In this article, we will show you how to download X-force Keygen AutoCAD Electrical 2019 64 bit patch and use it to activate your software.
-
-
What is X-force Keygen?
-
-
X-force Keygen is a program that can generate product keys for any Autodesk product. It is also known as a crack or a patch, as it bypasses the activation process of the software. X-force Keygen can be used for both 32-bit and 64-bit versions of Autodesk products, and it works for all versions from 2010 to 2019.
-
download xforce keygen AutoCAD Electrical 2019 64 bit patch
X-force Keygen is not an official product of Autodesk, and it is not supported or endorsed by them. It is a third-party tool that can violate the terms and conditions of the software license agreement. Therefore, using X-force Keygen is at your own risk and responsibility.
-
-
How to Download X-force Keygen AutoCAD Electrical 2019 64 Bit Patch
-
-
To download X-force Keygen AutoCAD Electrical 2019 64 bit patch, you need to follow these steps:
-
-
-
Go to one of the websites that offer X-force Keygen download links, such as IGG Tech, Davi24, Google Sites, Civil MDC, or Civil Arq. You can find these websites by searching for "download xforce keygen AutoCAD Electrical 2019 64 bit patch" on Google or any other search engine.
-
Choose the link that matches your system requirements and click on it. You will be redirected to another page where you can download the file.
-
Click on the download button and wait for the file to be downloaded. The file size is about 1.6 MB and it is in ZIP format.
-
Extract the ZIP file using a program like WinRAR or 7-Zip. You will get an executable file named "xf-adsk2019_x64.exe" or "xf-adsk2019_x86.exe" depending on your system.
-
-
-
You have successfully downloaded X-force Keygen AutoCAD Electrical 2019 64 bit patch. Now you need to use it to activate your software.
-
-
How to Use X-force Keygen AutoCAD Electrical 2019 64 Bit Patch
-
-
To use X-force Keygen AutoCAD Electrical 2019 64 bit patch, you need to follow these steps:
-
-
-
Install AutoCAD Electrical 2019 on your computer if you haven't done so already. You can download the trial version from the official website of Autodesk or use a DVD installer.
-
Run AutoCAD Electrical 2019 and click on "Enter a Serial Number". Enter one of these serial numbers:
666-69696969
667-98989898
400-45454545
066-66666666
-
Enter this product key: 225K1
-
Click on "Next" and then on "Request an activation code using an offline method". Click on "Close" and exit the program.
-
Run X-force Keygen AutoCAD Electrical 2019 64 bit patch as administrator. Right-click on the executable file and select "Run as administrator".
-
Click on "Patch" and wait for the message "Successfully patched" to appear.
-
Copy the request code from AutoCAD Electrical 2019 and paste it into the keygen. Click on "Generate" and copy the activation code from the keygen.
-
Run AutoCAD Electrical 2019 again and click on "I have an activation code from Autodesk". Paste the activation code into the fields and click on "Next".
-
-
-
You have successfully activated AutoCAD Electrical 2019 with X-force Keygen. You can now enjoy all the features of the software without any limitations.
-
-
The Benefits of Using X-force Keygen AutoCAD Electrical 2019 64 Bit Patch
-
-
X-force Keygen AutoCAD Electrical 2019 64 bit patch offers many benefits for users who want to activate their software. Some of them are:
-
-
-
It can generate keys for any Autodesk product, including AutoCAD Electrical 2019.
-
It can be used for both 32-bit and 64-bit versions of Autodesk products.
-
It can work for all versions from 2010 to 2019.
-
It is easy to download and use.
-
It can activate the software without any internet connection or registration.
-
It can bypass the activation process of the software and make it fully functional.
-
-
-
If you are looking for a way to activate AutoCAD Electrical 2019, one of the best products of Autodesk for electrical design and engineering, you might want to try X-force Keygen. It is a tool that can generate keys for any Autodesk product, including AutoCAD Electrical 2019. In this article, we have shown you how to download X-force Keygen AutoCAD Electrical 2019 64 bit patch and use it to activate your software.
-
What is AutoCAD Electrical 2019?
-
-
AutoCAD Electrical 2019 is software that helps you design and document electrical systems. It is part of the Autodesk family of products, which includes AutoCAD, Revit, Maya, 3ds Max, and more. AutoCAD Electrical 2019 is specially designed for electrical engineers and designers who need to create accurate and efficient electrical drawings and schematics.
-
-
-
AutoCAD Electrical 2019 has many features and benefits that make it a powerful tool for electrical design and engineering. Some of them are:
-
-
-
It supports both English and Chinese languages, as well as multiple units and standards.
-
It has a comprehensive library of symbols, components, wires, cables, and connectors that you can use in your projects.
-
It has a smart panel layout feature that allows you to create and edit panel layouts with ease.
-
It has a circuit builder feature that helps you design and analyze circuits with interactive simulation and testing.
-
It has a project manager feature that helps you organize and manage your projects with folders, subfolders, drawings, reports, and cross-references.
-
It has a collaboration feature that allows you to share your projects with other users and work together in real time.
-
-
-
If you want to learn more about AutoCAD Electrical 2019, you can visit the official website of Autodesk to find more information about the product, such as FAQs, tutorials, manuals, and forums.
-
-
Why Do You Need X-force Keygen AutoCAD Electrical 2019 64 Bit Patch?
-
-
X-force Keygen AutoCAD Electrical 2019 64 bit patch is a tool that can help you activate your software without paying for a license. This can save you money and time, as well as give you access to all the features and functions of the software. However, there are also some risks and disadvantages of using X-force Keygen AutoCAD Electrical 2019 64 bit patch. Some of them are:
-
-
-
It is not an official product of Autodesk, and it is not supported or endorsed by them. It is a third-party tool that can violate the terms and conditions of the software license agreement.
-
It can expose your computer to viruses or malware that can harm your system or steal your data.
-
It can cause errors or glitches in the software that can affect your work or performance.
-
It can be detected by Autodesk or other authorities and result in legal actions or penalties.
-
-
-
Therefore, using X-force Keygen AutoCAD Electrical 2019 64 bit patch is at your own risk and responsibility. You should always use the software legally and ethically, and respect the rights and interests of the developers and owners of the software.
-
How to Install AutoCAD Electrical 2019
-
-
Before you can use X-force Keygen AutoCAD Electrical 2019 64 bit patch, you need to install AutoCAD Electrical 2019 on your computer. You can do this by following these steps:
-
-
-
Download the trial version of AutoCAD Electrical 2019 from the official website of Autodesk or use a DVD installer. You can find the download link by searching for "AutoCAD Electrical 2019" on Google or any other search engine.
-
Run the installer and follow the instructions on the screen. You will need to accept the license agreement and choose the installation type and location.
-
Wait for the installation to complete. It may take some time depending on your system and internet speed.
-
Restart your computer if prompted.
-
-
-
You have successfully installed AutoCAD Electrical 2019 on your computer. Now you need to download X-force Keygen AutoCAD Electrical 2019 64 bit patch and use it to activate your software.
-
-
The Features and Functions of AutoCAD Electrical 2019
-
-
AutoCAD Electrical 2019 is a software that helps you design and document electrical systems. It has many features and functions that make it a powerful tool for electrical design and engineering. Some of them are:
-
-
-
It has a comprehensive library of symbols, components, wires, cables, and connectors that you can use in your projects. You can also create and edit your own symbols and components using the symbol builder and catalog browser.
-
It has a smart panel layout feature that allows you to create and edit panel layouts with ease. You can drag and drop components from the catalog browser or the schematic list onto the panel layout. You can also use the panel layout tools to align, distribute, rotate, flip, scale, or copy components.
-
It has a circuit builder feature that helps you design and analyze circuits with interactive simulation and testing. You can use the circuit builder to create circuits from predefined templates or from scratch. You can also use the circuit analysis tools to check for errors, calculate voltages, currents, resistances, power factors, and more.
-
It has a project manager feature that helps you organize and manage your projects with folders, subfolders, drawings, reports, and cross-references. You can use the project manager to create new projects, add or remove drawings, generate reports, update cross-references, and more.
-
It has a collaboration feature that allows you to share your projects with other users and work together in real time. You can use the collaboration feature to invite other users to join your project, chat with them, share files, view changes, resolve conflicts, and more.
-
-
-
If you want to learn more about the features and functions of AutoCAD Electrical 2019, you can visit the official website of Autodesk to find more information about the product, such as FAQs, tutorials, manuals, and forums.
-
Conclusion
-
-
X-force Keygen AutoCAD Electrical 2019 64 bit patch is a tool that can help you activate AutoCAD Electrical 2019, one of the best products of Autodesk for electrical design and engineering. It can generate keys for any Autodesk product, including AutoCAD Electrical 2019. In this article, we have shown you how to download X-force Keygen AutoCAD Electrical 2019 64 bit patch and use it to activate your software.
-
-
AutoCAD Electrical 2019 is software that helps you design and document electrical systems. It has many features and functions that make it a powerful tool for electrical design and engineering. It supports both English and Chinese languages, as well as multiple units and standards. It has a comprehensive library of symbols, components, wires, cables, and connectors that you can use in your projects. It has a smart panel layout feature that allows you to create and edit panel layouts with ease. It has a circuit builder feature that helps you design and analyze circuits with interactive simulation and testing. It has a project manager feature that helps you organize and manage your projects with folders, subfolders, drawings, reports, and cross-references. It has a collaboration feature that allows you to share your projects with other users and work together in real time.
-
-
If you want to learn more about X-force Keygen AutoCAD Electrical 2019 64 bit patch or AutoCAD Electrical 2019, you can visit the official website of Autodesk to find more information about the products, such as FAQs, tutorials, manuals, and forums. You can also contact the support team of Autodesk if you have any questions or issues regarding the products.
-
-
X-force Keygen AutoCAD Electrical 2019 64 bit patch is a tool that can help you activate AutoCAD Electrical 2019 and enjoy all the features and functions of the software without any limitations. Try it out today and see for yourself!
3cee63e6c2
-
-
\ No newline at end of file
diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/HD Online Player (Phir Hera Pheri Download Kickass Mov).md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/HD Online Player (Phir Hera Pheri Download Kickass Mov).md
deleted file mode 100644
index 19356de433c2fba216e139716a8fffac1cb24d97..0000000000000000000000000000000000000000
--- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/HD Online Player (Phir Hera Pheri Download Kickass Mov).md
+++ /dev/null
@@ -1,6 +0,0 @@
-
HD Online Player (Phir Hera Pheri Download Kickass Mov)
-
-Downloadhub.. Rang De Basanti 2006 - Full Movie | FREE DOWNLOAD | TORRENT | HD 1080p | x264 | WEB-DL | DD5.1 | ... Phir Hera Pheri full movie download 1080p kickass torrent · HD Online Player (The The Secret Life Of Pets English ). 4d29de3e1b
-
-
-
diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/HD Online Player (rab Ne Bana Di Jodi Movie Tamil Subt).md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/HD Online Player (rab Ne Bana Di Jodi Movie Tamil Subt).md
deleted file mode 100644
index b4c4c88cc94cd70867b2cd002f4d619b239fbb43..0000000000000000000000000000000000000000
--- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/HD Online Player (rab Ne Bana Di Jodi Movie Tamil Subt).md
+++ /dev/null
@@ -1,7 +0,0 @@
-
-
the role of kishore bhanjpuri :- vidyut jamani, mahesh anand, devanshi dwivedi. pankaj jha. what song is this! https://www.moviezonestar.com/videos/i-love-my-little-princess-song-from-dubbed-film-rab-ne-bana-di-jodi-song-tamil/. shah rukh khan on rab ne bana di jodi song : http://www.com/videos/srk-khan-on-taj-pe-chance-song-from-film-rab-ne-bana-di-jodi-jodiy/ anushka sharma's first public appearance: https://www.com/videos/anushka-sharmas-first-public-appearance-from-dubbed-film-rab-ne-bana-di-jodi-song/ rab ne bana di jodi subtitle: https://www.com/videos/tujh-mein-rab-dikhta-hai-song-from-dubbed-film-rab-ne-bana-di-jodi-song-video/ tujh mein rab dikhta hai song rab ne bana di jodi shah rukh khan, anushka sharma roop kumar. watch & download rab ne bana di jodi (2008) movie hd online. movie. rab ne bana di jodi movie tamil dubbed link. the original hindi version of rab ne bana di jodi (with pankaj jha, anjana sukhani, soha ali, neha dhupia). the song is used in a remix version in the telugu film r..
-
HD Online Player (rab ne bana di jodi movie tamil subt)
watch the best of rab ne bana di jodi film songs free online from the original song. bollywood song.. https://www.saujana.com/songs-watch/rab-ne-bana-di-jodi-song-dubbed-tamil-kannada-2/ disclaimer: the contents of this video is fictional. the video is just a hindi song from a dubbed movie on the youtube moviezoom video streaming website. this song is not. song download : rab ne bana di jodi (2010) song from dhoom 2 hindi dubbed film hindi dubbed vodafone song. song link : https://youtu.be/ssyrjr-c_zs
-
samaad mithai - rab ne bana di jodi 2012 full movie (thai dub). http://www.shahrukhnadeep.in/rab-ne-bana-di-jodi-thai-dubbed-movie/ more rab ne bana di jodi (2008) songs: https://www.youtube.com/playlist?list=plc6f8de45b2bad15f pankaj jha - aryan - rab ne bana di jodi (hindi dubbed version).. shahrukh khan (srk) is one of the most revered.
899543212b
-
-
\ No newline at end of file
diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Jiban Mukhopadhyay History Book 70.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Jiban Mukhopadhyay History Book 70.md
deleted file mode 100644
index dbb83fa6145d74062544d71f3daea30b9b4e437a..0000000000000000000000000000000000000000
--- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Jiban Mukhopadhyay History Book 70.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
-This revised edition of the Official BSF's history book, which was ... 70. — Terrorism in Punjab and the Role of BSF (1989-1993) 88. — Kashmir Militancy: A ... The Jeevan Reddy Committee of the Government of ... Mukherjee on 26 April 2014. 1fdad05405
-
-
-
diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/JustCause2UpdatedALLDLCsrepackMrDJfree [WORK].md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/JustCause2UpdatedALLDLCsrepackMrDJfree [WORK].md
deleted file mode 100644
index 9377783030aca4ae0cfc17f924bd56454bddaf0e..0000000000000000000000000000000000000000
--- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/JustCause2UpdatedALLDLCsrepackMrDJfree [WORK].md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
-Free PDF Neplan User Manual Pdf book free to read online on the web. ... about Das lied von eis und feuer band 11 ebook gratis download ready for download. ... To create more accurate search results for Neplan 5.5 try to exclude using ... download, serial, keygen, torrent, warez, etc. karim hamdadi Recommended for you. 1fdad05405
-
-
-
diff --git a/spaces/thelou1s/yamnet/app.py b/spaces/thelou1s/yamnet/app.py
deleted file mode 100644
index bb0a49e63bb4c1e59f5df784843076201aca82d3..0000000000000000000000000000000000000000
--- a/spaces/thelou1s/yamnet/app.py
+++ /dev/null
@@ -1,149 +0,0 @@
-
-# https://tfhub.dev/google/lite-model/yamnet/classification/tflite/1
-
-import tensorflow as tf
-import tensorflow_hub as hub
-import numpy as np
-import csv
-
-# import matplotlib.pyplot as plt
-# from IPython.display import Audio
-from scipy.io import wavfile
-import scipy
-
-# import soundfile as sf
-# import audio2numpy as a2n
-import os
-
-import gradio as gr
-
-# import audio2numpy
-# import numpy as np
-
-from pydub import AudioSegment
-from matplotlib import pyplot as plt
-
-
-# https://stackoverflow.com/questions/53633177/how-to-read-a-mp3-audio-file-into-a-numpy-array-save-a-numpy-array-to-mp3
-# def read(f, normalized=False):
-# """MP3 to numpy array"""
-# a = pydub.AudioSegment.from_mp3(f)
-# y = np.array(a.get_array_of_samples())
-# if a.channels == 2:
-# y = y.reshape((-1, 2))
-# if normalized:
-# return a.frame_rate, np.float32(y) / 2**15
-# else:
-# return a.frame_rate, y
-#
-#
-# def write(f, sr, x, normalized=False):
-# """numpy array to MP3"""
-# channels = 2 if (x.ndim == 2 and x.shape[1] == 2) else 1
-# if normalized: # normalized array - each item should be a float in [-1, 1)
-# y = np.int16(x * 2 ** 15)
-# else:
-# y = np.int16(x)
-# song = pydub.AudioSegment(y.tobytes(), frame_rate=sr, sample_width=2, channels=channels)
-# song.export(f, format="mp3", bitrate="320k")
-
-
-# Load the model.
-model = hub.load('https://tfhub.dev/google/yamnet/1')
-
-debug = True
-
-
-# Find the name of the class with the top score when mean-aggregated across frames.
-def class_names_from_csv(class_map_csv_text):
- """Returns list of class names corresponding to score vector."""
- class_names = []
- with tf.io.gfile.GFile(class_map_csv_text) as csvfile:
- reader = csv.DictReader(csvfile)
- for row in reader:
- class_names.append(row['display_name'])
-
- return class_names
-
-
-class_map_path = model.class_map_path().numpy()
-class_names = class_names_from_csv(class_map_path)
-
-
-def ensure_sample_rate(original_sample_rate, waveform,
- desired_sample_rate=16000):
- """Resample waveform if required."""
- if original_sample_rate != desired_sample_rate:
- desired_length = int(round(float(len(waveform)) /
- original_sample_rate * desired_sample_rate))
- waveform = scipy.signal.resample(waveform, desired_length)
- return desired_sample_rate, waveform
-
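-# Example: ensure_sample_rate(44100, waveform) resamples a 44.1 kHz clip and
-# returns (16000, resampled_waveform), the rate YAMNet expects.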
-
-os.system("wget https://storage.googleapis.com/audioset/miaow_16k.wav")
-
-
-def inference(audio):
- # wav_file_name = 'speech_whistling2.wav'
- wav_file_name = audio
- if debug: print(f'read, wav_file_name: {wav_file_name}')
-
- if wav_file_name.endswith('.mp3'):
- # files
- new_wav = convMp3ToWav(wav_file_name)
- os.remove(wav_file_name)
- wav_file_name = new_wav
- if debug: print(f'covMp3ToWav, wav_file_name: {wav_file_name}')
-
- sample_rate, wav_data = wavfile.read(wav_file_name)  # second positional arg is mmap, not a file mode
-
- if debug: print(f'read, wav_data: {wav_data}')
- if debug: print(f'read, sample_rate: {sample_rate}, wav_data: {wav_data.shape}')
- sample_rate, wav_data = ensure_sample_rate(sample_rate, wav_data)
- if debug: print(f'ensure_sample_rate, sample_rate: {sample_rate}, wav_data: {wav_data.shape}')
- if debug: print(f'ensure_single_channel, wav_data.ndim: {wav_data.ndim}')
- if wav_data.ndim >= 2: wav_data = wav_data[:, 0]
- if debug: print(f'ensure_single_channel, wav_data: {wav_data.shape}')
- if debug: print(f'ensured, wav_data: {wav_data}')
-
- waveform = wav_data / tf.int16.max
-
- # Run the model, check the output.
- scores, embeddings, spectrogram = model(waveform)
-
- scores_np = scores.numpy()
- spectrogram_np = spectrogram.numpy()
-
- # Rank classes by mean score across frames and keep the five best.
- mean_scores = scores_np.mean(axis=0)
- top5 = np.argsort(mean_scores)[::-1][:5]
- infered_class = class_names[top5[0]]
- second_class = class_names[top5[1]]
-
- class_names_str = ', '.join(f'[{class_names[i]}]' for i in top5)
- scores_str = ', '.join('[{:.4f}]'.format(mean_scores[i]) for i in top5)
-
- return f'The main sound is: [{infered_class}], \n\nthe second sound is: [{second_class}]. \n\n classes: {class_names_str}, \n\n scores: {scores_str}'
-
-
-def convMp3ToWav(wav_file_name):
- src = wav_file_name
- dst = wav_file_name + ".wav"
- # convert mp3 to wav
- sound = AudioSegment.from_file(src)
- sound.export(dst, format="wav")
- return dst
-
-
-examples = [['miaow_16k.wav']]
-title = "yamnet"
-description = "An audio event classifier trained on the AudioSet dataset to predict audio events from the AudioSet ontology."
-gr.Interface(inference, gr.inputs.Audio(type="filepath"), "text", examples=examples, title=title,
- description=description).launch(enable_queue=True)
-
diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Atheros AR5B225 Driver Windows 7 Review and Comparison with Other Wireless Drivers.md b/spaces/tialenAdioni/chat-gpt-api/logs/Atheros AR5B225 Driver Windows 7 Review and Comparison with Other Wireless Drivers.md
deleted file mode 100644
index 32b281fea6419d365cd4861da52496987838f282..0000000000000000000000000000000000000000
--- a/spaces/tialenAdioni/chat-gpt-api/logs/Atheros AR5B225 Driver Windows 7 Review and Comparison with Other Wireless Drivers.md
+++ /dev/null
@@ -1,141 +0,0 @@
-
-
Atheros AR5B225 Driver Windows 7: How to Download and Install
-
If you have a laptop or notebook that uses the Atheros AR5B225 wireless card, you may need to download and install the Atheros AR5B225 driver Windows 7 to ensure the proper functioning of your device. The Atheros AR5B225 is a dual-band WiFi and Bluetooth 4.0 card that is based on the Atheros AR9462 chipset. It supports 802.11 b/g/n wireless standards and can deliver up to 300 Mbps data transfer rate. It also has a low power consumption and a high compatibility with various devices and systems.
In this article, we will show you how to download and install the Atheros AR5B225 driver Windows 7 for your laptop or notebook. We will also provide some useful tips and troubleshooting methods to help you solve any issues that may arise during or after the installation process.
-
How to Download Atheros AR5B225 Driver Windows 7
-
To download the Atheros AR5B225 driver Windows 7, you have several options to choose from. You can download it from the official website of your laptop or notebook manufacturer, where you can also find more information about your device model and specifications. You can also download it from the official website of Qualcomm Atheros, where you can find more information about the product and its features. You can also download it from other trusted sources, such as Device Drivers or FileCR, where you can find the latest version of the driver and its installation instructions.
-
However, you should be careful when downloading from third-party sources, as they may contain malware or viruses that can harm your computer. You should always scan the downloaded files with a reliable antivirus program before installing them.
-
How to Install Atheros AR5B225 Driver Windows 7
-
To install the Atheros AR5B225 driver Windows 7, you need to follow these simple steps:
-
-
Download the Atheros AR5B225 driver Windows 7 from the official website or any other trusted source.
-
Extract the downloaded file using WinRAR or any other file archiver.
-
Run the setup file and follow the on-screen instructions to install the driver on your computer.
-
Restart your computer to complete the installation process.
-
Check if your wireless card is working properly by connecting to a WiFi network or pairing with a Bluetooth device.
-
-
Tips and Troubleshooting Methods for Atheros AR5B225 Driver Windows 7
-
If you encounter any problems or issues with your Atheros AR5B225 driver Windows 7, you can try some of these tips and troubleshooting methods to fix them:
-
qualcomm atheros ar5b225 driver windows 7
-atheros ar5b225 wifi bluetooth driver windows 7
-atheros ar5b225 wireless lan driver windows 7
-atheros ar5b225 wlan bt driver windows 7
-atheros ar5b225 wifi adapter driver windows 7
-atheros ar9462 ar5b225 driver windows 7
-hp atheros ar5b225 driver windows 7
-asus atheros ar5b225 driver windows 7
-acer atheros ar5b225 driver windows 7
-samsung atheros ar5b225 driver windows 7
-sony atheros ar5b225 driver windows 7
-lenovo atheros ar5b225 driver windows 7
-toshiba atheros ar5b225 driver windows 7
-dell atheros ar5b225 driver windows 7
-packard bell atheros ar5b225 driver windows 7
-fujitsu atheros ar5b225 driver windows 7
-msi atheros ar5b225 driver windows 7
-lg atheros ar5b225 driver windows 7
-medion atheros ar5b225 driver windows 7
-gateway atheros ar5b225 driver windows 7
-emachines atheros ar5b225 driver windows 7
-clevo atheros ar5b225 driver windows 7
-nec atheros ar5b225 driver windows 7
-haier atheros ar5b225 driver windows 7
-hasee atheros ar5b225 driver windows 7
-download atheros ar5b225 driver windows 7
-install atheros ar5b225 driver windows 7
-update atheros ar5b225 driver windows 7
-fix atheros ar5b225 driver windows 7
-uninstall atheros ar5b225 driver windows 7
-reinstall atheros ar5b225 driver windows 7
-enable atheros ar5b225 driver windows 7
-disable atheros ar5b225 driver windows 7
-configure atheros ar5b225 driver windows 7
-troubleshoot atheros ar5b225 driver windows 7
-repair atheros ar5b225 driver windows 7
-optimize atheros ar5b225 driver windows 7
-test atheros ar5b225 driver windows 7
-scan atheros ar5b225 driver windows 7
-backup atheros ar5b225 driver windows 7
-restore atheros ar5b225 driver windows 7
-upgrade atheros ar5b225 driver windows 7
-downgrade atheros ar5b225 driver windows 7
-rollback atheros ar5b225 driver windows 7
-review atheros ar5b225 driver windows 7
-compare atheros ar5b225 driver windows 7
-benchmark atheros ar5b225 driver windows 7
-verify atheros ar5b225 driver windows 7
-diagnose atheros ar5b225 driver windows 7
-resolve atheros ar5b225 driver windows 7
-
-
Make sure you have downloaded and installed the correct version of the driver that matches your device model and operating system.
-
Make sure you have disabled any antivirus or firewall programs that may interfere with the installation process.
-
Make sure you have uninstalled any previous versions of the driver that may cause conflicts or errors.
-
Make sure you have updated your BIOS and chipset drivers to the latest version.
-
If your wireless card is not detected or recognized by your computer, you can try to reseat it or replace it with another one.
-
If your wireless card is not working properly or has poor performance, you can try to update or roll back the driver to a previous version.
-
If your wireless card is causing blue screen errors or system crashes, you can try to disable or uninstall the driver and use another one.
-
-
Conclusion
-
Atheros AR5B225 driver Windows 7 is a necessary software that enables your laptop or notebook to communicate with your wireless card and access WiFi and Bluetooth networks. It is easy to download and install from various sources, but you should always be careful and scan the files before installing them. You should also follow some useful tips and troubleshooting methods to solve any issues that may arise during or after the installation process. If you are looking for a reliable and effective solution for your wireless card problems, you should definitely try Atheros AR5B225 driver Windows 7!
-
How to Update Atheros AR5B225 Driver Windows 7
-
If you already have the Atheros AR5B225 driver Windows 7 installed on your computer, you may need to update it from time to time to ensure the optimal performance and security of your wireless card. Updating your driver can also fix some of the issues or errors that you may encounter with your wireless card. There are two ways to update your Atheros AR5B225 driver Windows 7: manually or automatically.
-
To update your driver manually, you need to follow these steps (a scripted sketch follows the list):
-
-
Go to the Device Manager on your computer and find your wireless card under the Network adapters category.
-
Right-click on your wireless card and select Update driver software.
-
Choose Browse my computer for driver software and locate the folder where you have downloaded the latest version of the driver.
-
Follow the on-screen instructions to install the updated driver on your computer.
-
Restart your computer to apply the changes.
-
-
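For readers comfortable with a command line, the manual steps above can also be scripted. The sketch below is a minimal illustration, not an official Atheros tool: it assumes Windows's built-in pnputil utility, an elevated (administrator) prompt, and a hypothetical folder C:\Drivers\AR5B225 where you extracted the downloaded driver package.

```python
# Minimal sketch: stage and install extracted driver packages with pnputil.
# Run from an elevated prompt; the folder below is a hypothetical example.
import subprocess
from pathlib import Path

DRIVER_DIR = Path(r"C:\Drivers\AR5B225")  # hypothetical extraction folder

def install_driver_packages(driver_dir: Path) -> None:
    """Install every .inf driver package found under driver_dir."""
    inf_files = list(driver_dir.rglob("*.inf"))
    if not inf_files:
        raise FileNotFoundError(f"No .inf files found under {driver_dir}")
    for inf in inf_files:
        # 'pnputil -i -a' adds the package to the driver store and installs it.
        subprocess.run(["pnputil", "-i", "-a", str(inf)], check=True)

if __name__ == "__main__":
    install_driver_packages(DRIVER_DIR)
    print("Driver packages installed; restart your computer to apply the changes.")
```
-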
To update your driver automatically, you need to use a reliable and trusted driver update tool, such as Driver Booster or Driver Easy. These tools can scan your computer and detect all the outdated, missing, or corrupted drivers on your system. They can also download and install the latest and compatible drivers for your devices with just one click. To use these tools, you need to follow these steps:
-
-
Download and install a driver update tool on your computer.
-
Launch the tool and click on the Scan button to scan your computer for driver issues.
-
Review the scan results and click on the Update button next to your wireless card to update its driver.
-
Wait for the tool to download and install the updated driver on your computer.
-
Restart your computer to apply the changes.
-
-
How to Uninstall Atheros AR5B225 Driver Windows 7
-
If you want to uninstall the Atheros AR5B225 driver Windows 7 from your computer, you can do so by following these steps:
-
-
Go to the Control Panel on your computer and select Programs and Features.
-
Find and select Qualcomm Atheros WiFi Driver Installation from the list of installed programs.
-
Click on the Uninstall button and follow the on-screen instructions to uninstall the driver from your computer.
-
Restart your computer to complete the uninstallation process.
-
-
Frequently Asked Questions about Atheros AR5B225 Driver Windows 7
-
Here are some of the most frequently asked questions about Atheros AR5B225 driver Windows 7 and their answers:
-
-
Q: Is Atheros AR5B225 driver Windows 7 compatible with other operating systems?
-
-A: The AR5B225 wireless card is also supported on newer operating systems, such as Windows 8, Windows 8.1, and Windows 10. However, the Windows 7 driver itself is not guaranteed to work there; you need to download and install the version of the driver that matches your operating system.
-
Q: How can I check if my wireless card is working properly with Atheros AR5B225 driver Windows 7?
-
-A: You can check whether your wireless card is working properly with Atheros AR5B225 driver Windows 7 by performing some simple tests, such as connecting to a WiFi network or pairing with a Bluetooth device. You can also check the status of your wireless card in the Device Manager or in the Network and Sharing Center. If you see any error messages or yellow exclamation marks next to your wireless card, there is a problem with your wireless card or its driver. The short script below shows one way to automate this check.
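-
A minimal sketch of such an automated check, assuming a Windows machine with the built-in netsh tool; the exact wording of netsh's output varies between Windows versions, so the string test is illustrative only:

```python
# Minimal sketch: report the state of the wireless interfaces via netsh.
import subprocess

def wlan_interface_report() -> str:
    """Return netsh's report on all WLAN interfaces (name, state, SSID, ...)."""
    result = subprocess.run(
        ["netsh", "wlan", "show", "interfaces"],
        capture_output=True, text=True, check=True,
    )
    return result.stdout

if __name__ == "__main__":
    report = wlan_interface_report()
    # The exact phrasing varies by Windows version; adjust the check as needed.
    if "no wireless interface" in report.lower():
        print("No wireless adapter detected - check the driver installation.")
    else:
        print(report)
```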
-
Q: What should I do if I encounter any problems or issues with Atheros AR5B225 driver Windows 7?
-
A: If you encounter any problems or issues with Atheros AR5B225 driver Windows 7, you can try some of the tips and troubleshooting methods that we have provided in this article. You can also contact Qualcomm Atheros or your laptop or notebook manufacturer for further assistance or support.
-
-
How to Fix Atheros AR5B225 Driver Windows 7 Issues
-
Sometimes, you may experience some issues with your Atheros AR5B225 driver Windows 7 that can affect the performance and functionality of your wireless card. Some of the common issues that you may encounter are:
-
-
Your wireless card is not detected or recognized by your computer.
-
Your wireless card is not working properly or has poor performance.
-
Your wireless card is causing blue screen errors or system crashes.
-
Your wireless card is not compatible with your device or operating system.
-
-
To fix these issues, you can try some of the following solutions (a scripted sketch of two of them follows the list):
-
-
Update your driver to the latest version that matches your device model and operating system.
-
Roll back your driver to a previous version that worked well for your device and operating system.
-
Disable or uninstall any conflicting or incompatible drivers or programs that may interfere with your wireless card.
-
Run a system scan and repair any corrupted or missing system files that may affect your wireless card.
-
Reset your network settings and reconnect to your WiFi network or Bluetooth device.
-
Reinstall your driver using a clean installation method that removes any traces of the previous driver.
-
-
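Two of the fixes above, repairing system files and resetting network settings, map directly onto built-in Windows commands. The sketch below is a minimal illustration that assumes an elevated (administrator) prompt; restart your computer after the network reset.

```python
# Minimal sketch: run the system file checker and reset the network stack.
import subprocess

def run(cmd):
    """Echo a command, then run it, raising an error if it fails."""
    print(">", " ".join(cmd))
    subprocess.run(cmd, check=True)

if __name__ == "__main__":
    run(["sfc", "/scannow"])            # scan and repair corrupted system files
    run(["netsh", "winsock", "reset"])  # reset network settings
    print("Done. Restart your computer to apply the changes.")
```
-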
How to Contact Qualcomm Atheros or Your Laptop or Notebook Manufacturer for Support
-
-If none of the above solutions work for you, or if you have any other questions or concerns about your Atheros AR5B225 driver Windows 7, you can contact Qualcomm Atheros or your laptop or notebook manufacturer for further assistance or support. You can find their contact information on their official websites or in their user manuals. You can also visit their online forums or communities, where you may find other users with similar issues or questions, and check their FAQs or knowledge-base articles for answers to the most common questions about your wireless card.
-
Conclusion
-
Atheros AR5B225 driver Windows 7 is a vital piece of software that enables your laptop or notebook to communicate with your wireless card and access WiFi and Bluetooth networks. It is easy to download and install from various sources, but you should always be careful and scan the files before installing them. You should also update your driver regularly to ensure the optimal performance and security of your wireless card, and follow the tips and troubleshooting methods above to solve any issues that may arise during or after the installation process. If you need any further assistance or support, you can contact Qualcomm Atheros or your laptop or notebook manufacturer for help. We hope this article has helped you understand and use Atheros AR5B225 driver Windows 7 better!
-
679dcb208e
-
-
\ No newline at end of file
diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Autodata 3.39 Srpski Download Free The Ultimate Resource for Car Technical Information and Data.md b/spaces/tialenAdioni/chat-gpt-api/logs/Autodata 3.39 Srpski Download Free The Ultimate Resource for Car Technical Information and Data.md
deleted file mode 100644
index 4e83e22662df3ae3a5a8dbcc7a870382b052549a..0000000000000000000000000000000000000000
--- a/spaces/tialenAdioni/chat-gpt-api/logs/Autodata 3.39 Srpski Download Free The Ultimate Resource for Car Technical Information and Data.md
+++ /dev/null
@@ -1,101 +0,0 @@
-
-
How to Download and Install Autodata 3.39 for Free
-
Autodata is a leading provider of vehicle service, repair, diagnostics and maintenance information for thousands of vehicles worldwide[^2^]. If you are looking for a reliable and comprehensive source of automotive data, you might be interested in downloading and installing Autodata 3.39, the latest version of the software.
However, Autodata 3.39 is not available for free on the official website[^2^], where you need to register and pay a subscription fee to access the online workshop application. So how can you get Autodata 3.39 for free?
-
One way is to look for a torrent or a direct download link on the internet. However, this method is risky and illegal, as you might end up downloading a virus or malware that can harm your computer or compromise your personal data. Moreover, you might face legal consequences for violating the intellectual property rights of Autodata.
-
-A safer option is to use a Serbian forum[^1^] that offers a free download link for Autodata 3.39 in Serbian. This forum also provides a detailed installation guide and a crack file to activate the software without paying any fees. Here are the steps to follow:
-
-
Go to the Serbian forum[^1^] and find the thread titled "AutoData 3.39 Na Hrvatskom".
-
Download the ISO file from the link provided in the first post. The file size is about 1.4 GB.
-
Mount the ISO file using a virtual drive software such as Daemon Tools or PowerISO.
-
Copy the file named "ADCDA2" from the ISO image to your C drive.
-
Run the file named "Install.cmd" as administrator and wait for a few minutes until the installation is complete.
-
Copy the file named "crack" from the ISO image to your C drive and overwrite the existing file.
-
Run the file named "RegSettings.reg" as administrator and confirm the registry changes.
-
Enjoy using Autodata 3.39 for free!
-
-
Note: This method is only for educational purposes and we do not endorse or encourage any illegal activities. Use Autodata 3.39 at your own risk and responsibility.
-
autodata 3.39 srpski full version free download
-autodata 3.39 srpski crack download free
-autodata 3.39 srpski iso download free
-autodata 3.39 srpski torrent download free
-autodata 3.39 srpski software download free
-autodata 3.39 srpski manual download free
-autodata 3.39 srpski keygen download free
-autodata 3.39 srpski activation code download free
-autodata 3.39 srpski patch download free
-autodata 3.39 srpski serial number download free
-autodata 3.39 srpski online download free
-autodata 3.39 srpski offline download free
-autodata 3.39 srpski windows 10 download free
-autodata 3.39 srpski windows 7 download free
-autodata 3.39 srpski windows xp download free
-autodata 3.39 srpski mac download free
-autodata 3.39 srpski linux download free
-autodata 3.39 srpski android download free
-autodata 3.39 srpski ios download free
-autodata 3.39 srpski mobile download free
-autodata 3.39 srpski tablet download free
-autodata 3.39 srpski laptop download free
-autodata 3.39 srpski pc download free
-autodata 3.39 srpski usb download free
-autodata 3.39 srpski cd download free
-autodata 3.39 srpski dvd download free
-autodata 3.39 srpski rar download free
-autodata 3.39 srpski zip download free
-autodata 3.39 srpski mega.nz download free
-autodata 3.39 srpski google drive download free
-autodata 3.39 srpski mediafire download free
-autodata 3.39 srpski zippyshare download free
-autodata 3.39 srpski dropbox download free
-autodata 3.39 srpski direct link download free
-autodata 3.39 srpski fast speed download free
-autodata 3.39 srpski no survey download free
-autodata 3.39 srpski no password download free
-autodata 3.39 srpski no virus download free
-autodata 3.39 srpski safe and secure download free
-autodata 3.39 srpski latest version download free
-autodata 3.39 srpski updated version download free
-autodata 3.39 srpski original version download free
-autodata 3.39 srpski official version download free
-autodata 3.39 srpski verified version download free
-autodata 3.39 srpski working version download free
-how to install autodata 3.39 srpski for free
-how to use autodata 3.39 srpski for free
-how to get autodata 3.39 srpski for free
-how to uninstall autodata 3.39 srpski for free
-
-
What is Autodata 3.39 and what are its features?
-
Autodata 3.39 is the latest version of the popular automotive software that provides technical information and solutions for thousands of vehicles from over 80 manufacturers. It covers cars, vans, motorcycles, trucks and buses from 1959 to 2013. It includes data on service schedules, wiring diagrams, diagnostics, repair times, technical specifications, fault codes, component locations and more.
-
Some of the features of Autodata 3.39 are:
-
-
A user-friendly interface that allows you to easily navigate through the various modules and functions.
-
A comprehensive database that is updated regularly with the latest vehicle models and information.
-
A fast and accurate search engine that lets you find the information you need in seconds.
-
A multilingual support that allows you to switch between different languages such as English, German, French, Spanish, Italian, Portuguese, Dutch, Swedish, Norwegian, Finnish, Danish, Polish, Hungarian, Czech, Slovak, Slovenian, Croatian and Serbian.
-
A printing option that lets you print out the information you need in a clear and professional format.
-
-
-
Why should you use Autodata 3.39?
-
Autodata 3.39 is a must-have tool for anyone who works in the automotive industry or has an interest in vehicles. Whether you are a professional mechanic, a workshop owner, a car enthusiast or a student, Autodata 3.39 can help you save time and money by providing you with the most accurate and up-to-date information on any vehicle.
-
Some of the benefits of using Autodata 3.39 are:
-
-
You can access a wealth of information on any vehicle with just a few clicks.
-
You can diagnose and fix any problem with your vehicle using the detailed instructions and illustrations provided by Autodata 3.39.
-
You can perform routine maintenance and service on your vehicle according to the manufacturer's recommendations using the service schedules and repair times provided by Autodata 3.39.
-
You can improve your skills and knowledge on automotive technology by learning from the experts at Autodata 3.39.
-
You can increase your customer satisfaction and loyalty by providing them with high-quality service and solutions using Autodata 3.39.
-
-
-
How to get started with Autodata 3.39?
-
If you want to start using Autodata 3.39 for free, you can follow the steps mentioned above to download and install it from the Serbian forum. However, if you want to get the full benefits of Autodata 3.39 and support its development, you should consider subscribing to the official website where you can access the online workshop application that offers more features and advantages such as:
-
-
A cloud-based platform that lets you access Autodata 3.39 from any device and location.
-
A secure and reliable service that protects your data and privacy.
-
A constant update that ensures you always have the latest information and data on any vehicle.
-
A customer support that helps you with any issue or question you might have regarding Autodata 3.39.
-
-
To subscribe to the official website, you need to create an account and choose a plan that suits your needs and budget. You can also request a free trial to test out the online workshop application before making a purchase.
e753bf7129
-
-
\ No newline at end of file
diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Descargar Coleccion Completa De Memin Pinguin Un Clsico de la Cultura Popular Mexicana.md b/spaces/tialenAdioni/chat-gpt-api/logs/Descargar Coleccion Completa De Memin Pinguin Un Clsico de la Cultura Popular Mexicana.md
deleted file mode 100644
index 7120dfbc37766ec3d80df0d8b2666617f418d489..0000000000000000000000000000000000000000
--- a/spaces/tialenAdioni/chat-gpt-api/logs/Descargar Coleccion Completa De Memin Pinguin Un Clsico de la Cultura Popular Mexicana.md
+++ /dev/null
@@ -1,103 +0,0 @@
-
-
Download the Complete Memin Pinguinl Collection
-
If you are a fan of classic Mexican comics, you surely know Memín Pinguín, the character created by Yolanda Vargas Dulché in 1943 who has starred in hundreds of adventures alongside his friends Carlangas, Ernestillo and Ricardo. Memín Pinguín is a boy of Afro-descendant origin who lives in Mexico City with his mother Eufrosina, a washerwoman who works hard to give him an education. Memín is mischievous, noble, generous and very funny, and his curiosity and wit are always getting him into trouble.
-
Memín Pinguín has been published by several publishers over the years, in different color, sepia and black-and-white versions. His popularity has crossed Mexico's borders and reached other countries in Latin America, the United States and Europe. He has even been the subject of controversy over his racial depiction, which some consider stereotyped and offensive, while others defend as a show of affection and respect.
If you want to enjoy the Memín Pinguín comics on your computer, tablet or phone, we have good news: you can download the complete Memín Pinguín collection in digital format for free. That's right, you can access every issue of Memín Pinguín from the first to the last, and read them whenever and wherever you want. In addition, you can also download a special bicentennial edition titled "Memín para Presidente" and a music CD inspired by the character.
-
How to Download the Complete Memín Pinguín Collection
-
To download the complete Memín Pinguín collection, just follow these simple steps:
-
-
Go to the website Memin Pinguin para todos, where you will find the links to download the complete Memín Pinguín collection in two versions: the Editorial Vid color edition (443 issues) and the Editora Cinco sepia edition (343 issues).
-
Choose the version you prefer and click on the corresponding link. It will take you to a Mega page, where you can download the files in CBR or CBZ format (for the color version) or PDF (for the sepia version).
-
Download the files to your device and open them with whatever program or app you prefer for reading digital comics. If you don't have one, we recommend CDisplayEx for Windows or CDisplayEx Comic Reader for Android.
-
Enjoy the Memín Pinguín comics and share them with your friends and family.
-
-
You can also download the special bicentennial edition "Memín para Presidente" and the music CD from the same website, following the same procedure.
-
Why Download the Complete Memín Pinguín Collection
-
Downloading the complete Memín Pinguín collection has many advantages, including:
-
-
You will have access to all the Memín Pinguín comics from beginning to end, without missing a single detail or episode.
-
You can read the Memín Pinguín comics whenever and wherever you want, without depending on an internet connection or owning the physical comics.
-
You save space and money by not having to buy or store the printed comics, which can also deteriorate over time.
-
You help preserve the environment by avoiding the consumption of paper and ink.
-
You help spread Mexican culture and the legacy of Yolanda Vargas Dulché, one of the country's most important and influential writers.
-
You have fun and learn with the adventures of Memín Pinguín and his friends, which will make you laugh, cry, reflect and feel moved.
-
-
Downloading the complete Memín Pinguín collection is a unique opportunity to enjoy one of the most beloved and emblematic characters in Mexican comics. Don't wait any longer and download the complete Memín Pinguín collection today. You won't regret it!
-
What You Will Find in the Complete Memín Pinguín Collection
-
The complete Memín Pinguín collection gives you the chance to read every story about this character, from his origin to his finale. You will get to know the life and escapades of Memín and his friends, who face all kinds of situations and challenges, from school and family problems to international adventures and supernatural mysteries. Some of the most famous Memín Pinguín stories are:
-
-
Memín en el Congo: Memín travels to Africa to help a friend who is in danger. There he meets a tribe of pygmies who adopt him as one of their own.
-
Memín y el tesoro de Moctezuma: Memín and his friends discover an ancient Aztec pyramid hiding a fabulous treasure. But they also run into a gang of thieves who want to steal it.
-
Memín y el fantasma del colegio: Memín and his friends get locked inside their school on a stormy night. They soon realize they are not alone: a ghost is stalking them.
-
Memín y el hombre lobo: Memín and his friends go camping in the woods. There they meet a man who claims to be a werewolf and asks for their help to break his curse.
-
Memín y el circo: Memín and his friends join a traveling circus that takes them from city to city. There they learn juggling, acrobatics and other circus skills.
-
-
These are just some of the stories you can read in the complete Memín Pinguín collection. There are many more that will surprise, amuse and move you.
-
Download all the issues of Memin Pinguinl
-How to download the complete Memin Pinguinl collection for free
-Where can I download the complete Memin Pinguinl collection in PDF
-Memin Pinguinl complete collection online without downloading
-Download the Memin Pinguinl collection via Mega
-Memin Pinguinl complete collection in Spanish download
-Download the Memin Pinguinl collection in CBR format
-Memin Pinguinl complete collection to read on your phone
-Download the Memin Pinguinl collection in high quality
-Memin Pinguinl complete collection torrent download
-Download the Memin Pinguinl collection from Google Drive
-Memin Pinguinl complete collection epub download
-Download the Memin Pinguinl collection in a single file
-Memin Pinguinl complete collection Kindle download
-Download the Memin Pinguinl collection without registering
-Memin Pinguinl complete collection zip download
-Download the Memin Pinguinl collection in parts
-Memin Pinguinl complete collection rar download
-Download the Memin Pinguinl collection from Mediafire
-Memin Pinguinl complete collection mobi download
-Download the Memin Pinguinl collection with illustrations
-Memin Pinguinl complete collection for printing download
-Download the Memin Pinguinl collection with original covers
-Memin Pinguinl complete collection for PC download
-Download the Memin Pinguinl collection with an index
-Memin Pinguinl complete collection for tablet download
-Download the Memin Pinguinl collection with commentary
-Memin Pinguinl complete collection for Android download
-Download the Memin Pinguinl collection with extras
-Memin Pinguinl complete collection for iPhone download
-Download the Memin Pinguinl collection with an introduction
-Memin Pinguinl complete collection for iPad download
-Download the Memin Pinguinl collection with an author biography
-Memin Pinguinl complete collection for Mac download
-Download the collection of Mem
-
How to Get the Most Out of the Complete Memín Pinguín Collection
-
Downloading the complete Memín Pinguín collection is only the first step toward enjoying this character and his comics. Here are some tips to make the most of the collection:
-
-
Read the comics in chronological order, to follow the evolution of the character and his friends.
-
Pay attention to the details of the artwork and the script, which reflect the era and context in which the comics were created.
-
Compare the different versions of the comics, in color, sepia and black and white, and notice how the styles and techniques change.
-
Share the comics with other people, whether family, friends or acquaintances, and discuss your impressions and opinions with them.
-
Learn more about Mexican history and culture, as well as about the other countries and topics that appear in the comics.
-
-
Downloading the complete Memín Pinguín collection is a way to get closer to an important part of Mexico's cultural heritage. Don't miss this chance to get to know such an endearing character and his entertaining, fascinating comics.
-
The Benefits of Reading the Complete Memín Pinguín Collection
-
Reading the complete Memín Pinguín collection is not only a form of entertainment but also one of learning and personal growth. By reading the Memín Pinguín comics, you can gain the following benefits:
-
-
You will develop your imagination and creativity by immersing yourself in the stories and worlds created by Yolanda Vargas Dulché.
-
You will improve your reading comprehension and vocabulary by becoming familiar with the language and style of the comics.
-
You will learn about aspects of Mexican history and culture, as well as the other countries and regions that Memín and his friends visit.
-
You will learn values and life lessons by identifying with the experiences and feelings of the characters.
-
You will have fun and relax, laughing at the antics and mischief of Memín and his friends.
-
-
Reading the complete Memín Pinguín collection is an activity you can do alone or in company, and it will bring many benefits to your mind and your heart.
-
How to Support the Work of Yolanda Vargas Dulché and Memín Pinguín
-
Yolanda Vargas Dulché was a Mexican writer who devoted her life to creating comics and novels that captivated millions of readers. Her most famous work is Memín Pinguín, but she also created other titles such as Lágrimas y Risas, Rubí, María Isabel, Yesenia and El pecado de Oyuki. Her work is characterized by its sensitivity, humor, social criticism and love for Mexico.
-
If you want to support the work of Yolanda Vargas Dulché and Memín Pinguín, you can do the following:
-
-
Buy the original comics or the reissues sold in bookstores or online.
-
Visit the Museo Yolanda Vargas Dulché in Mexico City, where you can see personal objects, drawings, photographs and documents related to her work.
-
Follow the official Memín Pinguín social media accounts to keep up with news, events and promotions.
-
Spread the word about Yolanda Vargas Dulché and Memín Pinguín among your friends, family and acquaintances, so that more people discover and appreciate their work.
-
-
Supporting the work of Yolanda Vargas Dulché and Memín Pinguín is a way of recognizing her talent and her contribution to Mexican culture. It is also a way of keeping alive the spirit of Memín Pinguín, the boy who taught us to laugh, to cry, to dream and to live.
679dcb208e
-
-
\ No newline at end of file
diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Drivereasy49keygentorrent Get the Latest Drivers for Your Hardware in Minutes.md b/spaces/tialenAdioni/chat-gpt-api/logs/Drivereasy49keygentorrent Get the Latest Drivers for Your Hardware in Minutes.md
deleted file mode 100644
index f14ed81374394c57b590dad166d56ae574215c2b..0000000000000000000000000000000000000000
--- a/spaces/tialenAdioni/chat-gpt-api/logs/Drivereasy49keygentorrent Get the Latest Drivers for Your Hardware in Minutes.md
+++ /dev/null
@@ -1,117 +0,0 @@
-
-
Milan Sikirica Stehiometrija Download
-
If you are interested in learning more about stehiometrija, a branch of chemistry that deals with mass and volume ratios in chemical compounds, you might want to download Milan Sikirica Stehiometrija, a textbook that covers various topics in this field. In this article, we will tell you what stehiometrija is, who Milan Sikirica is, why you should download his book, and how you can do it.
A branch of chemistry that studies mass and volume ratios in chemical compounds
-
-Stehiometrija is a word derived from the Greek words "stoicheion" (element) and "metron" (measure). It is a branch of chemistry that studies the quantitative relationships between the reactants and products in chemical reactions. It involves calculating the amounts of substances involved in a reaction, based on their relative atomic or molecular masses, their chemical equivalents, their oxidation and reduction states, their concentrations, their gas laws, their physical properties, their thermodynamics, and their equilibrium constants.
-
A textbook by Milan Sikirica that covers various topics in stehiometrija
-
Milan Sikirica Stehiometrija is a textbook that was first published in 1974 and has since been revised and updated several times. It is one of the most popular and widely used textbooks on stehiometrija in Croatia and other countries. It covers all the topics mentioned above, with clear explanations, illustrations, examples, exercises, and solutions. It is designed for students and teachers of chemistry at different levels, from secondary school to university.
-
Who is Milan Sikirica?
-
A Croatian chemist and educator who wrote several books and manuals on chemistry
-
Milan Sikirica was born in 1934 in Zagreb, Croatia. He graduated from the Faculty of Science at the University of Zagreb in 1958, where he later obtained his PhD in chemistry in 1967. He worked as a professor of chemistry at several schools and faculties, including the Faculty of Chemical Engineering and Technology at the University of Zagreb. He also served as the director of the Institute for Chemistry Education at the Croatian Academy of Sciences and Arts.
-
A recipient of several awards and honors for his contributions to chemistry education
-
Milan Sikirica was recognized for his achievements and contributions to chemistry education by various institutions and organizations. He received several awards, such as the Order of Danica Hrvatska with the face of Ruđer Bošković (the highest state award for science in Croatia), the Award for Lifetime Achievement from the Croatian Chemical Society, the Award for Excellence in Teaching from the University of Zagreb, and the Medal for Merit from the International Union of Pure and Applied Chemistry. He was also elected as a member of the Croatian Academy of Sciences and Arts.
-
Why should you download Milan Sikirica Stehiometrija?
-
A comprehensive and clear guide to stehiometrija with solved examples and exercises
-
Milan Sikirica Stehiometrija is a book that will help you understand and master stehiometrija with ease. It covers all the essential concepts and principles of stehiometrija, with detailed explanations and diagrams. It also provides numerous solved examples and exercises that will help you practice your skills and test your knowledge. The book is organized into 12 chapters, each covering a specific topic in stehiometrija.
-
A useful resource for students and teachers of chemistry at different levels
-
-Milan Sikirica Stehiometrija is a book that will suit your needs whether you are a student or a teacher of chemistry. If you are a student, you will find this book a valuable companion for your studies. It will help you prepare for your exams, assignments, projects, and competitions. It will also deepen your interest and curiosity in chemistry. If you are a teacher, you will find this book a handy reference for your lectures, demonstrations, experiments, and assessments. It will also inspire you to create new ways of teaching stehiometrija to your students.
-
A digital format that is easy to access and read on various devices
-
Milan Sikirica Stehiometrija is available as a PDF file that you can download from Scribd or other sources online. This means that you can access it anytime and anywhere on your computer, tablet, smartphone, or e-reader. You can also print it out if you prefer a hard copy. The PDF file has 342 pages and is about 20 MB in size.
-
Milan Sikirica Stehiometrija PDF free download
-Milan Sikirica Stehiometrija book online
-Milan Sikirica Stehiometrija solutions manual
-Milan Sikirica Stehiometrija problems and solutions
-Milan Sikirica Stehiometrija ebook
-Milan Sikirica Stehiometrija book
-Milan Sikirica Stehiometrija chemistry textbook
-Milan Sikirica Stehiometrija lecture notes
-Milan Sikirica Stehiometrija practice problems
-Milan Sikirica Stehiometrija pdf download
-Milan Sikirica Stehiometrija 2nd edition
-Milan Sikirica Stehiometrija chemistry problems
-Milan Sikirica Stehiometrija free download
-Milan Sikirica Stehiometrija online reading
-Milan Sikirica Stehiometrija solutions to problems
-Milan Sikirica Stehiometrija problems and answers
-Milan Sikirica Stehiometrija electronic book
-Milan Sikirica Stehiometrija chemistry book
-Milan Sikirica Stehiometrija textbook
-Milan Sikirica Stehiometrija exercises
-Milan Sikirica Stehiometrija download free
-Milan Sikirica Stehiometrija read online
-Milan Sikirica Stehiometrija answer key
-Milan Sikirica Stehiometrija problems and solutions
-Milan Sikirica Stehiometrija epub
-Milan Sikirica Stehiometrija book pdf
-Milan Sikirica Stehiometrija chemistry book
-Milan Sikirica Stehiometrija script
-Milan Sikirica Stehiometrija practice problems
-Milan Sikirica Stehiometrija 2nd edition
-Milan Sikirica Stehiometrija chemistry problems
-Milan Sikirica Stehiometrija free download pdf
-Milan Sikirica Stehiometrija read online free
-Milan Sikirica Stehiometrija solutions pdf
-Milan Sikirica Stehiometrija questions and answers
-Milan Sikirica Stehiometrija ebook pdf
-Milan Sikirica Stehiometrija chemistry textbook pdf
-Milan Sikirica Stehiometrija textbook pdf
-Milan Sikirica Stehiometrija exercises pdf
-Milan Sikirica Stehiometrija download pdf free
-Milan Sikirica Stehiometrija online book free
-Milan Sikirica Stehiometrija solutions manual pdf
-Milan Sikirica Stehiometrija problems and answers pdf
-Milan Sikirica Stehiometrija epub download free
-Milan Sikirica Stehiometrija book download free pdf
-
How can you download Milan Sikirica Stehiometrija?
-
The steps to download the PDF file from Scribd
-
One of the easiest ways to download Milan Sikirica Stehiometrija is from Scribd, a website that hosts millions of books, documents, audiobooks, podcasts, magazines, etc. To download it from Scribd, follow these steps:
-
-
Go to https://www.scribd.com/document/459468794/Milan-Sikirica-Stehiometrija-rijeseni-zadaci
-
Click on "Download" or "Download Now" button on the top right corner.
-
If you have an account on Scribd, log in with your email or Facebook account. If you don't have an account on Scribd, sign up for free with your email or Facebook account.
-
Choose a payment method (credit card or PayPal) or start a free trial (you can cancel anytime) to access unlimited downloads on Scribd.
-
After confirming your payment or trial, click on "Download PDF" button.
-
Save the file on your device or open it with your preferred PDF reader.
-
-
The alternative sources to find the book online or offline
-
If you don't want to download Milan Sikirica Stehiometrija from Scribd or if you encounter any problems with it, you can try other sources online or offline. Here are some suggestions:
-You can look for the book on other document-sharing sites, in online or university libraries, or in second-hand bookshops. However you obtain it, we highly recommend you download this book and learn more about stehiometrija.
-
FAQs
-
Here are some common questions and answers about Milan Sikirica Stehiometrija:
-
-
What is the difference between stehiometrija and stoichiometry?
-
Stehiometrija and stoichiometry are two words that refer to the same branch of chemistry. Stehiometrija is the Croatian word for stoichiometry, which is the English word derived from the Greek words "stoicheion" (element) and "metron" (measure).
-
What are the prerequisites for studying stehiometrija?
-
To study stehiometrija, you need to have a basic knowledge of chemistry, especially the concepts of atoms, molecules, elements, compounds, chemical formulas, chemical equations, and mole. You also need to have some mathematical skills, such as arithmetic, algebra, and logarithms.
-
What are the applications of stehiometrija?
-
Stehiometrija has many applications in various fields of science and engineering, such as analytical chemistry, biochemistry, environmental chemistry, industrial chemistry, materials science, metallurgy, pharmacology, etc. It helps you to determine the amounts of substances involved in a chemical reaction, to analyze the composition of a sample, to calculate the energy changes in a reaction, to predict the direction and extent of a reaction, to control the quality and quantity of a product, etc.
-
What are some tips for solving stehiometrija problems?
-
Some tips for solving stehiometrija problems are:
-
-
Read the problem carefully and identify the given data and the unknown quantity.
-
Write down the balanced chemical equation for the reaction involved.
-
Convert all the given data into moles using the appropriate conversion factors.
-
Use the mole ratio from the balanced equation to relate the moles of one substance to another.
-
Convert the moles of the unknown quantity into the desired units using the appropriate conversion factors.
-
Check your answer for reasonableness and accuracy.
-
-
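As a worked example (our own illustration, not taken from the book), consider the reaction 2 H2 + O2 -> 2 H2O and ask how many grams of water form from 10.0 g of hydrogen with excess oxygen. The short script below applies the steps exactly as listed:

```python
# Worked stoichiometry example for 2 H2 + O2 -> 2 H2O.
M_H2 = 2.016    # g/mol, molar mass of H2
M_H2O = 18.015  # g/mol, molar mass of H2O

grams_h2 = 10.0
moles_h2 = grams_h2 / M_H2      # step 3: convert the given mass to moles
moles_h2o = moles_h2 * (2 / 2)  # step 4: mole ratio, 2 mol H2O per 2 mol H2
grams_h2o = moles_h2o * M_H2O   # step 5: convert moles back to grams

print(f"{grams_h2} g H2 -> {grams_h2o:.1f} g H2O")  # about 89.4 g of H2O
```
-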
Where can I find more resources on stehiometrija?
-
Besides Milan Sikirica Stehiometrija, you can find more resources on stehiometrija from various sources, such as:
-
-
Other textbooks on general chemistry or analytical chemistry that have chapters or sections on stehiometrija.
-
Online courses or videos on stehiometrija that explain the concepts and demonstrate the problem-solving techniques.
-
Websites or blogs that provide tutorials, quizzes, worksheets, or interactive simulations on stehiometrija.
-
Forums or communities that allow you to ask questions or share your experiences on stehiometrija.
-
0a6ba089eb
-
-
\ No newline at end of file
diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Enjoy Sonic Riders Zero Gravity on Wii with ISO Download.md b/spaces/tialenAdioni/chat-gpt-api/logs/Enjoy Sonic Riders Zero Gravity on Wii with ISO Download.md
deleted file mode 100644
index de3fda8cb955ef6c595dcc1d889bf3d82789fbfb..0000000000000000000000000000000000000000
--- a/spaces/tialenAdioni/chat-gpt-api/logs/Enjoy Sonic Riders Zero Gravity on Wii with ISO Download.md
+++ /dev/null
@@ -1,68 +0,0 @@
-
-
How to Download Sonic Riders: Zero Gravity for the Wii
-
Sonic Riders: Zero Gravity is a racing game featuring Sonic the Hedgehog and his friends. It was released in 2008 for the Nintendo Wii and PlayStation 2. In this game, you can use gravity control to perform tricks and speed up on futuristic tracks. You can also customize your gear and unlock new characters and modes.
-
If you want to play Sonic Riders: Zero Gravity on your Wii, you will need a copy of the game disc or an ISO file. An ISO file is a digital image of a disc that can be burned to a blank DVD or loaded on a USB drive. You can download Sonic Riders: Zero Gravity ISO file from various sources online, but be careful of viruses and malware.
One of the websites that offers Sonic Riders: Zero Gravity ISO download is Vimm's Lair[^1^]. This site has a large collection of classic games for various consoles, including the Wii. You can download the game in .wbfs format, which is compatible with most Wii emulators and loaders. The file size is 3.55 GB and the CRC is 675f9e22.
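-
Whichever source you use, it is worth verifying the download before burning or copying it. A minimal sketch, using only Python's standard library, that checks a file against a published CRC value such as the 675f9e22 checksum quoted above (the file name is a hypothetical example):

```python
# Minimal sketch: compute a file's CRC-32 and compare it to a published value.
import zlib

def crc32_of_file(path: str, chunk_size: int = 1 << 20) -> str:
    """Compute the CRC-32 of a file incrementally, as 8 lowercase hex digits."""
    crc = 0
    with open(path, "rb") as f:
        while chunk := f.read(chunk_size):
            crc = zlib.crc32(chunk, crc)
    return f"{crc & 0xFFFFFFFF:08x}"

if __name__ == "__main__":
    checksum = crc32_of_file("SonicRidersZeroGravity.wbfs")  # hypothetical name
    print(checksum)
    print("OK" if checksum == "675f9e22" else "Mismatch - re-download the file.")
```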
-
Another website that provides Sonic Riders: Zero Gravity ROM download is ConsoleRoms[^2^]. This site has a user-friendly interface and a rating system for each game. You can download the game in .iso format, which is the standard format for Wii discs. The file size is unknown and the region is USA.
-
A third website that hosts Sonic Riders: Zero Gravity torrent download is Archive.org[^3^]. This site is a digital library of various media and software. You can download the game in .wbfs format, which is similar to .iso but smaller in size. The file size is unknown and the region is NTSC.
-
Before you download Sonic Riders: Zero Gravity for the Wii, make sure you have enough space on your device and a reliable internet connection. You will also need a program to extract the compressed files, such as WinRAR or 7-Zip. After you extract the files, you can either burn them to a DVD or copy them to a USB drive.
-
-To play Sonic Riders: Zero Gravity on your Wii, you will need a modded console that can run homebrew software, plus a loader such as USB Loader GX; alternatively, you can play the game on a PC with a Wii emulator such as Dolphin. You can find tutorials on how to install and use these programs online. Once you have everything set up, you can enjoy playing Sonic Riders: Zero Gravity.
-
-
Sonic Riders: Zero Gravity Gameplay and Features
-
Sonic Riders: Zero Gravity is a fast-paced and fun racing game that lets you experience the thrill of gravity-defying action. You can choose from 18 playable characters, each with their own unique gear and abilities. You can also unlock more characters and gear by completing missions and collecting rings.
-
sonic riders zero gravity wii rom download
-how to download sonic riders zero gravity on wii
-sonic riders zero gravity wii iso pal download
-sonic riders zero gravity wii iso ntsc download
-sonic riders zero gravity wii iso mega download
-sonic riders zero gravity wii iso direct download
-sonic riders zero gravity wii iso google drive download
-sonic riders zero gravity wii iso free download
-sonic riders zero gravity wii iso torrent download
-sonic riders zero gravity wii iso compressed download
-sonic riders zero gravity wii game download
-where to download sonic riders zero gravity for wii
-best site to download sonic riders zero gravity wii iso
-sonic riders zero gravity wii iso dolphin download
-sonic riders zero gravity wii iso europe download
-sonic riders zero gravity wii iso english download
-sonic riders zero gravity wii iso full download
-sonic riders zero gravity wii iso file download
-sonic riders zero gravity wii iso fast download
-sonic riders zero gravity wii iso high quality download
-sonic riders zero gravity wii iso highly compressed download
-sonic riders zero gravity wii iso online download
-sonic riders zero gravity wii iso rar download
-sonic riders zero gravity wii iso zip download
-sonic riders zero gravity wii u iso download
-how to play sonic riders zero gravity on wii after downloading iso
-how to install sonic riders zero gravity on wii from iso download
-how to burn sonic riders zero gravity wii iso to dvd after downloading
-how to extract sonic riders zero gravity wii iso from rar file after downloading
-how to use usb loader gx to play sonic riders zero gravity wii iso after downloading
-how to update sonic riders zero gravity on wii after downloading iso
-how to fix sonic riders zero gravity black screen on wii after downloading iso
-how to unlock all characters in sonic riders zero gravity on wii after downloading iso
-how to enable cheats in sonic riders zero gravity on wii after downloading iso
-how to change language in sonic riders zero gravity on wii after downloading iso
-how to play multiplayer in sonic riders zero gravity on wii after downloading iso
-how to play online in sonic riders zero gravity on wii after downloading iso
-how to mod sonic riders zero gravity on wii after downloading iso
-how to backup sonic riders zero gravity save data on wii after downloading iso
-how to restore sonic riders zero gravity save data on wii after downloading iso
-how to transfer sonic riders zero gravity save data from one wii to another after downloading iso
-how to delete sonic riders zero gravity save data on wii after downloading iso
-how to format sd card for sonic riders zero gravity on wii after downloading iso
-how to create custom tracks for sonic riders zero gravity on wii after downloading iso
-how to edit music for sonic riders zero gravity on wii after downloading iso
-how to change graphics settings for sonic riders zero gravity on wii after downloading iso
-how to improve performance for sonic riders zero gravity on wii after downloading iso
-how to troubleshoot problems for sonic riders zero gravity on wii after downloading iso
-
The game has two main modes: Story Mode and Free Race Mode. In Story Mode, you can follow the adventures of Sonic and his friends as they discover the secrets of a mysterious meteorite that grants them gravity control. You can play as either the Heroes or the Babylon Rogues, and face different challenges and enemies along the way.
-
In Free Race Mode, you can race against up to three other players or computer opponents on any of the 16 tracks available. You can customize the rules and settings of each race, such as the number of laps, the items, and the difficulty. You can also play online with other players via Nintendo Wi-Fi Connection.
-
One of the main features of Sonic Riders: Zero Gravity is the gravity control system. By using the Wii Remote or the Nunchuk, you can manipulate gravity to perform amazing stunts and gain speed. You can also use gravity to switch between different paths and shortcuts on the tracks. You will need to master the gravity control to win the races and unlock new gear.
-
Another feature of Sonic Riders: Zero Gravity is the gear customization system. You can equip your character with different types of gear, such as boards, bikes, skates, or yachts. Each gear has its own stats and abilities, such as speed, acceleration, handling, and gravity control. You can also upgrade your gear by collecting rings and using them at the shop.
-
Sonic Riders: Zero Gravity is a game that will appeal to fans of Sonic and racing games alike. It has colorful graphics, catchy music, and smooth gameplay. It also has a lot of replay value, thanks to the multiple modes, characters, gear, and tracks. If you are looking for a fun and exciting game for your Wii, you should give Sonic Riders: Zero Gravity a try.
e753bf7129
-
-
\ No newline at end of file
diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Free Download HOT! Aurora 3D Text Logo Maker Full Version 13.04.18 With Key Serial Number.md b/spaces/tialenAdioni/chat-gpt-api/logs/Free Download HOT! Aurora 3D Text Logo Maker Full Version 13.04.18 With Key Serial Number.md
deleted file mode 100644
index 4a7ff662716e17b5c01dea9dbaed3bb9e707c5f5..0000000000000000000000000000000000000000
--- a/spaces/tialenAdioni/chat-gpt-api/logs/Free Download HOT! Aurora 3D Text Logo Maker Full Version 13.04.18 With Key Serial Number.md
+++ /dev/null
@@ -1,60 +0,0 @@
-
-
Free Download Aurora 3D Text Logo Maker Full Version 13.04.18 With Key Serial Number
-
If you are looking for a way to create eye-catching and professional-looking text and logos for your website, blog, video, presentation, or any other project, you might want to check out Aurora 3D Text Logo Maker. This is a powerful and easy-to-use software that lets you design stunning 3D text effects, logos, icons, and animations in minutes. You don't need any prior experience or skills in graphic design or 3D modeling to use this software. All you need is your creativity and imagination.
-
In this article, we will show you what Aurora 3D Text Logo Maker can do for you, how to download it for free with a key serial number, how to use it to create amazing text and logos, and some tips and tricks to make the most out of it. We will also compare the pros and cons of this software, and answer some frequently asked questions about it. By the end of this article, you will be able to decide if Aurora 3D Text Logo Maker is the right tool for you.
-
Aurora 3D Text Logo Maker offers a lot of features and functions to help you create stunning text effects, logos, icons, banners, buttons, titles, headers, slogans, signatures, watermarks, and more. Here are some of the main features of this software:
-
-
Easy to use interface and tools: The software has a user-friendly interface that allows you to access all the tools and options easily. You can drag-and-drop elements, resize them, rotate them, zoom in and out, change lighting, adjust colors, apply effects, etc. You can also undo and redo your actions, save your projects as templates, preview your design in real-time, and more.
-
Hundreds of templates and effects: The software comes with over 200 templates and over 800 effects that you can use to create stunning text and logos. You can choose from different styles, themes, categories, and genres. You can also customize the templates and effects to suit your needs and preferences. You can add shadows, reflections, textures, gradients, bevels, distortions, animations, and more.
-
Full compatibility with other software: The software is compatible with other popular software such as Photoshop, Illustrator, CorelDraw, InDesign, Flash, Premiere, After Effects, etc. You can import and export your designs in various formats such as PNG, JPG, BMP, TIFF, GIF, SVG, SWF, PDF, etc. You can also copy and paste your designs between different applications.
-
Export to various formats: The software allows you to export your designs to different formats depending on your needs and purposes. You can export your designs as images, vectors, animations, videos, or web pages. You can also adjust the quality, resolution, size, frame rate, background color, etc. of your exports.
-
-
How to Download Aurora 3D Text Logo Maker Full Version 13.04.18 With Key Serial Number
-
If you want to download Aurora 3D Text Logo Maker full version 13.04.18 with key serial number for free, you can follow these simple steps:
-
-
Visit the official website of Aurora 3D Text Logo Maker or a trusted download site such as [Softonic] or [FileHippo].
-
Choose the version that suits your system (Windows or Mac) and click download. You will be redirected to a page where you can enter your email address to receive the download link and the key serial number.
-
Install the software by following the instructions on the screen. You will need to agree to the terms and conditions and choose a destination folder for the installation.
-
Enter the key serial number that you received in your email to activate the full version of the software. You can now enjoy all the features and functions of Aurora 3D Text Logo Maker without any limitations.
-
-
How to Use Aurora 3D Text Logo Maker to Create Stunning Text and Logos
-
Once you have downloaded and installed Aurora 3D Text Logo Maker full version 13.04.18 with key serial number for free, you can start using it to create stunning text and logos for your projects. Here are some steps to guide you:
-
-
Launch the software and choose a template or start from scratch. You can browse through the hundreds of templates available in different categories such as business, technology, sports, entertainment, etc. You can also create your own design by clicking on the "New" button.
-
Customize your text or logo with fonts, colors, shapes, effects, etc. You can edit your text or logo by using the tools on the left panel of the interface. You can change the font type, size, style, alignment, spacing, etc. You can also change the color of your text or logo by using the color picker or the gradient editor. You can also add shapes such as rectangles, circles, stars, etc. to your design by using the shape library or the shape editor. You can also apply effects such as shadows, reflections, textures, gradients, bevels, distortions, animations, etc. to your design by using the effect library or the effect editor.
-
Preview your design and make adjustments if needed. You can preview your design in real-time by using the preview window on the right panel of the interface. You can also change the view angle, perspective, lighting, and background of your design by using the tools on the top panel of the interface. You can also zoom in and out, rotate, and move your design by using the mouse or the keyboard.
-
Save and export your design to your preferred format. You can save your design as a project file by clicking on the "Save" button on the top panel of the interface. You can also export your design as an image, vector, animation, video, or web page by clicking on the "Export" button on the top panel of the interface. You can choose from different formats such as PNG, JPG, BMP, TIFF, GIF, SVG, SWF, PDF, etc. You can also adjust the quality, resolution, size, frame rate, background color, etc. of your exports.
-
-
Tips and Tricks for Using Aurora 3D Text Logo Maker
-
Aurora 3D Text Logo Maker is a software that offers a lot of possibilities and options for creating stunning text and logos. However, to make the most out of it, you need to know some tips and tricks that can help you improve your skills and results. Here are some of them:
-
-
Use the alignment and layout tools to arrange your elements: The software has a set of tools that can help you align and layout your elements in a neat and organized way. You can use the grid and guide lines to snap your elements to a certain position. You can also use the alignment buttons to align your elements to the left, right, center, top, bottom, or middle. You can also use the distribute buttons to distribute your elements evenly horizontally or vertically.
-
Use the animation tools to add motion and interactivity to your design: The software has a set of tools that can help you add animation and interactivity to your design. You can use the timeline editor to create keyframes and adjust the speed and duration of your animation. You can also use the animation library to choose from different types of animation such as rotate, scale, move, fade, etc. You can also use the interactive function to make your design respond to mouse events such as click, hover, etc.
-
Use the import and export functions to integrate your design with other projects: The software has a set of functions that can help you import and export your design with other projects. You can import images, vectors, fonts, etc. from other sources such as Photoshop, Illustrator, CorelDraw, InDesign, Flash, Premiere, After Effects, etc. You can also export your design to different formats such as PNG, JPG, BMP, TIFF, GIF, SVG, SWF, PDF, etc. and use them in other applications such as web browsers, video editors, presentation software, etc.
-
Use the help and tutorials functions to learn more about the software features: The software has a set of functions that can help you learn more about the software features and how to use them. You can use the help function to access the user manual, the FAQ, the online support, and the contact information. You can also use the tutorials function to access the video tutorials, the online tutorials, and the sample projects.
-
-
Pros and Cons of Aurora 3D Text Logo Maker
-
Aurora 3D Text Logo Maker is a software that has many advantages and disadvantages. Here is a table that compares them:
| Pros | Cons |
| --- | --- |
| Easy to use, with a user-friendly interface. | Not free; requires a key serial number to activate the full version. |
| Hundreds of templates and effects to choose from. | May take up a lot of disk space and memory. |
| Compatible with other popular software and formats. | May not support some fonts or languages. |
| Lets you export your design to various formats and purposes. | May have some bugs or errors that need to be fixed. |
Conclusion
-
Aurora 3D Text Logo Maker is a powerful and easy-to-use software that lets you create stunning 3D text effects, logos, icons, and animations in minutes. You can download it for free with a key serial number and enjoy all its features and functions. You can also use it to create amazing text and logos for your website, blog, video, presentation, or any other project. You can customize your design with fonts, colors, shapes, effects, etc. and export it to different formats and purposes. You can also learn more about the software features and how to use them by using the help and tutorials functions.
-
If you are looking for a way to create eye-catching and professional-looking text and logos for your projects, you might want to check out Aurora 3D Text Logo Maker. This software can help you unleash your creativity and imagination and make your projects stand out from the crowd.
-
So what are you waiting for? Download Aurora 3D Text Logo Maker full version 13.04.18 with a serial key for free today and start creating stunning text and logos for your projects.
-
-
FAQs
-
Here are some frequently asked questions and answers about Aurora 3D Text Logo Maker:
-
-
Q: How much does Aurora 3D Text Logo Maker cost?
-A: Aurora 3D Text Logo Maker is not free software; a single-user license costs $49.95. However, you can download it for free with a serial key from the official website or a trusted download site.
-
Q: What are the system requirements for Aurora 3D Text Logo Maker?
-A: Aurora 3D Text Logo Maker works on Windows XP/Vista/7/8/10 or Mac OS X 10.6 or later. It requires at least 1 GB of RAM and 150 MB of disk space.
-
Q: How can I contact the support team of Aurora 3D Text Logo Maker?
-A: You can contact the support team of Aurora 3D Text Logo Maker by email at support@presentation-3d.com or by phone at +86-28-8535-4545.
-
Q: How can I get more templates and effects for Aurora 3D Text Logo Maker?
-A: You can get more templates and effects for Aurora 3D Text Logo Maker by visiting the official website or the online store of the software. You can also create your own templates and effects by using the template editor or the effect editor.
-
Q: How can I share my design with others?
-A: You can share your design with others by exporting it to different formats such as PNG, JPG, BMP, TIFF, GIF, SVG, SWF, PDF, etc. You can also upload your design to social media platforms such as Facebook, Twitter, YouTube, etc.
-
-
\ No newline at end of file
diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/GTA 4 V1 0 Crack Play the Complete Edition of Grand Theft Auto IV with No DRM.md b/spaces/tialenAdioni/chat-gpt-api/logs/GTA 4 V1 0 Crack Play the Complete Edition of Grand Theft Auto IV with No DRM.md
deleted file mode 100644
index f55acb48dafea3976b7f636a7e9a96cd6a0bcbc9..0000000000000000000000000000000000000000
--- a/spaces/tialenAdioni/chat-gpt-api/logs/GTA 4 V1 0 Crack Play the Complete Edition of Grand Theft Auto IV with No DRM.md
+++ /dev/null
@@ -1,58 +0,0 @@
-
-
AGi32 Lighting Software: A Powerful Tool for Lighting Design and Analysis
-
AGi32 is a software tool used to predict the lighting performance of electric luminaires and/or daylight in a simulated interior or exterior environment[^3^]. The environments considered in AGi32 range from a simple interior space to exterior site lighting or a complex auditorium.
AGi32 can provide numeric and rendered solutions for almost any lighting application, interior or exterior, including roadway, sports, and daylighting[^1^]. AGi32 is, first and foremost, a calculation tool for accurate photometric predictions: a technical tool that can compute illuminance, luminance, exitance, daylight factor, glare rating, and other metrics in any situation, assist in luminaire placement and aiming, and validate adherence to any number of lighting criteria[^1^]. However, much more can be done to enhance the understanding of photometric results. Visualization is extremely important for comprehending changes in luminance across different materials and surface properties, and for predicting the effect of various luminaire designs in real-world light and surface interaction. With the ability to see results clearly for an entire project, AGi32 becomes a mock-up substitute that can save time and money by creating a virtual model of a proposed design. It can reveal characteristics and effects that would be hard to detect in anything short of the completed installation[^1^].
-
AGi32 lets you choose between two calculation techniques when simulating a lighting application: the Direct Calculation Method and the Full Radiosity Method. Each has its merits, and the choice is an important one for any application; it is easy to move from one to the other and simply repeat the calculation[^2^]. The Direct Calculation Method considers only light traveling directly from luminaires to calculation points; it produces point-by-point results only and cannot be rendered. This method lends itself particularly well to exterior lighting projects such as site lighting, roadway, and sports applications, and may also be used for fast direct-light calculations in interior facilities. The obstructive nature of surfaces is considered[^2^]. The Full Radiosity Method enables all features for the accurate computation of interreflected light. It is required for interior lighting applications where interreflected light and indirect lighting are important, or when rendering is desired. Due to the rigorous nature of interreflected-light calculations, Full Radiosity projects require a little additional run time over Direct Method projects of the same scale. Switching between the two methods is as simple as a mouse click and can be performed at any time[^2^].
-
AGi32 can automatically place grids of calculation points on any surface as well as on designated workplanes. This command is powerful and convenient: it saves the time it would normally take to place a conventional grid of points on a surface and eliminate unwanted points, and it easily accommodates curved and sloping surfaces. Automatic placement is the method of choice for calculation points on irregular surfaces[^2^]. AGi32 can also use a variety of techniques, such as polygons, grids, lines, and single-point specification, to place calculation points anywhere in 3-dimensional space. These capabilities allow AGi32 to predict or quantify the distribution of artificial light in any environment. An unlimited number of unique calculation planes can be specified, calculation points can be aimed in any direction, and elevations can be considered to track irregular topography[^2^].
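-
To make the point-by-point idea concrete, here is a minimal sketch of a direct-only illuminance calculation using the inverse-square cosine law. This is not AGi32 code: the uniform point-source luminaire model and all names are illustrative assumptions, but it shows the kind of computation a point-by-point Direct Method performs at each calculation point.
-
```python
import numpy as np

def direct_illuminance(points, luminaires):
    """Direct-only illuminance (lux) at horizontal calculation points.

    points: (N, 3) array of x, y, z positions in meters.
    luminaires: list of (position, intensity_cd) pairs, each approximated
    as a point source emitting uniformly in all directions.
    """
    E = np.zeros(len(points))
    for pos, intensity in luminaires:
        vec = np.asarray(pos) - points             # point-to-luminaire vectors
        d2 = np.sum(vec ** 2, axis=1)              # squared distances
        cos_i = np.clip(vec[:, 2] / np.sqrt(d2), 0.0, None)  # incidence on a horizontal plane
        E += intensity * cos_i / d2                # E = I * cos(theta) / d^2
    return E

# a 5x5 grid of points on the floor of a 2 m x 2 m area,
# one 1000 cd source mounted 3 m above its center
xs, ys = np.meshgrid(np.linspace(0, 2, 5), np.linspace(0, 2, 5))
grid = np.column_stack([xs.ravel(), ys.ravel(), np.zeros(xs.size)])
print(direct_illuminance(grid, [((1.0, 1.0, 3.0), 1000.0)]).round(1))
```
-
A radiosity pass would then add interreflected light on top of this direct component, which is what makes Full Radiosity projects slower but renderable.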
-
-
AGi32 is a simulation tool used for designing lighting projects and calculating the amount of light that will be delivered based on user-set parameters. The resulting calculations are commonly referred to as lighting layouts or point-by-points[^4^]. AGi32 is a computational program that performs numerical point-by-point calculations of incident direct or reflected light on any real surface or imaginary plane[^5^]. AGi32 is a software product developed by Lighting Analysts Inc., a company that has been developing software tools for lighting professionals since 1984[^1^]. AGi32 is available for purchase or trial download from their website: https://lightinganalysts.com/software-products/agi32/overview/.
-
-
\ No newline at end of file
diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/About Him Alex Isley Instrumental Christmas _TOP_.md b/spaces/tioseFevbu/cartoon-converter/scripts/About Him Alex Isley Instrumental Christmas _TOP_.md
deleted file mode 100644
index cdc3d51f53a57f74dc905dba2b0e70385da83002..0000000000000000000000000000000000000000
--- a/spaces/tioseFevbu/cartoon-converter/scripts/About Him Alex Isley Instrumental Christmas _TOP_.md
+++ /dev/null
@@ -1,14 +0,0 @@
-
-
About Him: A Christmas Song by Alex Isley
-
Alex Isley is a singer-songwriter who comes from a musical family. She is the daughter of Ernie Isley, one of the members of the legendary soul group the Isley Brothers. She grew up in New Jersey and Los Angeles, and was influenced by artists like Stevie Wonder, Prince, Ella Fitzgerald, Toni Braxton, Aaliyah, and Mariah Carey.
One of her songs, "About Him", is a romantic ballad that expresses her feelings for a special someone. She sings about how he makes her heart say "ooh, yeah, yeah, yeah" and how he is like sugar, so sweet. She also compares him to a Christmas gift that she can't wait to open.
-
The song was released in 2013 as part of her debut EP, "Dreams In Analog", which she wrote and produced herself. The song has a smooth and soulful vibe, with a simple piano accompaniment and Alex's silky vocals. The song also has an official video, which shows Alex singing in a cozy apartment decorated with Christmas lights and ornaments.
-
"About Him" is a perfect song for the holiday season, as it captures the joy and excitement of being in love. It also showcases Alex's talent and creativity as an independent artist. You can listen to the song on YouTube[^1^] or Genius[^2^], or learn more about Alex on Wikipedia[^3^].
One of her latest songs is "Good & Plenty", which features Masego and Jack Dine. The song is a catchy and upbeat tune that celebrates self-love and confidence. She sings about how she is "good and plenty" and how she doesn't need anyone to validate her worth. She also encourages others to feel the same way about themselves. The song has a colorful and fun video, which shows Alex and Masego dancing and having a good time.
-
Alex Isley is a versatile and talented artist who can work across different genres and moods. She has a unique style and voice that set her apart from other singers. She is also passionate about her craft and dedicated to her fans: she often interacts with them on social media and shares snippets of her upcoming songs. She is an artist to watch and support in the music industry.
If you want to listen to more of Alex Isley's music, you can find her on various streaming platforms, such as Spotify, Apple Music, YouTube, and SoundCloud. You can also follow her on social media, such as Instagram, Twitter, and Facebook. She often posts updates on her music and personal life, as well as inspirational messages and tips for aspiring artists. She also has a website, where you can learn more about her background, discography, and upcoming events.
-
Alex Isley is a rising star in the music scene, who has a lot to offer to her listeners. She has a rich musical heritage, a distinctive voice, and a creative vision. She makes music that is soulful, honest, and relatable. She is also a humble and gracious person, who appreciates her fans and supporters. She is an artist who deserves more recognition and appreciation for her work.
-
If you are looking for a new artist to add to your playlist, or if you are already a fan of Alex Isley, check out "About Him". It is a beautiful song that will make you feel the love this Christmas season, and it showcases Alex's talent and potential as an artist. You will not regret listening to it.
Imogen Heap is a Grammy-winning singer-songwriter and producer from England. She is known for her eclectic and experimental musical style, blending elements of pop, rock, electronica, and classical music. In 2009, she released her third solo album, Ellipse, which won her a Grammy Award for Best Engineered Album, Non-Classical.
-
Ellipse is a concept album that explores the themes of time, space, and relationships. It features Heap's signature use of vocal loops, layered harmonies, and organic sounds. The album also includes a bonus disc of instrumental versions of all the songs, which showcase Heap's intricate and inventive arrangements. The instrumental disc is a rare and valuable item for fans and collectors of Heap's music.
The instrumental disc is available in FLAC format, which is a lossless audio codec that preserves the original quality and detail of the recordings. FLAC files are larger than MP3 files, but they offer a superior listening experience for audiophiles and music enthusiasts. You can stream or download the instrumental disc from SoundCloud[^1^] [^2^] [^3^], where you can also find other works by Imogen Heap and similar artists.
If you want to learn more about Imogen Heap and her creative process, you can watch her vlogs on YouTube, where she documents the making of her albums and songs. You can also follow her on Twitter and Instagram, where she shares updates and insights on her life and music. She is also active on Patreon, where she offers exclusive content and rewards for her supporters.
-
Imogen Heap is not only a talented musician, but also a visionary and innovator. She is the founder of the Mi.Mu Gloves, which are wearable musical instruments that allow her to control sound and visuals with gestures and movements. She is also the creator of the Creative Passport, which is a digital identity platform for music makers that aims to empower them and improve the music industry. She is constantly exploring new ways to express herself and connect with her fans and peers.
-
Imogen Heap is one of the most original and influential artists of our time. Her music is a blend of artistry, technology, and emotion that transcends genres and boundaries. Her instrumental disc of Ellipse is a testament to her musical genius and a gift to her listeners. Whether you are a fan of her vocals or her instrumentals, you will find something to enjoy and appreciate in her work.
One of the reasons why Imogen Heap's music is so captivating and inspiring is that she draws from her own experiences and emotions. She writes songs about love, loss, joy, pain, and everything in between. She is not afraid to be vulnerable and honest in her lyrics, which resonate with many listeners. She also uses her music as a way to raise awareness and support for various causes and issues that she cares about, such as environmentalism, human rights, and mental health.
-
Some of her most popular and acclaimed songs include "Hide and Seek", "Let Go", "Goodnight and Go", "Speeding Cars", "Tiny Human", and "Lifeline". She has also collaborated with other artists such as Frou Frou, Jeff Beck, Deadmau5, Ariana Grande, Taylor Swift, and Jon Hopkins. She has performed at various venues and events around the world, such as the Royal Albert Hall, the Sydney Opera House, the Grammy Awards, and the Opening Ceremony of the 2012 London Olympics.
-
Imogen Heap is a musical phenomenon that deserves recognition and admiration. Her instrumental disc of Ellipse is a rare opportunity to experience her music in a different way. It is a showcase of her skills and creativity as a composer and producer. It is also a reminder of her passion and dedication to her craft. If you are looking for a musical adventure that will challenge and delight you, you should give it a listen.
-
-
\ No newline at end of file
diff --git a/spaces/tnt2011/dog_cat_classifier/app.py b/spaces/tnt2011/dog_cat_classifier/app.py
deleted file mode 100644
index d7843f0b52598421964e7021ec56a39772881a6a..0000000000000000000000000000000000000000
--- a/spaces/tnt2011/dog_cat_classifier/app.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# AUTOGENERATED! DO NOT EDIT! File to edit: dog_v_cat.ipynb.
-
-# %% auto 0
-__all__ = ['learn', 'categories', 'image', 'label', 'examples', 'intf', 'is_cat', 'classify_image']
-
-# %% dog_v_cat.ipynb 1
-from fastai.vision.all import *
-import gradio as gr
-
-def is_cat(x):
- # labeling function used when the model was trained; it must be defined
- # here so that load_learner() can unpickle the exported learner
- return x[0].isupper()
-
-# %% dog_v_cat.ipynb 12
-learn = load_learner('model.pkl')
-
-# %% dog_v_cat.ipynb 14
-categories = ('Dog', 'Cat')
-def classify_image(img):
- pred, idx, probs = learn.predict(img)
- return dict(zip(categories, map(float, probs)))
-
-
-# %% dog_v_cat.ipynb 17
-# gr.inputs / gr.outputs is the legacy Gradio 3.x component API; newer
-# releases use gr.Image(...) and gr.Label() directly
-image = gr.inputs.Image(shape=(192,192))
-label = gr.outputs.Label()
-examples = ['dog.jpg','cat.jpg', 'challenge.jpg']
-
-intf = gr.Interface(fn=classify_image, inputs=image, outputs=label, examples=examples)
-intf.launch(inline=False)
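-
-# Illustrative usage outside the UI (not part of the original app); fastai's
-# predict() also accepts a file path, so e.g.:
-#   print(classify_image('dog.jpg'))   # -> {'Dog': 0.97, 'Cat': 0.03} (values vary)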
diff --git a/spaces/tomofi/MMOCR/mmocr/models/textrecog/decoders/base_decoder.py b/spaces/tomofi/MMOCR/mmocr/models/textrecog/decoders/base_decoder.py
deleted file mode 100644
index 09e2db88fde3c6ca02f20f3bb57ee0da0f8b1ce7..0000000000000000000000000000000000000000
--- a/spaces/tomofi/MMOCR/mmocr/models/textrecog/decoders/base_decoder.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-from mmcv.runner import BaseModule
-
-from mmocr.models.builder import DECODERS
-
-
-@DECODERS.register_module()
-class BaseDecoder(BaseModule):
- """Base decoder class for text recognition."""
-
- def __init__(self, init_cfg=None, **kwargs):
- super().__init__(init_cfg=init_cfg)
-
- def forward_train(self, feat, out_enc, targets_dict, img_metas):
- raise NotImplementedError
-
- def forward_test(self, feat, out_enc, img_metas):
- raise NotImplementedError
-
- def forward(self,
- feat,
- out_enc,
- targets_dict=None,
- img_metas=None,
- train_mode=True):
- self.train_mode = train_mode
- if train_mode:
- return self.forward_train(feat, out_enc, targets_dict, img_metas)
-
- return self.forward_test(feat, out_enc, img_metas)
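-
-
-# Illustrative only (not part of MMOCR): a minimal concrete decoder. A
-# subclass registers itself with the DECODERS registry and implements the
-# two hooks that forward() dispatches to via train_mode.
-@DECODERS.register_module()
-class PassthroughDecoder(BaseDecoder):
-    """Toy decoder that returns the encoder output unchanged."""
-
-    def forward_train(self, feat, out_enc, targets_dict, img_metas):
-        return out_enc
-
-    def forward_test(self, feat, out_enc, img_metas):
-        return out_enc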
diff --git a/spaces/tomofi/MMOCR/mmocr/models/textrecog/encoders/base_encoder.py b/spaces/tomofi/MMOCR/mmocr/models/textrecog/encoders/base_encoder.py
deleted file mode 100644
index 726c78a8c938e8feb6423f91ace4ebf319f167c7..0000000000000000000000000000000000000000
--- a/spaces/tomofi/MMOCR/mmocr/models/textrecog/encoders/base_encoder.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-from mmcv.runner import BaseModule
-
-from mmocr.models.builder import ENCODERS
-
-
-@ENCODERS.register_module()
-class BaseEncoder(BaseModule):
- """Base Encoder class for text recognition."""
-
- def forward(self, feat, **kwargs):
- return feat
diff --git a/spaces/tomofi/MaskTextSpotterV3-OCR/maskrcnn_benchmark/modeling/roi_heads/mask_head/roi_mask_predictors.py b/spaces/tomofi/MaskTextSpotterV3-OCR/maskrcnn_benchmark/modeling/roi_heads/mask_head/roi_mask_predictors.py
deleted file mode 100644
index 1a4f790de16f5808908285ada394234ca40e1ada..0000000000000000000000000000000000000000
--- a/spaces/tomofi/MaskTextSpotterV3-OCR/maskrcnn_benchmark/modeling/roi_heads/mask_head/roi_mask_predictors.py
+++ /dev/null
@@ -1,268 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
-from maskrcnn_benchmark.layers import Conv2d, ConvTranspose2d
-from torch import nn
-from torch.nn import functional as F
-
-from .roi_seq_predictors import make_roi_seq_predictor
-
-
-class MaskRCNNC4Predictor(nn.Module):
- def __init__(self, cfg):
- super(MaskRCNNC4Predictor, self).__init__()
- num_classes = cfg.MODEL.ROI_BOX_HEAD.NUM_CLASSES
- dim_reduced = cfg.MODEL.ROI_MASK_HEAD.CONV_LAYERS[-1]
-
- if cfg.MODEL.ROI_HEADS.USE_FPN:
- if cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION == 'CAT':
- num_inputs = dim_reduced + 1
- elif cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION == 'MIX' or cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION == 'ATTENTION_CHANNEL':
- num_inputs = dim_reduced * 2
- else:
- num_inputs = dim_reduced
- else:
- stage_index = 4
- stage2_relative_factor = 2 ** (stage_index - 1)
- res2_out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS
- num_inputs = res2_out_channels * stage2_relative_factor
-
- self.conv5_mask = ConvTranspose2d(num_inputs, dim_reduced, 2, 2, 0)
- self.mask_fcn_logits = Conv2d(dim_reduced, num_classes, 1, 1, 0)
-
- for name, param in self.named_parameters():
- if "bias" in name:
- nn.init.constant_(param, 0)
- elif "weight" in name:
- # Caffe2 implementation uses MSRAFill, which in fact
- # corresponds to kaiming_normal_ in PyTorch
- nn.init.kaiming_normal_(param, mode="fan_out", nonlinearity="relu")
-
- def forward(self, x):
- x = F.relu(self.conv5_mask(x))
- return self.mask_fcn_logits(x)
-
-
-class CharMaskRCNNC4Predictor(nn.Module):
- def __init__(self, cfg):
- super(CharMaskRCNNC4Predictor, self).__init__()
- # num_classes = cfg.MODEL.ROI_BOX_HEAD.NUM_CLASSES
- num_classes = 1
- char_num_classes = cfg.MODEL.ROI_MASK_HEAD.CHAR_NUM_CLASSES
- dim_reduced = cfg.MODEL.ROI_MASK_HEAD.CONV_LAYERS[-1]
-
- if cfg.MODEL.ROI_HEADS.USE_FPN:
- if cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION == 'CAT':
- num_inputs = dim_reduced + 1
- elif cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION == 'MIX' or cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION == 'ATTENTION_CHANNEL':
- num_inputs = dim_reduced * 2
- else:
- num_inputs = dim_reduced
- else:
- stage_index = 4
- stage2_relative_factor = 2 ** (stage_index - 1)
- res2_out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS
- num_inputs = res2_out_channels * stage2_relative_factor
-
- self.conv5_mask = ConvTranspose2d(num_inputs, dim_reduced, 2, 2, 0)
- if cfg.MODEL.CHAR_MASK_ON:
- self.mask_fcn_logits = Conv2d(dim_reduced, num_classes, 1, 1, 0)
- self.char_mask_fcn_logits = Conv2d(dim_reduced, char_num_classes, 1, 1, 0)
- else:
- self.mask_fcn_logits = Conv2d(dim_reduced, num_classes, 1, 1, 0)
-
- for name, param in self.named_parameters():
- if "bias" in name:
- nn.init.constant_(param, 0)
- elif "weight" in name:
- # Caffe2 implementation uses MSRAFill, which in fact
- # corresponds to kaiming_normal_ in PyTorch
- nn.init.kaiming_normal_(param, mode="fan_out", nonlinearity="relu")
-
- def forward(self, x):
- x = F.relu(self.conv5_mask(x))
- return self.mask_fcn_logits(x), self.char_mask_fcn_logits(x)
-
-
-class SeqCharMaskRCNNC4Predictor(nn.Module):
- def __init__(self, cfg):
- super(SeqCharMaskRCNNC4Predictor, self).__init__()
- # num_classes = cfg.MODEL.ROI_BOX_HEAD.NUM_CLASSES
- num_classes = 1
- char_num_classes = cfg.MODEL.ROI_MASK_HEAD.CHAR_NUM_CLASSES
- dim_reduced = cfg.MODEL.ROI_MASK_HEAD.CONV_LAYERS[-1]
-
- if cfg.MODEL.ROI_HEADS.USE_FPN:
- if cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION == 'CAT':
- num_inputs = dim_reduced + 1
- # note: substring match ('ATTENTION_CHANNEL' in ...) here, unlike the
- # equality test used in the sibling predictor classes
- elif cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION == 'MIX' or 'ATTENTION_CHANNEL' in cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION:
- num_inputs = dim_reduced * 2
- else:
- num_inputs = dim_reduced
- else:
- stage_index = 4
- stage2_relative_factor = 2 ** (stage_index - 1)
- res2_out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS
- num_inputs = res2_out_channels * stage2_relative_factor
-
- self.conv5_mask = ConvTranspose2d(num_inputs, dim_reduced, 2, 2, 0)
- if cfg.MODEL.CHAR_MASK_ON:
- self.mask_fcn_logits = Conv2d(dim_reduced, num_classes, 1, 1, 0)
- self.char_mask_fcn_logits = Conv2d(dim_reduced, char_num_classes, 1, 1, 0)
- self.seq = make_roi_seq_predictor(cfg, dim_reduced)
- else:
- self.mask_fcn_logits = Conv2d(dim_reduced, num_classes, 1, 1, 0)
-
- for name, param in self.named_parameters():
- if "bias" in name:
- nn.init.constant_(param, 0)
- elif "weight" in name:
- # Caffe2 implementation uses MSRAFill, which in fact
- # corresponds to kaiming_normal_ in PyTorch
- nn.init.kaiming_normal_(param, mode="fan_out", nonlinearity="relu")
-
- def forward(self, x, decoder_targets=None, word_targets=None):
- x = F.relu(self.conv5_mask(x))
- if self.training:
- loss_seq_decoder = self.seq(
- x, decoder_targets=decoder_targets, word_targets=word_targets
- )
- return (
- self.mask_fcn_logits(x),
- self.char_mask_fcn_logits(x),
- loss_seq_decoder,
- )
- else:
- decoded_chars, decoded_scores, detailed_decoded_scores = self.seq(
- x, use_beam_search=True
- )
- return (
- self.mask_fcn_logits(x),
- self.char_mask_fcn_logits(x),
- decoded_chars,
- decoded_scores,
- detailed_decoded_scores,
- )
-
-class SeqMaskRCNNC4Predictor(nn.Module):
- def __init__(self, cfg):
- super(SeqMaskRCNNC4Predictor, self).__init__()
- num_classes = 1
- # char_num_classes = cfg.MODEL.ROI_MASK_HEAD.CHAR_NUM_CLASSES
- dim_reduced = cfg.MODEL.ROI_MASK_HEAD.CONV_LAYERS[-1]
-
- if cfg.MODEL.ROI_HEADS.USE_FPN:
- if cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION == 'CAT':
- num_inputs = dim_reduced + 1
- elif cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION == 'MIX' or cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION == 'ATTENTION_CHANNEL':
- num_inputs = dim_reduced * 2
- else:
- num_inputs = dim_reduced
- else:
- stage_index = 4
- stage2_relative_factor = 2 ** (stage_index - 1)
- res2_out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS
- num_inputs = res2_out_channels * stage2_relative_factor
-
- self.conv5_mask = ConvTranspose2d(num_inputs, dim_reduced, 2, 2, 0)
- if cfg.SEQUENCE.SEQ_ON:
- self.mask_fcn_logits = Conv2d(dim_reduced, num_classes, 1, 1, 0)
- self.seq = make_roi_seq_predictor(cfg, dim_reduced)
- else:
- self.mask_fcn_logits = Conv2d(dim_reduced, num_classes, 1, 1, 0)
-
- for name, param in self.named_parameters():
- if "bias" in name:
- nn.init.constant_(param, 0)
- elif "weight" in name:
- # Caffe2 implementation uses MSRAFill, which in fact
- # corresponds to kaiming_normal_ in PyTorch
- nn.init.kaiming_normal_(param, mode="fan_out", nonlinearity="relu")
-
- def forward(self, x, decoder_targets=None, word_targets=None):
- x = F.relu(self.conv5_mask(x))
- if self.training:
- loss_seq_decoder = self.seq(
- x, decoder_targets=decoder_targets, word_targets=word_targets
- )
- return (
- self.mask_fcn_logits(x),
- loss_seq_decoder,
- )
- else:
- decoded_chars, decoded_scores, detailed_decoded_scores = self.seq(
- x, use_beam_search=True
- )
- return (
- self.mask_fcn_logits(x),
- decoded_chars,
- decoded_scores,
- detailed_decoded_scores,
- )
-
-class SeqRCNNC4Predictor(nn.Module):
- def __init__(self, cfg):
- super(SeqRCNNC4Predictor, self).__init__()
- num_classes = 1
- # char_num_classes = cfg.MODEL.ROI_MASK_HEAD.CHAR_NUM_CLASSES
- dim_reduced = cfg.MODEL.ROI_MASK_HEAD.CONV_LAYERS[-1]
-
- if cfg.MODEL.ROI_HEADS.USE_FPN:
- if cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION == 'CAT':
- num_inputs = dim_reduced + 1
- elif cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION == 'MIX' or cfg.MODEL.ROI_MASK_HEAD.MIX_OPTION == 'ATTENTION_CHANNEL':
- num_inputs = dim_reduced * 2
- else:
- num_inputs = dim_reduced
- else:
- stage_index = 4
- stage2_relative_factor = 2 ** (stage_index - 1)
- res2_out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS
- num_inputs = res2_out_channels * stage2_relative_factor
-
- self.conv5_mask = ConvTranspose2d(num_inputs, dim_reduced, 2, 2, 0)
- if cfg.SEQUENCE.SEQ_ON:
- # self.mask_fcn_logits = Conv2d(dim_reduced, num_classes, 1, 1, 0)
- self.seq = make_roi_seq_predictor(cfg, dim_reduced)
- # else:
- # self.mask_fcn_logits = Conv2d(dim_reduced, num_classes, 1, 1, 0)
-
- for name, param in self.named_parameters():
- if "bias" in name:
- nn.init.constant_(param, 0)
- elif "weight" in name:
- # Caffe2 implementation uses MSRAFill, which in fact
- # corresponds to kaiming_normal_ in PyTorch
- nn.init.kaiming_normal_(param, mode="fan_out", nonlinearity="relu")
-
- def forward(self, x, decoder_targets=None, word_targets=None):
- x = F.relu(self.conv5_mask(x))
- if self.training:
- loss_seq_decoder = self.seq(
- x, decoder_targets=decoder_targets, word_targets=word_targets
- )
- return (
- None,
- loss_seq_decoder,
- )
- else:
- decoded_chars, decoded_scores, detailed_decoded_scores = self.seq(
- x, use_beam_search=True
- )
- return (
- None,
- decoded_chars,
- decoded_scores,
- detailed_decoded_scores,
- )
-
-_ROI_MASK_PREDICTOR = {
- "MaskRCNNC4Predictor": MaskRCNNC4Predictor,
- "CharMaskRCNNC4Predictor": CharMaskRCNNC4Predictor,
- "SeqCharMaskRCNNC4Predictor": SeqCharMaskRCNNC4Predictor,
- "SeqMaskRCNNC4Predictor": SeqMaskRCNNC4Predictor,
- "SeqRCNNC4Predictor": SeqRCNNC4Predictor,
-}
-
-
-def make_roi_mask_predictor(cfg):
- func = _ROI_MASK_PREDICTOR[cfg.MODEL.ROI_MASK_HEAD.PREDICTOR]
- return func(cfg)
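-
-
-# Illustrative only (not part of the original file): the predictor class is
-# chosen by name from a yacs-style config, e.g.
-#   cfg.MODEL.ROI_MASK_HEAD.PREDICTOR = "SeqMaskRCNNC4Predictor"
-#   predictor = make_roi_mask_predictor(cfg)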
diff --git a/spaces/tonyassi/video-face-swap/DeepFakeAI/uis/components/benchmark.py b/spaces/tonyassi/video-face-swap/DeepFakeAI/uis/components/benchmark.py
deleted file mode 100644
index 450cdd0dc82cf74fa203698b66b8860d913917a8..0000000000000000000000000000000000000000
--- a/spaces/tonyassi/video-face-swap/DeepFakeAI/uis/components/benchmark.py
+++ /dev/null
@@ -1,116 +0,0 @@
-from typing import Any, Optional, List
-import time
-import tempfile
-import statistics
-import gradio
-
-import DeepFakeAI.globals
-from DeepFakeAI import wording
-from DeepFakeAI.capturer import get_video_frame_total
-from DeepFakeAI.core import conditional_process
-from DeepFakeAI.uis.typing import Update
-from DeepFakeAI.utilities import normalize_output_path, clear_temp
-
-BENCHMARK_RESULT_DATAFRAME : Optional[gradio.Dataframe] = None
-BENCHMARK_CYCLES_SLIDER : Optional[gradio.Slider] = None
-BENCHMARK_START_BUTTON : Optional[gradio.Button] = None
-BENCHMARK_CLEAR_BUTTON : Optional[gradio.Button] = None
-
-
-def render() -> None:
- global BENCHMARK_RESULT_DATAFRAME
- global BENCHMARK_CYCLES_SLIDER
- global BENCHMARK_START_BUTTON
- global BENCHMARK_CLEAR_BUTTON
-
- with gradio.Box():
- BENCHMARK_RESULT_DATAFRAME = gradio.Dataframe(
- label = wording.get('benchmark_result_dataframe_label'),
- headers =
- [
- 'target_path',
- 'benchmark_cycles',
- 'average_run',
- 'fastest_run',
- 'slowest_run',
- 'relative_fps'
- ],
- col_count = (6, 'fixed'),
- row_count = (7, 'fixed'),
- datatype =
- [
- 'str',
- 'number',
- 'number',
- 'number',
- 'number',
- 'number'
- ]
- )
- BENCHMARK_CYCLES_SLIDER = gradio.Slider(
- label = wording.get('benchmark_cycles_slider_label'),
- minimum = 1,
- step = 1,
- value = 3,
- maximum = 10
- )
- with gradio.Row():
- BENCHMARK_START_BUTTON = gradio.Button(wording.get('start_button_label'))
- BENCHMARK_CLEAR_BUTTON = gradio.Button(wording.get('clear_button_label'))
-
-
-def listen() -> None:
- BENCHMARK_START_BUTTON.click(update, inputs = BENCHMARK_CYCLES_SLIDER, outputs = BENCHMARK_RESULT_DATAFRAME)
- BENCHMARK_CLEAR_BUTTON.click(clear, outputs = BENCHMARK_RESULT_DATAFRAME)
-
-
-def update(benchmark_cycles : int) -> Update:
- DeepFakeAI.globals.source_path = '.assets/examples/source.jpg'
- target_paths =\
- [
- '.assets/examples/target-240p.mp4',
- '.assets/examples/target-360p.mp4',
- '.assets/examples/target-540p.mp4',
- '.assets/examples/target-720p.mp4',
- '.assets/examples/target-1080p.mp4',
- '.assets/examples/target-1440p.mp4',
- '.assets/examples/target-2160p.mp4'
- ]
- value = [ benchmark(target_path, benchmark_cycles) for target_path in target_paths ]
- return gradio.update(value = value)
-
-
-def benchmark(target_path : str, benchmark_cycles : int) -> List[Any]:
- process_times = []
- total_fps = 0.0
- # run one extra cycle: the first pass (i == 0) warms up the pipeline and
- # is excluded from the statistics gathered below
- for i in range(benchmark_cycles + 1):
- DeepFakeAI.globals.target_path = target_path
- DeepFakeAI.globals.output_path = normalize_output_path(DeepFakeAI.globals.source_path, DeepFakeAI.globals.target_path, tempfile.gettempdir())
- video_frame_total = get_video_frame_total(DeepFakeAI.globals.target_path)
- start_time = time.perf_counter()
- conditional_process()
- end_time = time.perf_counter()
- process_time = end_time - start_time
- fps = video_frame_total / process_time
- if i > 0:
- process_times.append(process_time)
- total_fps += fps
- average_run = round(statistics.mean(process_times), 2)
- fastest_run = round(min(process_times), 2)
- slowest_run = round(max(process_times), 2)
- relative_fps = round(total_fps / benchmark_cycles, 2)
- return\
- [
- DeepFakeAI.globals.target_path,
- benchmark_cycles,
- average_run,
- fastest_run,
- slowest_run,
- relative_fps
- ]
-
-
-def clear() -> Update:
- if DeepFakeAI.globals.target_path:
- clear_temp(DeepFakeAI.globals.target_path)
- return gradio.update(value = None)
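-
-
-# Illustrative wiring (not part of this file): components in this UI package
-# expose render()/listen() pairs that a layout calls inside a Blocks context,
-# roughly:
-#   with gradio.Blocks() as ui:
-#       render()
-#       listen()
-#   ui.launch()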
diff --git a/spaces/training-transformers-together/calc/Makefile b/spaces/training-transformers-together/calc/Makefile
deleted file mode 100644
index 679d7b6e723dcb2d8fe7a137e856f5276c393078..0000000000000000000000000000000000000000
--- a/spaces/training-transformers-together/calc/Makefile
+++ /dev/null
@@ -1,15 +0,0 @@
-
-.PHONY: quality style test test-examples
-
-# Check that source code meets quality standards
-
-quality:
- python -m black --check --line-length 119 --target-version py38 .
- python -m isort --check-only .
- python -m flake8 --max-line-length 119
-
-# Format source code automatically
-
-style:
- python -m black --line-length 119 --target-version py38 .
- python -m isort .
\ No newline at end of file
diff --git a/spaces/trttung1610/musicgen/CONTRIBUTING.md b/spaces/trttung1610/musicgen/CONTRIBUTING.md
deleted file mode 100644
index a3e9507643d4439f509a8fc8b87dc73417ef9822..0000000000000000000000000000000000000000
--- a/spaces/trttung1610/musicgen/CONTRIBUTING.md
+++ /dev/null
@@ -1,35 +0,0 @@
-# Contributing to AudioCraft
-
-We want to make contributing to this project as easy and transparent as
-possible.
-
-## Pull Requests
-
-AudioCraft is the implementation of a research paper.
-Therefore, we do not plan on accepting many pull requests for new features.
-We certainly welcome them for bug fixes.
-
-1. Fork the repo and create your branch from `main`.
-2. If you've added code that should be tested, add tests.
-3. If you've changed APIs, update the documentation.
-4. Ensure the test suite passes.
-5. Make sure your code lints.
-6. If you haven't already, complete the Contributor License Agreement ("CLA").
-
-## Contributor License Agreement ("CLA")
-In order to accept your pull request, we need you to submit a CLA. You only need
-to do this once to work on any of Meta's open source projects.
-
-Complete your CLA here:
-
-## Issues
-We use GitHub issues to track public bugs. Please ensure your description is
-clear and has sufficient instructions to be able to reproduce the issue.
-
-Meta has a [bounty program](https://www.facebook.com/whitehat/) for the safe
-disclosure of security bugs. In those cases, please go through the process
-outlined on that page and do not file a public issue.
-
-## License
-By contributing to AudioCraft, you agree that your contributions will be licensed
-under the LICENSE file in the root directory of this source tree.
diff --git a/spaces/trysem/image-matting-app/ppmatting/transforms/__init__.py b/spaces/trysem/image-matting-app/ppmatting/transforms/__init__.py
deleted file mode 100644
index 7986cdd642998fb0638a81c9ea22615faf8bad0b..0000000000000000000000000000000000000000
--- a/spaces/trysem/image-matting-app/ppmatting/transforms/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from .transforms import *
diff --git a/spaces/ttt246/brain/sauce_tests/01 Send Notification/README.md b/spaces/ttt246/brain/sauce_tests/01 Send Notification/README.md
deleted file mode 100644
index 533f637638a1db6c301a9ac48c10d07dc4964db9..0000000000000000000000000000000000000000
--- a/spaces/ttt246/brain/sauce_tests/01 Send Notification/README.md
+++ /dev/null
@@ -1,16 +0,0 @@
-# Basics: Send Notification
-
-## What you'll find
-
-This folder contains an example test, and is composed of the following elements:
-- `README.md`: (This file) Contains the purpose of the test.
- `input.yaml`: Sets variables that will be provided to the test.
-- `unit.yaml`: Sets the steps to perform while executing the test.
-
-You can also get familiar with our test script syntax by following our [Syntax Documentation](https://github.com/saucelabs/saucectl-apitest-example/blob/main/README.md).
-
-## Details
-
-As a first test, we simply want to hit an API endpoint and verify that it responds as expected.
-The `get` component performs an HTTP request to the given URL; let's ignore the other parameters for now.
-__Note:__ By default we expect a request to respond with a valid HTTP status code. If it does, the request is considered "passed", and so is the test.
diff --git a/spaces/ucalyptus/PTI/torch_utils/ops/upfirdn2d.py b/spaces/ucalyptus/PTI/torch_utils/ops/upfirdn2d.py
deleted file mode 100644
index ceeac2b9834e33b7c601c28bf27f32aa91c69256..0000000000000000000000000000000000000000
--- a/spaces/ucalyptus/PTI/torch_utils/ops/upfirdn2d.py
+++ /dev/null
@@ -1,384 +0,0 @@
-# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
-#
-# NVIDIA CORPORATION and its licensors retain all intellectual property
-# and proprietary rights in and to this software, related documentation
-# and any modifications thereto. Any use, reproduction, disclosure or
-# distribution of this software and related documentation without an express
-# license agreement from NVIDIA CORPORATION is strictly prohibited.
-
-"""Custom PyTorch ops for efficient resampling of 2D images."""
-
-import os
-import warnings
-import numpy as np
-import torch
-import traceback
-
-from .. import custom_ops
-from .. import misc
-from . import conv2d_gradfix
-
-#----------------------------------------------------------------------------
-
-_inited = False
-_plugin = None
-
-def _init():
- global _inited, _plugin
- if not _inited:
- sources = ['upfirdn2d.cpp', 'upfirdn2d.cu']
- sources = [os.path.join(os.path.dirname(__file__), s) for s in sources]
- try:
- _plugin = custom_ops.get_plugin('upfirdn2d_plugin', sources=sources, extra_cuda_cflags=['--use_fast_math'])
- except Exception:
- warnings.warn('Failed to build CUDA kernels for upfirdn2d. Falling back to slow reference implementation. Details:\n\n' + traceback.format_exc())
- return _plugin is not None
-
-def _parse_scaling(scaling):
- if isinstance(scaling, int):
- scaling = [scaling, scaling]
- assert isinstance(scaling, (list, tuple))
- assert all(isinstance(x, int) for x in scaling)
- sx, sy = scaling
- assert sx >= 1 and sy >= 1
- return sx, sy
-
-def _parse_padding(padding):
- if isinstance(padding, int):
- padding = [padding, padding]
- assert isinstance(padding, (list, tuple))
- assert all(isinstance(x, int) for x in padding)
- if len(padding) == 2:
- padx, pady = padding
- padding = [padx, padx, pady, pady]
- padx0, padx1, pady0, pady1 = padding
- return padx0, padx1, pady0, pady1
-
-def _get_filter_size(f):
- if f is None:
- return 1, 1
- assert isinstance(f, torch.Tensor) and f.ndim in [1, 2]
- fw = f.shape[-1]
- fh = f.shape[0]
- with misc.suppress_tracer_warnings():
- fw = int(fw)
- fh = int(fh)
- misc.assert_shape(f, [fh, fw][:f.ndim])
- assert fw >= 1 and fh >= 1
- return fw, fh
-
-#----------------------------------------------------------------------------
-
-def setup_filter(f, device=torch.device('cpu'), normalize=True, flip_filter=False, gain=1, separable=None):
- r"""Convenience function to setup 2D FIR filter for `upfirdn2d()`.
-
- Args:
- f: Torch tensor, numpy array, or python list of the shape
- `[filter_height, filter_width]` (non-separable),
- `[filter_taps]` (separable),
- `[]` (impulse), or
- `None` (identity).
- device: Result device (default: cpu).
- normalize: Normalize the filter so that it retains the magnitude
- for constant input signal (DC)? (default: True).
- flip_filter: Flip the filter? (default: False).
- gain: Overall scaling factor for signal magnitude (default: 1).
- separable: Return a separable filter? (default: select automatically).
-
- Returns:
- Float32 tensor of the shape
- `[filter_height, filter_width]` (non-separable) or
- `[filter_taps]` (separable).
- """
- # Validate.
- if f is None:
- f = 1
- f = torch.as_tensor(f, dtype=torch.float32)
- assert f.ndim in [0, 1, 2]
- assert f.numel() > 0
- if f.ndim == 0:
- f = f[np.newaxis]
-
- # Separable?
- if separable is None:
- separable = (f.ndim == 1 and f.numel() >= 8)
- if f.ndim == 1 and not separable:
- f = f.ger(f)
- assert f.ndim == (1 if separable else 2)
-
- # Apply normalize, flip, gain, and device.
- if normalize:
- f /= f.sum()
- if flip_filter:
- f = f.flip(list(range(f.ndim)))
- f = f * (gain ** (f.ndim / 2))
- f = f.to(device=device)
- return f
-
-#----------------------------------------------------------------------------
-
-def upfirdn2d(x, f, up=1, down=1, padding=0, flip_filter=False, gain=1, impl='cuda'):
- r"""Pad, upsample, filter, and downsample a batch of 2D images.
-
- Performs the following sequence of operations for each channel:
-
- 1. Upsample the image by inserting N-1 zeros after each pixel (`up`).
-
- 2. Pad the image with the specified number of zeros on each side (`padding`).
- Negative padding corresponds to cropping the image.
-
- 3. Convolve the image with the specified 2D FIR filter (`f`), shrinking it
- so that the footprint of all output pixels lies within the input image.
-
- 4. Downsample the image by keeping every Nth pixel (`down`).
-
- This sequence of operations bears close resemblance to scipy.signal.upfirdn().
- The fused op is considerably more efficient than performing the same calculation
- using standard PyTorch ops. It supports gradients of arbitrary order.
-
- Args:
- x: Float32/float64/float16 input tensor of the shape
- `[batch_size, num_channels, in_height, in_width]`.
- f: Float32 FIR filter of the shape
- `[filter_height, filter_width]` (non-separable),
- `[filter_taps]` (separable), or
- `None` (identity).
- up: Integer upsampling factor. Can be a single int or a list/tuple
- `[x, y]` (default: 1).
- down: Integer downsampling factor. Can be a single int or a list/tuple
- `[x, y]` (default: 1).
- padding: Padding with respect to the upsampled image. Can be a single number
- or a list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]`
- (default: 0).
- flip_filter: False = convolution, True = correlation (default: False).
- gain: Overall scaling factor for signal magnitude (default: 1).
- impl: Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`).
-
- Returns:
- Tensor of the shape `[batch_size, num_channels, out_height, out_width]`.
- """
- assert isinstance(x, torch.Tensor)
- assert impl in ['ref', 'cuda']
- if impl == 'cuda' and x.device.type == 'cuda' and _init():
- return _upfirdn2d_cuda(up=up, down=down, padding=padding, flip_filter=flip_filter, gain=gain).apply(x, f)
- return _upfirdn2d_ref(x, f, up=up, down=down, padding=padding, flip_filter=flip_filter, gain=gain)
-
-#----------------------------------------------------------------------------
-
-@misc.profiled_function
-def _upfirdn2d_ref(x, f, up=1, down=1, padding=0, flip_filter=False, gain=1):
- """Slow reference implementation of `upfirdn2d()` using standard PyTorch ops.
- """
- # Validate arguments.
- assert isinstance(x, torch.Tensor) and x.ndim == 4
- if f is None:
- f = torch.ones([1, 1], dtype=torch.float32, device=x.device)
- assert isinstance(f, torch.Tensor) and f.ndim in [1, 2]
- assert f.dtype == torch.float32 and not f.requires_grad
- batch_size, num_channels, in_height, in_width = x.shape
- upx, upy = _parse_scaling(up)
- downx, downy = _parse_scaling(down)
- padx0, padx1, pady0, pady1 = _parse_padding(padding)
-
- # Upsample by inserting zeros.
- x = x.reshape([batch_size, num_channels, in_height, 1, in_width, 1])
- x = torch.nn.functional.pad(x, [0, upx - 1, 0, 0, 0, upy - 1])
- x = x.reshape([batch_size, num_channels, in_height * upy, in_width * upx])
-
- # Pad or crop.
- x = torch.nn.functional.pad(x, [max(padx0, 0), max(padx1, 0), max(pady0, 0), max(pady1, 0)])
- x = x[:, :, max(-pady0, 0) : x.shape[2] - max(-pady1, 0), max(-padx0, 0) : x.shape[3] - max(-padx1, 0)]
-
- # Setup filter.
- f = f * (gain ** (f.ndim / 2))
- f = f.to(x.dtype)
- if not flip_filter:
- f = f.flip(list(range(f.ndim)))
-
- # Convolve with the filter.
- f = f[np.newaxis, np.newaxis].repeat([num_channels, 1] + [1] * f.ndim)
- if f.ndim == 4:
- x = conv2d_gradfix.conv2d(input=x, weight=f, groups=num_channels)
- else:
- x = conv2d_gradfix.conv2d(input=x, weight=f.unsqueeze(2), groups=num_channels)
- x = conv2d_gradfix.conv2d(input=x, weight=f.unsqueeze(3), groups=num_channels)
-
- # Downsample by throwing away pixels.
- x = x[:, :, ::downy, ::downx]
- return x
-
-#----------------------------------------------------------------------------
-
-_upfirdn2d_cuda_cache = dict()
-
-def _upfirdn2d_cuda(up=1, down=1, padding=0, flip_filter=False, gain=1):
- """Fast CUDA implementation of `upfirdn2d()` using custom ops.
- """
- # Parse arguments.
- upx, upy = _parse_scaling(up)
- downx, downy = _parse_scaling(down)
- padx0, padx1, pady0, pady1 = _parse_padding(padding)
-
- # Lookup from cache.
- key = (upx, upy, downx, downy, padx0, padx1, pady0, pady1, flip_filter, gain)
- if key in _upfirdn2d_cuda_cache:
- return _upfirdn2d_cuda_cache[key]
-
- # Forward op.
- class Upfirdn2dCuda(torch.autograd.Function):
- @staticmethod
- def forward(ctx, x, f): # pylint: disable=arguments-differ
- assert isinstance(x, torch.Tensor) and x.ndim == 4
- if f is None:
- f = torch.ones([1, 1], dtype=torch.float32, device=x.device)
- assert isinstance(f, torch.Tensor) and f.ndim in [1, 2]
- y = x
- if f.ndim == 2:
- y = _plugin.upfirdn2d(y, f, upx, upy, downx, downy, padx0, padx1, pady0, pady1, flip_filter, gain)
- else:
- y = _plugin.upfirdn2d(y, f.unsqueeze(0), upx, 1, downx, 1, padx0, padx1, 0, 0, flip_filter, np.sqrt(gain))
- y = _plugin.upfirdn2d(y, f.unsqueeze(1), 1, upy, 1, downy, 0, 0, pady0, pady1, flip_filter, np.sqrt(gain))
- ctx.save_for_backward(f)
- ctx.x_shape = x.shape
- return y
-
- @staticmethod
- def backward(ctx, dy): # pylint: disable=arguments-differ
- f, = ctx.saved_tensors
- _, _, ih, iw = ctx.x_shape
- _, _, oh, ow = dy.shape
- fw, fh = _get_filter_size(f)
- p = [
- fw - padx0 - 1,
- iw * upx - ow * downx + padx0 - upx + 1,
- fh - pady0 - 1,
- ih * upy - oh * downy + pady0 - upy + 1,
- ]
- dx = None
- df = None
-
- if ctx.needs_input_grad[0]:
- dx = _upfirdn2d_cuda(up=down, down=up, padding=p, flip_filter=(not flip_filter), gain=gain).apply(dy, f)
-
- assert not ctx.needs_input_grad[1]
- return dx, df
-
- # Add to cache.
- _upfirdn2d_cuda_cache[key] = Upfirdn2dCuda
- return Upfirdn2dCuda
-
-#----------------------------------------------------------------------------
-
-def filter2d(x, f, padding=0, flip_filter=False, gain=1, impl='cuda'):
- r"""Filter a batch of 2D images using the given 2D FIR filter.
-
- By default, the result is padded so that its shape matches the input.
- User-specified padding is applied on top of that, with negative values
- indicating cropping. Pixels outside the image are assumed to be zero.
-
- Args:
- x: Float32/float64/float16 input tensor of the shape
- `[batch_size, num_channels, in_height, in_width]`.
- f: Float32 FIR filter of the shape
- `[filter_height, filter_width]` (non-separable),
- `[filter_taps]` (separable), or
- `None` (identity).
- padding: Padding with respect to the output. Can be a single number or a
- list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]`
- (default: 0).
- flip_filter: False = convolution, True = correlation (default: False).
- gain: Overall scaling factor for signal magnitude (default: 1).
- impl: Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`).
-
- Returns:
- Tensor of the shape `[batch_size, num_channels, out_height, out_width]`.
- """
- padx0, padx1, pady0, pady1 = _parse_padding(padding)
- fw, fh = _get_filter_size(f)
- p = [
- padx0 + fw // 2,
- padx1 + (fw - 1) // 2,
- pady0 + fh // 2,
- pady1 + (fh - 1) // 2,
- ]
- return upfirdn2d(x, f, padding=p, flip_filter=flip_filter, gain=gain, impl=impl)
-
-#----------------------------------------------------------------------------
-
-def upsample2d(x, f, up=2, padding=0, flip_filter=False, gain=1, impl='cuda'):
- r"""Upsample a batch of 2D images using the given 2D FIR filter.
-
- By default, the result is padded so that its shape is a multiple of the input.
- User-specified padding is applied on top of that, with negative values
- indicating cropping. Pixels outside the image are assumed to be zero.
-
- Args:
- x: Float32/float64/float16 input tensor of the shape
- `[batch_size, num_channels, in_height, in_width]`.
- f: Float32 FIR filter of the shape
- `[filter_height, filter_width]` (non-separable),
- `[filter_taps]` (separable), or
- `None` (identity).
- up: Integer upsampling factor. Can be a single int or a list/tuple
- `[x, y]` (default: 1).
- padding: Padding with respect to the output. Can be a single number or a
- list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]`
- (default: 0).
- flip_filter: False = convolution, True = correlation (default: False).
- gain: Overall scaling factor for signal magnitude (default: 1).
- impl: Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`).
-
- Returns:
- Tensor of the shape `[batch_size, num_channels, out_height, out_width]`.
- """
- upx, upy = _parse_scaling(up)
- padx0, padx1, pady0, pady1 = _parse_padding(padding)
- fw, fh = _get_filter_size(f)
- p = [
- padx0 + (fw + upx - 1) // 2,
- padx1 + (fw - upx) // 2,
- pady0 + (fh + upy - 1) // 2,
- pady1 + (fh - upy) // 2,
- ]
- return upfirdn2d(x, f, up=up, padding=p, flip_filter=flip_filter, gain=gain*upx*upy, impl=impl)
-
-#----------------------------------------------------------------------------
-
-def downsample2d(x, f, down=2, padding=0, flip_filter=False, gain=1, impl='cuda'):
- r"""Downsample a batch of 2D images using the given 2D FIR filter.
-
- By default, the result is padded so that its shape is a fraction of the input.
- User-specified padding is applied on top of that, with negative values
- indicating cropping. Pixels outside the image are assumed to be zero.
-
- Args:
- x: Float32/float64/float16 input tensor of the shape
- `[batch_size, num_channels, in_height, in_width]`.
- f: Float32 FIR filter of the shape
- `[filter_height, filter_width]` (non-separable),
- `[filter_taps]` (separable), or
- `None` (identity).
- down: Integer downsampling factor. Can be a single int or a list/tuple
- `[x, y]` (default: 1).
- padding: Padding with respect to the input. Can be a single number or a
- list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]`
- (default: 0).
- flip_filter: False = convolution, True = correlation (default: False).
- gain: Overall scaling factor for signal magnitude (default: 1).
- impl: Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`).
-
- Returns:
- Tensor of the shape `[batch_size, num_channels, out_height, out_width]`.
- """
- downx, downy = _parse_scaling(down)
- padx0, padx1, pady0, pady1 = _parse_padding(padding)
- fw, fh = _get_filter_size(f)
- p = [
- padx0 + (fw - downx + 1) // 2,
- padx1 + (fw - downx) // 2,
- pady0 + (fh - downy + 1) // 2,
- pady1 + (fh - downy) // 2,
- ]
- return upfirdn2d(x, f, down=down, padding=p, flip_filter=flip_filter, gain=gain, impl=impl)
-
-#----------------------------------------------------------------------------
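-
-# Illustrative usage (not part of the original file):
-#   f = setup_filter([1., 3., 3., 1.])   # normalized 4x4 binomial lowpass
-#   x = torch.randn(1, 3, 64, 64)
-#   y = upsample2d(x, f, impl='ref')     # -> torch.Size([1, 3, 128, 128])
-#   z = downsample2d(y, f, impl='ref')   # -> torch.Size([1, 3, 64, 64])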
diff --git a/spaces/unilux/ASR_for_Luxembourgish/README.md b/spaces/unilux/ASR_for_Luxembourgish/README.md
deleted file mode 100644
index 3529b69bd8c080fa88134445c43e7fcf6607e279..0000000000000000000000000000000000000000
--- a/spaces/unilux/ASR_for_Luxembourgish/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: ASR for Luxembourgish w/ Whisper & HF pipeline
-emoji: 🏃
-colorFrom: red
-colorTo: indigo
-sdk: gradio
-sdk_version: 3.14.0
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/usbethFlerru/sovits-modelsV2/example/AAA.Logo.2014.v4.10-DVT Keygen.md b/spaces/usbethFlerru/sovits-modelsV2/example/AAA.Logo.2014.v4.10-DVT Keygen.md
deleted file mode 100644
index 7ccf324d1f146500b535a330d455ce57c3c8e72e..0000000000000000000000000000000000000000
--- a/spaces/usbethFlerru/sovits-modelsV2/example/AAA.Logo.2014.v4.10-DVT Keygen.md
+++ /dev/null
@@ -1,9 +0,0 @@
-
-
Founded in 1902, AAA is the first and largest motoring and leisure travel organization in the world. As a not-for-profit, fully tax-paying organization, AAA is also a leader in safety and service.
Get your abdominal aortic aneurysm (AAA) screening benefits: the screening is covered with no cost sharing for you and your dependents, and you do not need to use your Part B benefits to pay for it.
-
These are some of the special considerations for the AAA screening process.
A person may be enrolled in other parts of Medicare, such as Parts A and B, or in other services, such as hearing or vision coverage; however, Medicare does not cover AAA screening as a standalone benefit.
A Medicare beneficiary with an AAA will need a physical every two years; a new physical is needed after the initial screening (within 6 months of enrollment), or when the diagnosis is made or a screening is recommended.
Medicare beneficiaries should discuss screening with their physician if they have known risk factors for AAA, such as:
- they are 65 years or older
- they are African-American
- they have a first-degree relative with an AAA
- they have a history of aortic aneurysm or dissection
- they have had a stroke
- they have a joint injury, bursitis, or gout
AAA offers its more than 50,000 employees around the world the opportunity to create value where mobility meets people. We partner with individuals, governments, and businesses to provide innovative mobility solutions that make communities safer and people more productive. Our members and customers form the foundation of AAA and deliver the profits that support the AAA Foundation for Traffic Safety and member benefit programs.
-
-
\ No newline at end of file
diff --git a/spaces/usbethFlerru/sovits-modelsV2/example/Adegan Seks Awie Dan Erra Fazira Free Download Full !EXCLUSIVE!.md b/spaces/usbethFlerru/sovits-modelsV2/example/Adegan Seks Awie Dan Erra Fazira Free Download Full !EXCLUSIVE!.md
deleted file mode 100644
index 5eb1f470303c6a2ae65f5be968e6284bf6955112..0000000000000000000000000000000000000000
--- a/spaces/usbethFlerru/sovits-modelsV2/example/Adegan Seks Awie Dan Erra Fazira Free Download Full !EXCLUSIVE!.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
Adegan Seks Awie Dan Erra Fazira Free Download Full
-
-
-
-
diff --git a/spaces/usbethFlerru/sovits-modelsV2/example/Descargar Deep Fritz 14 Serial Number BEST.md b/spaces/usbethFlerru/sovits-modelsV2/example/Descargar Deep Fritz 14 Serial Number BEST.md
deleted file mode 100644
index f9b28413b0e875ad4ec5aa8f1f4aeb133489475d..0000000000000000000000000000000000000000
--- a/spaces/usbethFlerru/sovits-modelsV2/example/Descargar Deep Fritz 14 Serial Number BEST.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
-FRITZ 14 serial numbers, cracks and keygens are presented here. No registration is required.
-
-
-
diff --git a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/reference/yolo/utils/callbacks/clearml.md b/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/reference/yolo/utils/callbacks/clearml.md
deleted file mode 100644
index 54b8bc6afd997b69a082e6084d9c21e87b88af58..0000000000000000000000000000000000000000
--- a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/reference/yolo/utils/callbacks/clearml.md
+++ /dev/null
@@ -1,39 +0,0 @@
----
-description: Improve your Ultralytics YOLO model training with callbacks from ClearML. Learn about log debug samples, pre-training routines, validation and more.
-keywords: Ultralytics YOLO, callbacks, log plots, epoch monitoring, training end events
----
-
-## _log_debug_samples
----
-### ::: ultralytics.yolo.utils.callbacks.clearml._log_debug_samples
-