a(c,t))e[r]=c,e[s]=t,r=s;else break}}return n}function a(e,n){var t=e.sortIndex-n.sortIndex;return 0!==t?t:e.id-n.id}if("object"==typeof performance&&"function"==typeof performance.now){var u,o=performance;n.unstable_now=function(){return o.now()}}else{var i=Date,s=i.now();n.unstable_now=function(){return i.now()-s}}var c=[],f=[],d=1,p=null,m=3,h=!1,g=!1,v=!1,y="function"==typeof setTimeout?setTimeout:null,b="function"==typeof clearTimeout?clearTimeout:null,k="undefined"!=typeof setImmediate?setImmediate:null;function w(e){for(var n=r(f);null!==n;){if(null===n.callback)l(f);else if(n.startTime<=e)l(f),n.sortIndex=n.expirationTime,t(c,n);else break;n=r(f)}}function S(e){if(v=!1,w(e),!g){if(null!==r(c))g=!0,M(x);else{var n=r(f);null!==n&&F(S,n.startTime-e)}}}function x(e,t){g=!1,v&&(v=!1,b(_),_=-1),h=!0;var a=m;try{for(w(t),p=r(c);null!==p&&(!(p.expirationTime>t)||e&&!z());){var u=p.callback;if("function"==typeof u){p.callback=null,m=p.priorityLevel;var o=u(p.expirationTime<=t);t=n.unstable_now(),"function"==typeof o?p.callback=o:p===r(c)&&l(c),w(t)}else l(c);p=r(c)}if(null!==p)var i=!0;else{var s=r(f);null!==s&&F(S,s.startTime-t),i=!1}return i}finally{p=null,m=a,h=!1}}"undefined"!=typeof navigator&&void 0!==navigator.scheduling&&void 0!==navigator.scheduling.isInputPending&&navigator.scheduling.isInputPending.bind(navigator.scheduling);var E=!1,C=null,_=-1,P=5,N=-1;function z(){return!(n.unstable_now()-Ne||125u?(e.sortIndex=a,t(f,e),null===r(c)&&e===r(f)&&(v?(b(_),_=-1):v=!0,F(S,a-u))):(e.sortIndex=o,t(c,e),g||h||(g=!0,M(x))),e},n.unstable_shouldYield=z,n.unstable_wrapCallback=function(e){var n=m;return function(){var t=m;m=n;try{return e.apply(this,arguments)}finally{m=t}}}},63840:function(e,n,t){e.exports=t(60053)}}]);
\ No newline at end of file
diff --git a/spaces/hunger11243/VITS-Umamusume-voice-synthesizer/ONNXVITS_modules.py b/spaces/hunger11243/VITS-Umamusume-voice-synthesizer/ONNXVITS_modules.py
deleted file mode 100644
index 6cf676ce37c1eaf8428c4094e749f862182cb0c3..0000000000000000000000000000000000000000
--- a/spaces/hunger11243/VITS-Umamusume-voice-synthesizer/ONNXVITS_modules.py
+++ /dev/null
@@ -1,390 +0,0 @@
-import copy
-import math
-import numpy as np
-import scipy
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
-from torch.nn.utils import weight_norm, remove_weight_norm
-
-import commons
-from commons import init_weights, get_padding
-from ONNXVITS_transforms import piecewise_rational_quadratic_transform
-
-
-LRELU_SLOPE = 0.1
-
-
-class LayerNorm(nn.Module):
- def __init__(self, channels, eps=1e-5):
- super().__init__()
- self.channels = channels
- self.eps = eps
-
- self.gamma = nn.Parameter(torch.ones(channels))
- self.beta = nn.Parameter(torch.zeros(channels))
-
- def forward(self, x):
- x = x.transpose(1, -1)
- x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
- return x.transpose(1, -1)
-
-
-class ConvReluNorm(nn.Module):
- def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout):
- super().__init__()
- self.in_channels = in_channels
- self.hidden_channels = hidden_channels
- self.out_channels = out_channels
- self.kernel_size = kernel_size
- self.n_layers = n_layers
- self.p_dropout = p_dropout
-        assert n_layers > 1, "Number of layers should be larger than 1."
-
- self.conv_layers = nn.ModuleList()
- self.norm_layers = nn.ModuleList()
- self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size//2))
- self.norm_layers.append(LayerNorm(hidden_channels))
- self.relu_drop = nn.Sequential(
- nn.ReLU(),
- nn.Dropout(p_dropout))
- for _ in range(n_layers-1):
- self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size//2))
- self.norm_layers.append(LayerNorm(hidden_channels))
- self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
- self.proj.weight.data.zero_()
- self.proj.bias.data.zero_()
-
- def forward(self, x, x_mask):
- x_org = x
- for i in range(self.n_layers):
- x = self.conv_layers[i](x * x_mask)
- x = self.norm_layers[i](x)
- x = self.relu_drop(x)
- x = x_org + self.proj(x)
- return x * x_mask
-
-
-class DDSConv(nn.Module):
- """
-    Dilated and Depth-Separable Convolution
- """
- def __init__(self, channels, kernel_size, n_layers, p_dropout=0.):
- super().__init__()
- self.channels = channels
- self.kernel_size = kernel_size
- self.n_layers = n_layers
- self.p_dropout = p_dropout
-
- self.drop = nn.Dropout(p_dropout)
- self.convs_sep = nn.ModuleList()
- self.convs_1x1 = nn.ModuleList()
- self.norms_1 = nn.ModuleList()
- self.norms_2 = nn.ModuleList()
- for i in range(n_layers):
- dilation = kernel_size ** i
- padding = (kernel_size * dilation - dilation) // 2
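-            # e.g. kernel_size=3 at layer i=2: dilation=3**2=9, padding=(3*9-9)//2=9,
-            # i.e. "same" padding, so each layer preserves the sequence length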
- self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size,
- groups=channels, dilation=dilation, padding=padding
- ))
- self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
- self.norms_1.append(LayerNorm(channels))
- self.norms_2.append(LayerNorm(channels))
-
- def forward(self, x, x_mask, g=None):
- if g is not None:
- x = x + g
- for i in range(self.n_layers):
- y = self.convs_sep[i](x * x_mask)
- y = self.norms_1[i](y)
- y = F.gelu(y)
- y = self.convs_1x1[i](y)
- y = self.norms_2[i](y)
- y = F.gelu(y)
- y = self.drop(y)
- x = x + y
- return x * x_mask
-
-
-class WN(torch.nn.Module):
- def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0):
- super(WN, self).__init__()
- assert(kernel_size % 2 == 1)
-        self.hidden_channels = hidden_channels
-        self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.gin_channels = gin_channels
- self.p_dropout = p_dropout
-
- self.in_layers = torch.nn.ModuleList()
- self.res_skip_layers = torch.nn.ModuleList()
- self.drop = nn.Dropout(p_dropout)
-
- if gin_channels != 0:
- cond_layer = torch.nn.Conv1d(gin_channels, 2*hidden_channels*n_layers, 1)
- self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')
-
- for i in range(n_layers):
- dilation = dilation_rate ** i
- padding = int((kernel_size * dilation - dilation) / 2)
- in_layer = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, kernel_size,
- dilation=dilation, padding=padding)
- in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')
- self.in_layers.append(in_layer)
-
- # last one is not necessary
- if i < n_layers - 1:
- res_skip_channels = 2 * hidden_channels
- else:
- res_skip_channels = hidden_channels
-
- res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
- res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight')
- self.res_skip_layers.append(res_skip_layer)
-
- def forward(self, x, x_mask, g=None, **kwargs):
- output = torch.zeros_like(x)
- n_channels_tensor = torch.IntTensor([self.hidden_channels])
-
- if g is not None:
- g = self.cond_layer(g)
-
- for i in range(self.n_layers):
- x_in = self.in_layers[i](x)
- if g is not None:
- cond_offset = i * 2 * self.hidden_channels
- g_l = g[:,cond_offset:cond_offset+2*self.hidden_channels,:]
- else:
- g_l = torch.zeros_like(x_in)
-
- acts = commons.fused_add_tanh_sigmoid_multiply(
- x_in,
- g_l,
- n_channels_tensor)
- acts = self.drop(acts)
-
- res_skip_acts = self.res_skip_layers[i](acts)
- if i < self.n_layers - 1:
- res_acts = res_skip_acts[:,:self.hidden_channels,:]
- x = (x + res_acts) * x_mask
- output = output + res_skip_acts[:,self.hidden_channels:,:]
- else:
- output = output + res_skip_acts
- return output * x_mask
-
- def remove_weight_norm(self):
- if self.gin_channels != 0:
- torch.nn.utils.remove_weight_norm(self.cond_layer)
- for l in self.in_layers:
- torch.nn.utils.remove_weight_norm(l)
- for l in self.res_skip_layers:
- torch.nn.utils.remove_weight_norm(l)
-
-
-class ResBlock1(torch.nn.Module):
- def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
- super(ResBlock1, self).__init__()
- self.convs1 = nn.ModuleList([
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
- padding=get_padding(kernel_size, dilation[0]))),
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
- padding=get_padding(kernel_size, dilation[1]))),
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
- padding=get_padding(kernel_size, dilation[2])))
- ])
- self.convs1.apply(init_weights)
-
- self.convs2 = nn.ModuleList([
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
- padding=get_padding(kernel_size, 1))),
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
- padding=get_padding(kernel_size, 1))),
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
- padding=get_padding(kernel_size, 1)))
- ])
- self.convs2.apply(init_weights)
-
- def forward(self, x, x_mask=None):
- for c1, c2 in zip(self.convs1, self.convs2):
- xt = F.leaky_relu(x, LRELU_SLOPE)
- if x_mask is not None:
- xt = xt * x_mask
- xt = c1(xt)
- xt = F.leaky_relu(xt, LRELU_SLOPE)
- if x_mask is not None:
- xt = xt * x_mask
- xt = c2(xt)
- x = xt + x
- if x_mask is not None:
- x = x * x_mask
- return x
-
- def remove_weight_norm(self):
- for l in self.convs1:
- remove_weight_norm(l)
- for l in self.convs2:
- remove_weight_norm(l)
-
-
-class ResBlock2(torch.nn.Module):
- def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
- super(ResBlock2, self).__init__()
- self.convs = nn.ModuleList([
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
- padding=get_padding(kernel_size, dilation[0]))),
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
- padding=get_padding(kernel_size, dilation[1])))
- ])
- self.convs.apply(init_weights)
-
- def forward(self, x, x_mask=None):
- for c in self.convs:
- xt = F.leaky_relu(x, LRELU_SLOPE)
- if x_mask is not None:
- xt = xt * x_mask
- xt = c(xt)
- x = xt + x
- if x_mask is not None:
- x = x * x_mask
- return x
-
- def remove_weight_norm(self):
- for l in self.convs:
- remove_weight_norm(l)
-
-
-class Log(nn.Module):
- def forward(self, x, x_mask, reverse=False, **kwargs):
- if not reverse:
- y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask
- logdet = torch.sum(-y, [1, 2])
- return y, logdet
- else:
- x = torch.exp(x) * x_mask
- return x
-
-
-class Flip(nn.Module):
- def forward(self, x, *args, reverse=False, **kwargs):
- x = torch.flip(x, [1])
- if not reverse:
- logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device)
- return x, logdet
- else:
- return x
-
-
-class ElementwiseAffine(nn.Module):
- def __init__(self, channels):
- super().__init__()
- self.channels = channels
- self.m = nn.Parameter(torch.zeros(channels,1))
- self.logs = nn.Parameter(torch.zeros(channels,1))
-
- def forward(self, x, x_mask, reverse=False, **kwargs):
- if not reverse:
- y = self.m + torch.exp(self.logs) * x
- y = y * x_mask
- logdet = torch.sum(self.logs * x_mask, [1,2])
- return y, logdet
- else:
- x = (x - self.m) * torch.exp(-self.logs) * x_mask
- return x
-
-
-class ResidualCouplingLayer(nn.Module):
- def __init__(self,
- channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- p_dropout=0,
- gin_channels=0,
- mean_only=False):
- assert channels % 2 == 0, "channels should be divisible by 2"
- super().__init__()
- self.channels = channels
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.half_channels = channels // 2
- self.mean_only = mean_only
-
- self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
- self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels)
- self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
- self.post.weight.data.zero_()
- self.post.bias.data.zero_()
-
- def forward(self, x, x_mask, g=None, reverse=False):
- x0, x1 = torch.split(x, [self.half_channels]*2, 1)
- h = self.pre(x0) * x_mask
- h = self.enc(h, x_mask, g=g)
- stats = self.post(h) * x_mask
- if not self.mean_only:
- m, logs = torch.split(stats, [self.half_channels]*2, 1)
- else:
- m = stats
- logs = torch.zeros_like(m)
-
- if not reverse:
- x1 = m + x1 * torch.exp(logs) * x_mask
- x = torch.cat([x0, x1], 1)
- logdet = torch.sum(logs, [1,2])
- return x, logdet
- else:
- x1 = (x1 - m) * torch.exp(-logs) * x_mask
- x = torch.cat([x0, x1], 1)
- return x
-
-
-class ConvFlow(nn.Module):
- def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0):
- super().__init__()
- self.in_channels = in_channels
- self.filter_channels = filter_channels
- self.kernel_size = kernel_size
- self.n_layers = n_layers
- self.num_bins = num_bins
- self.tail_bound = tail_bound
- self.half_channels = in_channels // 2
-
- self.pre = nn.Conv1d(self.half_channels, filter_channels, 1)
- self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.)
- self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1)
- self.proj.weight.data.zero_()
- self.proj.bias.data.zero_()
-
- def forward(self, x, x_mask, g=None, reverse=False):
- x0, x1 = torch.split(x, [self.half_channels]*2, 1)
- h = self.pre(x0)
- h = self.convs(h, x_mask, g=g)
- h = self.proj(h) * x_mask
-
- b, c, t = x0.shape
- h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?]
-
- unnormalized_widths = h[..., :self.num_bins] / math.sqrt(self.filter_channels)
- unnormalized_heights = h[..., self.num_bins:2*self.num_bins] / math.sqrt(self.filter_channels)
- unnormalized_derivatives = h[..., 2 * self.num_bins:]
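-        # num_bins widths + num_bins heights + (num_bins - 1) knot derivatives for the
-        # linear-tail spline, matching the num_bins * 3 - 1 channels produced by self.proj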
-
- x1, logabsdet = piecewise_rational_quadratic_transform(x1,
- unnormalized_widths,
- unnormalized_heights,
- unnormalized_derivatives,
- inverse=reverse,
- tails='linear',
- tail_bound=self.tail_bound
- )
-
- x = torch.cat([x0, x1], 1) * x_mask
- logdet = torch.sum(logabsdet * x_mask, [1,2])
- if not reverse:
- return x, logdet
- else:
- return x
diff --git a/spaces/idosal/oai-proxy/src/keys.ts b/spaces/idosal/oai-proxy/src/keys.ts
deleted file mode 100644
index 2ccbc9f3f1d3fd0670afa4a41ecc33cb76b1c38d..0000000000000000000000000000000000000000
--- a/spaces/idosal/oai-proxy/src/keys.ts
+++ /dev/null
@@ -1,130 +0,0 @@
-/* Manages OpenAI API keys. Tracks usage, disables expired keys, and provides
-round-robin access to keys. Keys are stored in the OPENAI_KEY environment
-variable, either as a single key, or a base64-encoded JSON array of keys.*/
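-// A minimal sketch of producing the base64-encoded form (the key values below are
-// hypothetical placeholders, not real keys):
-//   echo -n '[{"key":"sk-AAA","isTrial":true},{"key":"sk-BBB","isGpt4":true}]' | base64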
-import { logger } from "./logger";
-import crypto from "crypto";
-
-/** Represents a key stored in the OPENAI_KEY environment variable. */
-type KeySchema = {
- /** The OpenAI API key itself. */
- key: string;
- /** Whether this is a free trial key. These are prioritized over paid keys if they can fulfill the request. */
- isTrial?: boolean;
- /** Whether this key has been provisioned for GPT-4. */
- isGpt4?: boolean;
-};
-
-/** Runtime information about a key. */
-export type Key = KeySchema & {
- /** Whether this key is currently disabled. We set this if we get a 429 or 401 response from OpenAI. */
- isDisabled?: boolean;
- /** Threshold at which a warning email will be sent by OpenAI. */
- softLimit?: number;
- /** Threshold at which the key will be disabled because it has reached the user-defined limit. */
- hardLimit?: number;
- /** The maximum quota allocated to this key by OpenAI. */
- systemHardLimit?: number;
- /** The current usage of this key. */
- usage?: number;
- /** The number of prompts that have been sent with this key. */
- promptCount: number;
- /** The time at which this key was last used. */
- lastUsed: number;
- /** Key hash for displaying usage in the dashboard. */
- hash: string;
-};
-
-const keyPool: Key[] = [];
-
-function init() {
- const keyString = process.env.OPENAI_KEY;
- if (!keyString?.trim()) {
- throw new Error("OPENAI_KEY environment variable is not set");
- }
- let keyList: KeySchema[];
- try {
- const decoded = Buffer.from(keyString, "base64").toString();
- keyList = JSON.parse(decoded) as KeySchema[];
- } catch (err) {
- logger.info("OPENAI_KEY is not base64-encoded JSON, assuming bare key");
- // We don't actually know if bare keys are paid/GPT-4 so we assume they are
- keyList = [{ key: keyString, isTrial: false, isGpt4: true }];
- }
- for (const key of keyList) {
- const newKey = {
- ...key,
- isDisabled: false,
- softLimit: 0,
- hardLimit: 0,
- systemHardLimit: 0,
- usage: 0,
- lastUsed: 0,
- promptCount: 0,
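-      // keep only the first 6 hex chars (24 bits) of the SHA-256 digest: a short,
-      // non-reversible identifier for logs and the dashboard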
- hash: crypto
- .createHash("sha256")
- .update(key.key)
- .digest("hex")
- .slice(0, 6),
- };
- keyPool.push(newKey);
-
- logger.info({ key: newKey.hash }, "Key added");
- }
- // TODO: check each key's usage upon startup.
-}
-
-function list() {
- return keyPool.map((key) => ({
- ...key,
- key: undefined,
- }));
-}
-
-function disable(key: Key) {
- const keyFromPool = keyPool.find((k) => k.key === key.key)!;
- if (keyFromPool.isDisabled) return;
- keyFromPool.isDisabled = true;
- logger.warn({ key: key.hash }, "Key disabled");
-}
-
-function anyAvailable() {
- return keyPool.some((key) => !key.isDisabled);
-}
-
-function get(model: string) {
- const needsGpt4Key = model.startsWith("gpt-4");
- const availableKeys = keyPool.filter(
- (key) => !key.isDisabled && (!needsGpt4Key || key.isGpt4)
- );
- if (availableKeys.length === 0) {
- let message = "No keys available. Please add more keys.";
- if (needsGpt4Key) {
- message =
- "No GPT-4 keys available. Please add more keys or use a non-GPT-4 model.";
- }
- logger.error(message);
- throw new Error(message);
- }
-
- // Prioritize trial keys
- const trialKeys = availableKeys.filter((key) => key.isTrial);
- if (trialKeys.length > 0) {
- logger.info({ key: trialKeys[0].hash }, "Using trial key");
- trialKeys[0].lastUsed = Date.now();
- return trialKeys[0];
- }
-
- // Otherwise, return the oldest key
- const oldestKey = availableKeys.sort((a, b) => a.lastUsed - b.lastUsed)[0];
- logger.info({ key: oldestKey.hash }, "Assigning key to request.");
- oldestKey.lastUsed = Date.now();
- return { ...oldestKey };
-}
-
-function incrementPrompt(keyHash?: string) {
- if (!keyHash) return;
- const key = keyPool.find((k) => k.hash === keyHash)!;
- key.promptCount++;
-}
-
-export const keys = { init, list, get, anyAvailable, disable, incrementPrompt };
diff --git a/spaces/ifey/chatdemo/app.py b/spaces/ifey/chatdemo/app.py
deleted file mode 100644
index 6477ced00ee2adc630415b5387f7a9132e9afc0e..0000000000000000000000000000000000000000
--- a/spaces/ifey/chatdemo/app.py
+++ /dev/null
@@ -1,10 +0,0 @@
-import gradio as gr
-import os
-import time
-
-from UI.MainBlocks import MainBlocks
-
-# Chatbot demo with multimodal input (text, markdown, LaTeX, code blocks, image, audio, & video). Plus shows support for streaming text.
-
-if __name__ == "__main__":
- mainBlocks1 = MainBlocks("test")
diff --git a/spaces/inamXcontru/PoeticTTS/Adobe Premiere Pro CC 2018 V12.0.1.69 (x64) Crack Serial Key Keygen REPACK.md b/spaces/inamXcontru/PoeticTTS/Adobe Premiere Pro CC 2018 V12.0.1.69 (x64) Crack Serial Key Keygen REPACK.md
deleted file mode 100644
index e60f689cca6e991191b7473943d8939ac526faf0..0000000000000000000000000000000000000000
--- a/spaces/inamXcontru/PoeticTTS/Adobe Premiere Pro CC 2018 V12.0.1.69 (x64) Crack Serial Key Keygen REPACK.md
+++ /dev/null
@@ -1,9 +0,0 @@
-
-Download adobe premiere cc 2018 for pc windows 8 ultimate 64 bit free download at www.softasm.com. Register now to download. Your Premiere Pro CC 2018 v12.0.1.69 (x64) [Crack Serial Keygen] Keygen Unlocked For You. No Microsoft Anywhere. And the.
-Adobe Premiere Pro CC 2018 v12.0.1.69 (x64) Adobe Premiere Pro CC 2018 (x64) Serial Keygen. Adobe Premiere Pro CC 2018 v12.0.1.69 (x64) Adobe Premiere Pro CC 2018 (x64) Serial Key. Available as a standalone or as a part of Adobe Creative Suite. Download Adobe Premiere Pro CC 2018 v12.0.1.69 (x64) Download Adobe Premiere Pro CC 2018 v12.0.1.69 (x64).
-Adobe Premiere Pro CC 2018 v12.0.1.69 (x64) Crack Serial Key keygen
Download File … https://gohhs.com/2uz4Vi
-Download link for Adobe Premiere Pro CC 2018 x64 v12.0.1 serial key. Full Cracked Adobe Premiere Pro CC 2018 v12.0.1.69. Buy Adobe Premiere Pro CC 2018 x64 v12.0.1.69 [Crack Serial Key]. Anil Kumar on Adobe Premiere Pro CC 2018 Serial Number. Free download of Adobe Premiere Pro CC 2018 v12.0.1.69 full cracked with serial number. Download Adobe Premiere Pro CC 2018 (x64) Serial Keygen.
-You can download the Adobe Premiere Pro CC 2018 v12.0.1.69 (x64) Serial Key [Crack Full Version] For.Best quality Adobe Premiere Pro CC 2018 v12.0.1.69 Full. by [Anil Kumar] [LinkedIn] [Skype] [Google+] [Facebook] [Twitter] [YouTube] [Tumblr]. Step by step [Adobe Premiere Pro CC 2018 x64] - Duration: 10:54. Adobe Premiere Pro CC 2018 Serial Number. [Adobe Premiere Pro CC 2018] [30 May 2020]. By Grace (KO5). No (4).
-http://www.haswap.com/emoticons/smile.png, How do I find serial number / activation code for Adobe Premiere Pro CS5. Premiere Pro CS5 serial number or keygen is available here at our site. We have the largest serial numbers data base.
899543212b
-
-
\ No newline at end of file
diff --git a/spaces/innovatorved/whisper.api/app/tests/test_core/test_database.py b/spaces/innovatorved/whisper.api/app/tests/test_core/test_database.py
deleted file mode 100644
index 9cc2019fc8c10a3a6ddc24f19f0e27ae138bb2ad..0000000000000000000000000000000000000000
--- a/spaces/innovatorved/whisper.api/app/tests/test_core/test_database.py
+++ /dev/null
@@ -1,19 +0,0 @@
-from sqlalchemy.orm import Session
-from app.core.database import SessionLocal
-from app.core.models.User import UserController
-
-from app.tests.utils.utils import fake_user_details
-
-
-def test_create_user(db: Session = SessionLocal()):
- test_user_details = fake_user_details()
- USER = UserController(db)
- USER.create(
- test_user_details["username"],
- test_user_details["email"],
- test_user_details["password"],
- )
- data = USER.details()
- assert data.id is not None
- assert data.email == test_user_details["email"]
- assert data.is_active is True
diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/4g-systems-xsbox-go-firmware-update.md b/spaces/inplisQlawa/anything-midjourney-v4-1/4g-systems-xsbox-go-firmware-update.md
deleted file mode 100644
index a9eb3398b55a7851c7779dbdf0a89122228bc58e..0000000000000000000000000000000000000000
--- a/spaces/inplisQlawa/anything-midjourney-v4-1/4g-systems-xsbox-go-firmware-update.md
+++ /dev/null
@@ -1,10 +0,0 @@
-4g-systems-xsbox-go-firmware-update
Download ✪✪✪ https://urlin.us/2uEwPS
-
-4g-systems-xsbox-go-firmware-update FIXED. DOWNLOAD: xbox system software update, how to update xbox one system software, . Xbox One software update and how to.
-The system software update will be released within a few days or weeks.
-Instructions for Notebook Dns here.
-Download System software update for Xbox 360 - http: //www.
-To update your Xbox 360 software using a wireless device, follow the instructions below: Make sure the wireless features are turned on on your Xbox One device and Xbox 360 console. 8a78ff9644
-
-
-
diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Ajab-Prem-Ki-Ghazab-Kahani-Full-Movie-Torrent-FULL.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Ajab-Prem-Ki-Ghazab-Kahani-Full-Movie-Torrent-FULL.md
deleted file mode 100644
index 4386348468243d239e911e05b6eef59abe6b1eae..0000000000000000000000000000000000000000
--- a/spaces/inplisQlawa/anything-midjourney-v4-1/Ajab-Prem-Ki-Ghazab-Kahani-Full-Movie-Torrent-FULL.md
+++ /dev/null
@@ -1,86 +0,0 @@
-## Ajab Prem Ki Ghazab Kahani Full Movie Torrent
-
-
-
-
-
-
-
-
-
-**CLICK HERE >>>>> [https://urlcod.com/2txvLT](https://urlcod.com/2txvLT)**
-
-
-
-
-
-
-
-
-
-
-
- Here is what I came up with:
-
-# Ajab Prem Ki Ghazab Kahani Full Movie Torrent: How to Download and Watch Online
-
-
-
-Ajab Prem Ki Ghazab Kahani is a 2009 Bollywood romantic comedy film starring Ranbir Kapoor and Katrina Kaif. The film follows the hilarious adventures of Prem, a free-spirited Hindu boy who falls in love with Jenny, a Christian girl. However, when he discovers that she is in love with someone else, he decides to help her elope with her lover, unaware of the chaos that will ensue.
-
-
-
-If you are looking for a way to download and watch Ajab Prem Ki Ghazab Kahani full movie online, you might be interested in using a torrent site. Torrent sites are platforms that allow users to share and download files, such as movies, music, games, etc., using a peer-to-peer network. However, torrenting is not legal in many countries and can expose you to various risks, such as malware, viruses, copyright infringement, etc.
-
-
-
-Therefore, before you use any torrent site, make sure you are aware of the laws and regulations in your country and use a reliable VPN service to protect your privacy and security online. A VPN (virtual private network) is a software that encrypts your internet traffic and masks your IP address, making it harder for anyone to track or monitor your online activities.
-
-
-
-One of the torrent sites that you can use to download Ajab Prem Ki Ghazab Kahani full movie is Archive.org[^1^] [^2^]. Archive.org is a non-profit digital library that offers free access to millions of books, movies, music, software, etc. You can find two versions of Ajab Prem Ki Ghazab Kahani on Archive.org: one in 720p resolution[^1^] and one in HEVC format[^2^]. Both versions have English subtitles and are available for free download and streaming.
-
-
-
-To download Ajab Prem Ki Ghazab Kahani full movie from Archive.org, follow these steps:
-
-
-
-1. Go to Archive.org and search for "Ajab Prem Ki Ghazab Kahani" in the search bar.
-
-2. Select the version that you want to download: 720p or HEVC.
-
-3. On the movie page, click on the "TORRENT" link under the "DOWNLOAD OPTIONS" section.
-
-4. A torrent file will be downloaded to your device. Open it with a torrent client, such as BitTorrent or uTorrent.
-
-5. Select the destination folder where you want to save the movie file and start the download.
-
-6. Once the download is complete, you can watch Ajab Prem Ki Ghazab Kahani full movie on your device using any media player.
-
-
-
-Alternatively, you can also watch Ajab Prem Ki Ghazab Kahani full movie online on YouTube[^3^]. YouTube is a popular video-sharing platform that offers a variety of content, including movies, TV shows, music videos, etc. You can find Ajab Prem Ki Ghazab Kahani on YouTube by searching for its title or by following this link: https://www.youtube.com/watch?v=dbTkItiju1w
-
-
-
-However, keep in mind that YouTube may not have the best quality or subtitles for Ajab Prem Ki Ghazab Kahani. Moreover, YouTube may remove or block the movie at any time due to copyright issues. Therefore, if you want to watch Ajab Prem Ki Ghazab Kahani full movie online with better quality and subtitles, you might want to consider using a streaming service that offers legal access to Bollywood movies.
-
-
-
-Some of the streaming services that you can use to watch Ajab Prem Ki Ghazab Kahani full movie online are:
-
-
-
-- Netflix: Netflix is a global streaming service that offers a wide range of content, including movies, TV shows, documentaries, etc. You can watch Ajab Prem Ki Ghazab Kahani on Netflix if you have a subscription and if it is available in your region.
-
-- Amazon Prime Video: Amazon Prime Video is another streaming service that offers a variety of content, including movies, TV shows, originals, etc. You can watch Ajab 1b8d091108
-
-
-
-
-
-
-
-
-
diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Gta San Andreas Bosna Mod For Download.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Gta San Andreas Bosna Mod For Download.md
deleted file mode 100644
index 528557ab9d0a3ce5fa524df696be0b443f63c41d..0000000000000000000000000000000000000000
--- a/spaces/inplisQlawa/anything-midjourney-v4-1/Gta San Andreas Bosna Mod For Download.md
+++ /dev/null
@@ -1,6 +0,0 @@
-gta san andreas bosna mod for download
Download File 🗸 https://urlin.us/2uEyyG
-
-Download Video View Source & Comments. The police training system in Serbia ... Serbian Police Helicopter for GTA San Andreas. A Belgrade war crimes court ... 1fdad05405
-
-
-
diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Music Speed Changer Apk Mod Unlock All.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Music Speed Changer Apk Mod Unlock All.md
deleted file mode 100644
index 408aa144084c4e021aaa6c328545dcc4236ae050..0000000000000000000000000000000000000000
--- a/spaces/inplisQlawa/anything-midjourney-v4-1/Music Speed Changer Apk Mod Unlock All.md
+++ /dev/null
@@ -1,6 +0,0 @@
-Music Speed Changer Apk Mod Unlock All
Download File ✫✫✫ https://urlin.us/2uEwij
-
-Change Android app icon, name and version. ... All applications for Android phones are distributed as APK Files. ... APK Editor is a powerful tool that can edit/hack apk files to do lots of things for fun. ... Edit clips, apply filters and add music; Flip, rotate and adjust video speed; Enhance pictures and create photo collages;Â ... 4d29de3e1b
-
-
-
diff --git a/spaces/inreVtussa/clothingai/Examples/Adobe Acrobat XI Pro 11.0.15 Multilingual Incl Patch [SadeemPC].zip LINK.md b/spaces/inreVtussa/clothingai/Examples/Adobe Acrobat XI Pro 11.0.15 Multilingual Incl Patch [SadeemPC].zip LINK.md
deleted file mode 100644
index 1cb5cc4150f4d66addef5e4e77084d3a4709bf02..0000000000000000000000000000000000000000
--- a/spaces/inreVtussa/clothingai/Examples/Adobe Acrobat XI Pro 11.0.15 Multilingual Incl Patch [SadeemPC].zip LINK.md
+++ /dev/null
@@ -1,6 +0,0 @@
-Adobe Acrobat XI Pro 11.0.15 Multilingual Incl Patch [SadeemPC].zip
Download ———>>> https://tiurll.com/2uCkGc
-
-... incl patch, adobe acrobat xi pro 11.0.15 multilingual incl patch, adobe acrobat xi pro 11.0.15 multilingual incl patch sadeempc .zip, ... 4d29de3e1b
-
-
-
diff --git a/spaces/inreVtussa/clothingai/Examples/Descargar Hysys 72 Gratis.md b/spaces/inreVtussa/clothingai/Examples/Descargar Hysys 72 Gratis.md
deleted file mode 100644
index 7c72e7c7d2430849ba4a8cfad7aa41b949250117..0000000000000000000000000000000000000000
--- a/spaces/inreVtussa/clothingai/Examples/Descargar Hysys 72 Gratis.md
+++ /dev/null
@@ -1,6 +0,0 @@
-Descargar Hysys 72 Gratis
DOWNLOAD ->>> https://tiurll.com/2uCk8e
-
-How to install aspen 72 windows eight/seven 64/32 bit. Instalacion de ... Descargar e instalar aspen hysys 8.0 windows 7, 8, 8.1 y 10 para 32 bit o 64 bit. Hysys 3.2 tutorial 7.1 ... Free download hysys 3.2 dan cara menginstallnya update v. 8.4 . 4d29de3e1b
-
-
-
diff --git a/spaces/jbilcke-hf/MusicGen/audiocraft/data/audio_utils.py b/spaces/jbilcke-hf/MusicGen/audiocraft/data/audio_utils.py
deleted file mode 100644
index 76d4bc2a33ce722d879db2af33cd1336bd6b1fb3..0000000000000000000000000000000000000000
--- a/spaces/jbilcke-hf/MusicGen/audiocraft/data/audio_utils.py
+++ /dev/null
@@ -1,174 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-import sys
-import typing as tp
-
-import julius
-import torch
-import torchaudio
-
-
-def convert_audio_channels(wav: torch.Tensor, channels: int = 2) -> torch.Tensor:
- """Convert audio to the given number of channels.
-
- Args:
- wav (torch.Tensor): Audio wave of shape [B, C, T].
- channels (int): Expected number of channels as output.
- Returns:
- torch.Tensor: Downmixed or unchanged audio wave [B, C, T].
- """
- *shape, src_channels, length = wav.shape
- if src_channels == channels:
- pass
- elif channels == 1:
- # Case 1:
- # The caller asked 1-channel audio, and the stream has multiple
- # channels, downmix all channels.
- wav = wav.mean(dim=-2, keepdim=True)
- elif src_channels == 1:
- # Case 2:
- # The caller asked for multiple channels, but the input file has
- # a single channel, replicate the audio over all channels.
- wav = wav.expand(*shape, channels, length)
- elif src_channels >= channels:
- # Case 3:
- # The caller asked for multiple channels, and the input file has
- # more channels than requested. In that case return the first channels.
- wav = wav[..., :channels, :]
- else:
- # Case 4: What is a reasonable choice here?
- raise ValueError('The audio file has less channels than requested but is not mono.')
- return wav
-
-
-def convert_audio(wav: torch.Tensor, from_rate: float,
- to_rate: float, to_channels: int) -> torch.Tensor:
- """Convert audio to new sample rate and number of audio channels.
- """
- wav = julius.resample_frac(wav, int(from_rate), int(to_rate))
- wav = convert_audio_channels(wav, to_channels)
- return wav
-
-
-def normalize_loudness(wav: torch.Tensor, sample_rate: int, loudness_headroom_db: float = 14,
- loudness_compressor: bool = False, energy_floor: float = 2e-3):
- """Normalize an input signal to a user loudness in dB LKFS.
- Audio loudness is defined according to the ITU-R BS.1770-4 recommendation.
-
- Args:
- wav (torch.Tensor): Input multichannel audio data.
- sample_rate (int): Sample rate.
- loudness_headroom_db (float): Target loudness of the output in dB LUFS.
- loudness_compressor (bool): Uses tanh for soft clipping.
- energy_floor (float): anything below that RMS level will not be rescaled.
- Returns:
- output (torch.Tensor): Loudness normalized output data.
- """
- energy = wav.pow(2).mean().sqrt().item()
- if energy < energy_floor:
- return wav
- transform = torchaudio.transforms.Loudness(sample_rate)
- input_loudness_db = transform(wav).item()
- # calculate the gain needed to scale to the desired loudness level
- delta_loudness = -loudness_headroom_db - input_loudness_db
- gain = 10.0 ** (delta_loudness / 20.0)
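-    # dB to linear amplitude: e.g. delta_loudness = -6 dB gives gain = 10 ** (-6 / 20) ≈ 0.5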
- output = gain * wav
- if loudness_compressor:
- output = torch.tanh(output)
- assert output.isfinite().all(), (input_loudness_db, wav.pow(2).mean().sqrt())
- return output
-
-
-def _clip_wav(wav: torch.Tensor, log_clipping: bool = False, stem_name: tp.Optional[str] = None) -> None:
- """Utility function to clip the audio with logging if specified."""
- max_scale = wav.abs().max()
- if log_clipping and max_scale > 1:
- clamp_prob = (wav.abs() > 1).float().mean().item()
- print(f"CLIPPING {stem_name or ''} happening with proba (a bit of clipping is okay):",
- clamp_prob, "maximum scale: ", max_scale.item(), file=sys.stderr)
- wav.clamp_(-1, 1)
-
-
-def normalize_audio(wav: torch.Tensor, normalize: bool = True,
- strategy: str = 'peak', peak_clip_headroom_db: float = 1,
- rms_headroom_db: float = 18, loudness_headroom_db: float = 14,
- loudness_compressor: bool = False, log_clipping: bool = False,
- sample_rate: tp.Optional[int] = None,
- stem_name: tp.Optional[str] = None) -> torch.Tensor:
- """Normalize the audio according to the prescribed strategy (see after).
-
- Args:
- wav (torch.Tensor): Audio data.
- normalize (bool): if `True` (default), normalizes according to the prescribed
- strategy (see after). If `False`, the strategy is only used in case clipping
- would happen.
- strategy (str): Can be either 'clip', 'peak', or 'rms'. Default is 'peak',
- i.e. audio is normalized by its largest value. RMS normalizes by root-mean-square
- with extra headroom to avoid clipping. 'clip' just clips.
- peak_clip_headroom_db (float): Headroom in dB when doing 'peak' or 'clip' strategy.
- rms_headroom_db (float): Headroom in dB when doing 'rms' strategy. This must be much larger
- than the `peak_clip` one to avoid further clipping.
- loudness_headroom_db (float): Target loudness for loudness normalization.
- loudness_compressor (bool): If True, uses tanh based soft clipping.
- log_clipping (bool): If True, basic logging on stderr when clipping still
- occurs despite strategy (only for 'rms').
- sample_rate (int): Sample rate for the audio data (required for loudness).
- stem_name (Optional[str]): Stem name for clipping logging.
- Returns:
- torch.Tensor: Normalized audio.
- """
- scale_peak = 10 ** (-peak_clip_headroom_db / 20)
- scale_rms = 10 ** (-rms_headroom_db / 20)
- if strategy == 'peak':
- rescaling = (scale_peak / wav.abs().max())
- if normalize or rescaling < 1:
- wav = wav * rescaling
- elif strategy == 'clip':
- wav = wav.clamp(-scale_peak, scale_peak)
- elif strategy == 'rms':
- mono = wav.mean(dim=0)
- rescaling = scale_rms / mono.pow(2).mean().sqrt()
- if normalize or rescaling < 1:
- wav = wav * rescaling
- _clip_wav(wav, log_clipping=log_clipping, stem_name=stem_name)
- elif strategy == 'loudness':
- assert sample_rate is not None, "Loudness normalization requires sample rate."
- wav = normalize_loudness(wav, sample_rate, loudness_headroom_db, loudness_compressor)
- _clip_wav(wav, log_clipping=log_clipping, stem_name=stem_name)
- else:
- assert wav.abs().max() < 1
- assert strategy == '' or strategy == 'none', f"Unexpected strategy: '{strategy}'"
- return wav
-
-
-def f32_pcm(wav: torch.Tensor) -> torch.Tensor:
- """Convert audio to float 32 bits PCM format.
- """
- if wav.dtype.is_floating_point:
- return wav
- else:
- assert wav.dtype == torch.int16
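-        # int16 spans [-2**15, 2**15 - 1], so dividing by 2**15 maps samples into roughly [-1.0, 1.0)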
- return wav.float() / 2**15
-
-
-def i16_pcm(wav: torch.Tensor) -> torch.Tensor:
- """Convert audio to int 16 bits PCM format.
-
-    .. warning:: There exist many formulas for doing this conversion. None are perfect
-    due to the asymmetry of the int16 range. One either has possible clipping, a DC offset,
-    or inconsistencies with f32_pcm. If the given wav doesn't have enough headroom,
-    it is possible that `i16_pcm(f32_pcm(wav)) != wav`.
- """
- if wav.dtype.is_floating_point:
- assert wav.abs().max() <= 1
- candidate = (wav * 2 ** 15).round()
- if candidate.max() >= 2 ** 15: # clipping would occur
- candidate = (wav * (2 ** 15 - 1)).round()
- return candidate.short()
- else:
- assert wav.dtype == torch.int16
- return wav
diff --git a/spaces/jbilcke-hf/MusicGen/audiocraft/modules/lstm.py b/spaces/jbilcke-hf/MusicGen/audiocraft/modules/lstm.py
deleted file mode 100644
index c0866175950c1ca4f6cca98649525e6481853bba..0000000000000000000000000000000000000000
--- a/spaces/jbilcke-hf/MusicGen/audiocraft/modules/lstm.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-from torch import nn
-
-
-class StreamableLSTM(nn.Module):
- """LSTM without worrying about the hidden state, nor the layout of the data.
- Expects input as convolutional layout.
- """
- def __init__(self, dimension: int, num_layers: int = 2, skip: bool = True):
- super().__init__()
- self.skip = skip
- self.lstm = nn.LSTM(dimension, dimension, num_layers)
-
- def forward(self, x):
- x = x.permute(2, 0, 1)
- y, _ = self.lstm(x)
- if self.skip:
- y = y + x
- y = y.permute(1, 2, 0)
- return y
diff --git a/spaces/jbilcke-hf/ai-comic-factory/src/app/engine/caption.ts b/spaces/jbilcke-hf/ai-comic-factory/src/app/engine/caption.ts
deleted file mode 100644
index 11e8f78f1de68e7816d1e4e0fec1752c2304cbce..0000000000000000000000000000000000000000
--- a/spaces/jbilcke-hf/ai-comic-factory/src/app/engine/caption.ts
+++ /dev/null
@@ -1,54 +0,0 @@
-"use server"
-
-import { ImageAnalysisRequest, ImageAnalysisResponse } from "@/types"
-
-const apiUrl = `${process.env.RENDERING_VIDEOCHAIN_API_URL || ""}`
-
-export async function see({
- prompt,
- imageBase64
-}: {
- prompt: string
- imageBase64: string
-}): Promise<string> {
- if (!prompt) {
- console.error(`cannot call the API without an image, aborting..`)
- throw new Error(`cannot call the API without an image, aborting..`)
- }
-
- try {
- const request = {
- prompt,
- image: imageBase64
-
- } as ImageAnalysisRequest
-
-    console.log(`calling ${apiUrl}/analyze with: `, {
- prompt: request.prompt,
- image: request.image.slice(0, 20)
- })
-
- const res = await fetch(`${apiUrl}/analyze`, {
- method: "POST",
- headers: {
- Accept: "application/json",
- "Content-Type": "application/json",
- // Authorization: `Bearer ${videochainApi}`,
- },
- body: JSON.stringify(request),
- cache: 'no-store',
- // we can also use this (see https://vercel.com/blog/vercel-cache-api-nextjs-cache)
- // next: { revalidate: 1 }
- })
-
- if (res.status !== 200) {
- throw new Error('Failed to fetch data')
- }
-
- const response = (await res.json()) as ImageAnalysisResponse
- return response.result
- } catch (err) {
- console.error(err)
- return ""
- }
-}
diff --git a/spaces/jdczlx/ChatGPT-chuanhu/modules/llama_func.py b/spaces/jdczlx/ChatGPT-chuanhu/modules/llama_func.py
deleted file mode 100644
index 594e2ab57eb116859c9d63355a8504404db350c0..0000000000000000000000000000000000000000
--- a/spaces/jdczlx/ChatGPT-chuanhu/modules/llama_func.py
+++ /dev/null
@@ -1,201 +0,0 @@
-import os
-import logging
-
-from llama_index import GPTSimpleVectorIndex
-from llama_index import download_loader
-from llama_index import (
- Document,
- LLMPredictor,
- PromptHelper,
- QuestionAnswerPrompt,
- RefinePrompt,
-)
-from langchain.llms import OpenAI
-import colorama
-
-from modules.presets import *
-from modules.utils import *
-
-def get_index_name(file_src):
- index_name = []
- for file in file_src:
- index_name.append(os.path.basename(file.name))
- index_name = sorted(index_name)
- index_name = "".join(index_name)
- index_name = sha1sum(index_name)
- return index_name
-
-def get_documents(file_src):
- documents = []
- logging.debug("Loading documents...")
- logging.debug(f"file_src: {file_src}")
- for file in file_src:
- logging.info(f"loading file: {file.name}")
- if os.path.splitext(file.name)[1] == ".pdf":
- logging.debug("Loading PDF...")
- CJKPDFReader = download_loader("CJKPDFReader")
- loader = CJKPDFReader()
- text_raw = loader.load_data(file=file.name)[0].text
- elif os.path.splitext(file.name)[1] == ".docx":
- logging.debug("Loading DOCX...")
- DocxReader = download_loader("DocxReader")
- loader = DocxReader()
- text_raw = loader.load_data(file=file.name)[0].text
- elif os.path.splitext(file.name)[1] == ".epub":
- logging.debug("Loading EPUB...")
- EpubReader = download_loader("EpubReader")
- loader = EpubReader()
- text_raw = loader.load_data(file=file.name)[0].text
- else:
- logging.debug("Loading text file...")
- with open(file.name, "r", encoding="utf-8") as f:
- text_raw = f.read()
- text = add_space(text_raw)
- documents += [Document(text)]
- return documents
-
-
-def construct_index(
- api_key,
- file_src,
- max_input_size=4096,
- num_outputs=1,
- max_chunk_overlap=20,
- chunk_size_limit=600,
- embedding_limit=None,
- separator=" ",
- num_children=10,
- max_keywords_per_chunk=10,
-):
- os.environ["OPENAI_API_KEY"] = api_key
- chunk_size_limit = None if chunk_size_limit == 0 else chunk_size_limit
- embedding_limit = None if embedding_limit == 0 else embedding_limit
- separator = " " if separator == "" else separator
-
- llm_predictor = LLMPredictor(
- llm=OpenAI(model_name="gpt-3.5-turbo-0301", openai_api_key=api_key)
- )
- prompt_helper = PromptHelper(
- max_input_size,
- num_outputs,
- max_chunk_overlap,
- embedding_limit,
- chunk_size_limit,
- separator=separator,
- )
- index_name = get_index_name(file_src)
- if os.path.exists(f"./index/{index_name}.json"):
- logging.info("找到了缓存的索引文件,加载中……")
- return GPTSimpleVectorIndex.load_from_disk(f"./index/{index_name}.json")
- else:
- try:
- documents = get_documents(file_src)
- logging.debug("构建索引中……")
- index = GPTSimpleVectorIndex(
- documents, llm_predictor=llm_predictor, prompt_helper=prompt_helper
- )
- os.makedirs("./index", exist_ok=True)
- index.save_to_disk(f"./index/{index_name}.json")
- return index
- except Exception as e:
- print(e)
- return None
-
-
-def chat_ai(
- api_key,
- index,
- question,
- context,
- chatbot,
- reply_language,
-):
- os.environ["OPENAI_API_KEY"] = api_key
-
- logging.info(f"Question: {question}")
-
- response, chatbot_display, status_text = ask_ai(
- api_key,
- index,
- question,
- replace_today(PROMPT_TEMPLATE),
- REFINE_TEMPLATE,
- SIM_K,
- INDEX_QUERY_TEMPRATURE,
- context,
- reply_language,
- )
- if response is None:
- status_text = "查询失败,请换个问法试试"
- return context, chatbot
- response = response
-
- context.append({"role": "user", "content": question})
- context.append({"role": "assistant", "content": response})
- chatbot.append((question, chatbot_display))
-
- os.environ["OPENAI_API_KEY"] = ""
- return context, chatbot, status_text
-
-
-def ask_ai(
- api_key,
- index,
- question,
- prompt_tmpl,
- refine_tmpl,
- sim_k=1,
- temprature=0,
- prefix_messages=[],
- reply_language="中文",
-):
- os.environ["OPENAI_API_KEY"] = api_key
-
- logging.debug("Index file found")
- logging.debug("Querying index...")
- llm_predictor = LLMPredictor(
- llm=OpenAI(
- temperature=temprature,
- model_name="gpt-3.5-turbo-0301",
- prefix_messages=prefix_messages,
- )
- )
-
- response = None # Initialize response variable to avoid UnboundLocalError
- qa_prompt = QuestionAnswerPrompt(prompt_tmpl.replace("{reply_language}", reply_language))
- rf_prompt = RefinePrompt(refine_tmpl.replace("{reply_language}", reply_language))
- response = index.query(
- question,
- llm_predictor=llm_predictor,
- similarity_top_k=sim_k,
- text_qa_template=qa_prompt,
- refine_template=rf_prompt,
- response_mode="compact",
- )
-
- if response is not None:
- logging.info(f"Response: {response}")
- ret_text = response.response
- nodes = []
- for index, node in enumerate(response.source_nodes):
- brief = node.source_text[:25].replace("\n", "")
- nodes.append(
-                f"<details><summary>[{index + 1}]\t{brief}...</summary><p>{node.source_text}</p></details>"
- )
- new_response = ret_text + "\n----------\n" + "\n\n".join(nodes)
- logging.info(
- f"Response: {colorama.Fore.BLUE}{ret_text}{colorama.Style.RESET_ALL}"
- )
- os.environ["OPENAI_API_KEY"] = ""
- return ret_text, new_response, f"查询消耗了{llm_predictor.last_token_usage} tokens"
- else:
- logging.warning("No response found, returning None")
- os.environ["OPENAI_API_KEY"] = ""
- return None
-
-
-def add_space(text):
- punctuations = {",": ", ", "。": "。 ", "?": "? ", "!": "! ", ":": ": ", ";": "; "}
- for cn_punc, en_punc in punctuations.items():
- text = text.replace(cn_punc, en_punc)
- return text
diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/anyio/abc/__init__.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/anyio/abc/__init__.py
deleted file mode 100644
index 72c34e544e1634e4f42c005506bac9b61ab095f5..0000000000000000000000000000000000000000
--- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/anyio/abc/__init__.py
+++ /dev/null
@@ -1,90 +0,0 @@
-from __future__ import annotations
-
-__all__ = (
- "AsyncResource",
- "IPAddressType",
- "IPSockAddrType",
- "SocketAttribute",
- "SocketStream",
- "SocketListener",
- "UDPSocket",
- "UNIXSocketStream",
- "UDPPacketType",
- "ConnectedUDPSocket",
- "UnreliableObjectReceiveStream",
- "UnreliableObjectSendStream",
- "UnreliableObjectStream",
- "ObjectReceiveStream",
- "ObjectSendStream",
- "ObjectStream",
- "ByteReceiveStream",
- "ByteSendStream",
- "ByteStream",
- "AnyUnreliableByteReceiveStream",
- "AnyUnreliableByteSendStream",
- "AnyUnreliableByteStream",
- "AnyByteReceiveStream",
- "AnyByteSendStream",
- "AnyByteStream",
- "Listener",
- "Process",
- "Event",
- "Condition",
- "Lock",
- "Semaphore",
- "CapacityLimiter",
- "CancelScope",
- "TaskGroup",
- "TaskStatus",
- "TestRunner",
- "BlockingPortal",
-)
-
-from typing import Any
-
-from ._resources import AsyncResource
-from ._sockets import (
- ConnectedUDPSocket,
- IPAddressType,
- IPSockAddrType,
- SocketAttribute,
- SocketListener,
- SocketStream,
- UDPPacketType,
- UDPSocket,
- UNIXSocketStream,
-)
-from ._streams import (
- AnyByteReceiveStream,
- AnyByteSendStream,
- AnyByteStream,
- AnyUnreliableByteReceiveStream,
- AnyUnreliableByteSendStream,
- AnyUnreliableByteStream,
- ByteReceiveStream,
- ByteSendStream,
- ByteStream,
- Listener,
- ObjectReceiveStream,
- ObjectSendStream,
- ObjectStream,
- UnreliableObjectReceiveStream,
- UnreliableObjectSendStream,
- UnreliableObjectStream,
-)
-from ._subprocesses import Process
-from ._tasks import TaskGroup, TaskStatus
-from ._testing import TestRunner
-
-# Re-exported here, for backwards compatibility
-# isort: off
-from .._core._synchronization import CapacityLimiter, Condition, Event, Lock, Semaphore
-from .._core._tasks import CancelScope
-from ..from_thread import BlockingPortal
-
-# Re-export imports so they look like they live directly in this package
-key: str
-value: Any
-for key, value in list(locals().items()):
- if getattr(value, "__module__", "").startswith("anyio.abc."):
- value.__module__ = __name__
diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fontTools/ttLib/tables/S_I_N_G_.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fontTools/ttLib/tables/S_I_N_G_.py
deleted file mode 100644
index 7420da7e5dcec81b835ab0e8e2c775dbce860cbd..0000000000000000000000000000000000000000
--- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fontTools/ttLib/tables/S_I_N_G_.py
+++ /dev/null
@@ -1,93 +0,0 @@
-from fontTools.misc import sstruct
-from fontTools.misc.textTools import bytechr, byteord, tobytes, tostr, safeEval
-from . import DefaultTable
-
-SINGFormat = """
- > # big endian
- tableVersionMajor: H
- tableVersionMinor: H
- glyphletVersion: H
- permissions: h
- mainGID: H
- unitsPerEm: H
- vertAdvance: h
- vertOrigin: h
- uniqueName: 28s
- METAMD5: 16s
- nameLength: 1s
-"""
-# baseGlyphName is a byte string which follows the record above.
-
-
-class table_S_I_N_G_(DefaultTable.DefaultTable):
-
- dependencies = []
-
- def decompile(self, data, ttFont):
- dummy, rest = sstruct.unpack2(SINGFormat, data, self)
- self.uniqueName = self.decompileUniqueName(self.uniqueName)
- self.nameLength = byteord(self.nameLength)
- assert len(rest) == self.nameLength
- self.baseGlyphName = tostr(rest)
-
- rawMETAMD5 = self.METAMD5
- self.METAMD5 = "[" + hex(byteord(self.METAMD5[0]))
- for char in rawMETAMD5[1:]:
- self.METAMD5 = self.METAMD5 + ", " + hex(byteord(char))
- self.METAMD5 = self.METAMD5 + "]"
-
- def decompileUniqueName(self, data):
- name = ""
- for char in data:
- val = byteord(char)
- if val == 0:
- break
-            if (val > 31) and (val < 128):
- name += chr(val)
- else:
- octString = oct(val)
- if len(octString) > 3:
- octString = octString[1:] # chop off that leading zero.
- elif len(octString) < 3:
-                octString = octString.zfill(3)
- name += "\\" + octString
- return name
-
- def compile(self, ttFont):
- d = self.__dict__.copy()
- d["nameLength"] = bytechr(len(self.baseGlyphName))
- d["uniqueName"] = self.compilecompileUniqueName(self.uniqueName, 28)
- METAMD5List = eval(self.METAMD5)
- d["METAMD5"] = b""
- for val in METAMD5List:
- d["METAMD5"] += bytechr(val)
- assert len(d["METAMD5"]) == 16, "Failed to pack 16 byte MD5 hash in SING table"
- data = sstruct.pack(SINGFormat, d)
- data = data + tobytes(self.baseGlyphName)
- return data
-
- def compilecompileUniqueName(self, name, length):
- nameLen = len(name)
- if length <= nameLen:
- name = name[: length - 1] + "\000"
- else:
-            name += (length - nameLen) * "\000"
- return name
-
- def toXML(self, writer, ttFont):
- writer.comment("Most of this table will be recalculated by the compiler")
- writer.newline()
- formatstring, names, fixes = sstruct.getformat(SINGFormat)
- for name in names:
- value = getattr(self, name)
- writer.simpletag(name, value=value)
- writer.newline()
- writer.simpletag("baseGlyphName", value=self.baseGlyphName)
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- value = attrs["value"]
- if name in ["uniqueName", "METAMD5", "baseGlyphName"]:
- setattr(self, name, value)
- else:
- setattr(self, name, safeEval(value))
diff --git a/spaces/johiny/gsdf-Counterfeit-V2.5/README.md b/spaces/johiny/gsdf-Counterfeit-V2.5/README.md
deleted file mode 100644
index 0f5a2e615f9f1d039eff09d2427e0742b42798b2..0000000000000000000000000000000000000000
--- a/spaces/johiny/gsdf-Counterfeit-V2.5/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Gsdf Counterfeit V2.5
-emoji: 🏢
-colorFrom: pink
-colorTo: yellow
-sdk: gradio
-sdk_version: 3.18.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/johnyang/ChatPaper111/run.sh b/spaces/johnyang/ChatPaper111/run.sh
deleted file mode 100644
index 7b87caa9d2828fe7b1016516e8e3d00f949e1c1c..0000000000000000000000000000000000000000
--- a/spaces/johnyang/ChatPaper111/run.sh
+++ /dev/null
@@ -1,5 +0,0 @@
-cd /app/grobid-0.6.2
-./gradlew run &
-cd /app/
-nohup python backend.py &
-streamlit run frontend.py --server.address 0.0.0.0 --server.port 7860 --server.enableCORS true --server.enableXsrfProtection false
\ No newline at end of file
diff --git a/spaces/jone/Music_Source_Separation/scripts/5_inference/musdb18/inference.sh b/spaces/jone/Music_Source_Separation/scripts/5_inference/musdb18/inference.sh
deleted file mode 100644
index 21ecd5a30731343ee9b74e181ef4602b528a87d4..0000000000000000000000000000000000000000
--- a/spaces/jone/Music_Source_Separation/scripts/5_inference/musdb18/inference.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/bin/bash
-WORKSPACE=${1:-"./workspaces/bytesep"} # The first argument is workspace directory.
-
-echo "WORKSPACE=${WORKSPACE}"
-
-# Users can modify the following config file.
-TRAIN_CONFIG_YAML="scripts/4_train/musdb18/configs/vocals-accompaniment,unet.yaml"
-
-CHECKPOINT_PATH="${WORKSPACE}/checkpoints/musdb18/train/config=vocals-accompaniment,unet,gpus=1/step=300000.pth"
-
-# Inference
-CUDA_VISIBLE_DEVICES=0 python3 bytesep/inference.py \
- --config_yaml=$TRAIN_CONFIG_YAML \
- --checkpoint_path=$CHECKPOINT_PATH \
- --audio_path="resources/vocals_accompaniment_10s.mp3" \
- --output_path="sep_results/vocals_accompaniment_10s_sep_vocals.mp3"
-
\ No newline at end of file
diff --git a/spaces/jordonpeter01/ai-comic-factory/src/components/ui/toaster.tsx b/spaces/jordonpeter01/ai-comic-factory/src/components/ui/toaster.tsx
deleted file mode 100644
index e2233852a74d4db61ea668a5d43f9681038807cc..0000000000000000000000000000000000000000
--- a/spaces/jordonpeter01/ai-comic-factory/src/components/ui/toaster.tsx
+++ /dev/null
@@ -1,35 +0,0 @@
-"use client"
-
-import {
- Toast,
- ToastClose,
- ToastDescription,
- ToastProvider,
- ToastTitle,
- ToastViewport,
-} from "@/components/ui/toast"
-import { useToast } from "@/components/ui/use-toast"
-
-export function Toaster() {
- const { toasts } = useToast()
-
-  return (
-    <ToastProvider>
-      {toasts.map(function ({ id, title, description, action, ...props }) {
-        return (
-          <Toast key={id} {...props}>
-            <div className="grid gap-1">
-              {title && <ToastTitle>{title}</ToastTitle>}
-              {description && (
-                <ToastDescription>{description}</ToastDescription>
-              )}
-            </div>
-            {action}
-            <ToastClose />
-          </Toast>
-        )
-      })}
-      <ToastViewport />
-    </ToastProvider>
-  )
-}
diff --git "a/spaces/joshen/gpt-academic/crazy_functions/\347\224\237\346\210\220\345\207\275\346\225\260\346\263\250\351\207\212.py" "b/spaces/joshen/gpt-academic/crazy_functions/\347\224\237\346\210\220\345\207\275\346\225\260\346\263\250\351\207\212.py"
deleted file mode 100644
index 9579800f2cefa684e38ee74b1cce4ee7db7a11fe..0000000000000000000000000000000000000000
--- "a/spaces/joshen/gpt-academic/crazy_functions/\347\224\237\346\210\220\345\207\275\346\225\260\346\263\250\351\207\212.py"
+++ /dev/null
@@ -1,57 +0,0 @@
-from predict import predict_no_ui
-from toolbox import CatchException, report_execption, write_results_to_file, predict_no_ui_but_counting_down
-fast_debug = False
-
-
-def 生成函数注释(file_manifest, project_folder, top_p, api_key, temperature, chatbot, history, systemPromptTxt):
- import time, glob, os
- print('begin analysis on:', file_manifest)
- for index, fp in enumerate(file_manifest):
- with open(fp, 'r', encoding='utf-8') as f:
- file_content = f.read()
-
- i_say = f'请对下面的程序文件做一个概述,并对文件中的所有函数生成注释,使用markdown表格输出结果,文件名是{os.path.relpath(fp, project_folder)},文件内容是 ```{file_content}```'
- i_say_show_user = f'[{index}/{len(file_manifest)}] 请对下面的程序文件做一个概述,并对文件中的所有函数生成注释: {os.path.abspath(fp)}'
- chatbot.append((i_say_show_user, "[Local Message] waiting gpt response."))
- print('[1] yield chatbot, history')
- yield chatbot, history, '正常'
-
- if not fast_debug:
- msg = '正常'
- # ** gpt request **
-            gpt_say = yield from predict_no_ui_but_counting_down(i_say, i_say_show_user, chatbot, top_p, api_key, temperature, history=[])  # with a countdown timeout
-
- print('[2] end gpt req')
- chatbot[-1] = (i_say_show_user, gpt_say)
- history.append(i_say_show_user); history.append(gpt_say)
- print('[3] yield chatbot, history')
- yield chatbot, history, msg
- print('[4] next')
- if not fast_debug: time.sleep(2)
-
- if not fast_debug:
- res = write_results_to_file(history)
- chatbot.append(("完成了吗?", res))
- yield chatbot, history, msg
-
-
-
-@CatchException
-def 批量生成函数注释(txt, top_p, api_key, temperature, chatbot, history, systemPromptTxt, WEB_PORT):
-    history = []    # clear the history to avoid input overflow
- import glob, os
- if os.path.exists(txt):
- project_folder = txt
- else:
- if txt == "": txt = '空空如也的输入栏'
- report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
- yield chatbot, history, '正常'
- return
- file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.py', recursive=True)] + \
- [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)]
-
- if len(file_manifest) == 0:
-        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.py或.cpp文件: {txt}")
- yield chatbot, history, '正常'
- return
- yield from 生成函数注释(file_manifest, project_folder, top_p, api_key, temperature, chatbot, history, systemPromptTxt)
diff --git a/spaces/jpfearnworks/ai_agents/server.py b/spaces/jpfearnworks/ai_agents/server.py
deleted file mode 100644
index 6f3dba04f45d78aa7c6eb84052d6967ba12948ba..0000000000000000000000000000000000000000
--- a/spaces/jpfearnworks/ai_agents/server.py
+++ /dev/null
@@ -1,36 +0,0 @@
-import argparse
-from dotenv import load_dotenv, find_dotenv
-import os
-import gradio as gr
-from modules.reasoning.component import create_reasoning_router_ui
-from modules.knowledge_retrieval.component import create_knowledge_router_ui
-from modules.settings.component import create_settings_ui
-from modules.settings.user_settings import UserSettings
-load_dotenv(find_dotenv())
-
-openai_api_key = os.getenv("OPENAI_API_KEY")
-
-def create_interface():
- title: str = "Prompt Strategy Demo"
- description: str = "AI Agents Sandbox"
- with gr.Blocks(analytics_enabled=False, capture_session=True, title=title) as interface:
- with gr.Tab("Reasoning Router"):
- create_reasoning_router_ui()
- with gr.Tab("Knowledge Domains"):
- create_knowledge_router_ui()
- with gr.Tab("Settings"):
- create_settings_ui()
-
- interface.queue()
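-    # "port" is the module-level variable assigned in the __main__ block
-    # below; gradio falls back to its default port when it is None.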
- interface.launch(server_name="0.0.0.0", server_port=port)
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser()
- parser.add_argument("--port", type=int, help="Port number to run the server on")
- args = parser.parse_args()
-
- port = args.port
- settings = UserSettings.get_instance()
- if openai_api_key:
- settings.set_api_key(openai_api_key)
- create_interface()
\ No newline at end of file
diff --git a/spaces/jpwahle/field-time-diversity/aclanthology.py b/spaces/jpwahle/field-time-diversity/aclanthology.py
deleted file mode 100644
index cdb903b289c95362c0ed0856cb10eb480d5c950a..0000000000000000000000000000000000000000
--- a/spaces/jpwahle/field-time-diversity/aclanthology.py
+++ /dev/null
@@ -1,203 +0,0 @@
-# Copyright 2023 by Jan Philip Wahle, https://jpwahle.com/
-# All rights reserved.
-
-import asyncio
-import json
-
-import aiohttp
-import requests
-from bs4 import BeautifulSoup
-
-
-async def fetch(session, url):
- """Asynchronous function to fetch a URL using aiohttp."""
- async with session.get(url) as response:
- return await response.text()
-
-
-async def async_match_acl_id_to_s2_paper(acl_id):
- """
- Fetches the paper information from the Semantic Scholar API for the given ACL ID.
-
- Args:
- acl_id (str): The ACL ID of the paper to fetch.
-
- Returns:
- dict: A dictionary containing the paper information.
- """
- url = f"https://api.semanticscholar.org/graph/v1/paper/ACL:{acl_id}"
- async with aiohttp.ClientSession() as session:
- res_text = await fetch(session, url)
- return json.loads(res_text)
-
-
-def extract_paper_info(paper_url):
- """
- Extracts information about a paper from its ACL Anthology URL.
-
- Args:
- paper_url (str): The URL of the paper on the ACL Anthology website.
-
- Returns:
- dict: A dictionary containing the title, authors, and ACL Anthology ID of the paper.
- """
- html_doc = requests.get(paper_url, timeout=10).text
- soup = BeautifulSoup(html_doc, "html.parser")
-
- title = soup.find("h2", id="title").text.strip()
- authors = [
- a.text
- for a in soup.find_all("a")
- if a.parent.name == "p" and a.parent["class"] == ["lead"]
- ]
- acl_id = paper_url.split("/")[-2]
-
- return {"title": title, "authors": authors, "acl_id": acl_id}
-
-
-def extract_author_info(author_url):
- """
- Extracts author information from the given author URL.
-
- Args:
- author_url (str): The URL of the author's page on ACL Anthology.
-
- Returns:
- dict: A dictionary containing the author's name and a list of their papers.
- Each paper is represented as a dictionary with keys "title" and "url".
- """
- html_doc = requests.get(author_url, timeout=10).text
- soup = BeautifulSoup(html_doc, "html.parser")
-
- author_name = soup.find("h2", id="title").text.strip()
- paper_elements = soup.find_all("p")
- papers = []
- for paper in paper_elements:
- links = paper.find_all("a")
- # Filter out a with text pdf and bib
- links = [
- l for l in links if l.text.strip() not in ["pdf", "bib", "abs"]
- ]
- if not links:
- continue
- title = links[0].text.strip()
- url = "https://aclanthology.org" + links[0]["href"]
- papers.append({"title": title, "url": url})
-
- return {"author": author_name, "papers": papers}
-
-
-def extract_venue_info(venue_url):
- """
- Extracts venue information from the given URL.
-
- Args:
- venue_url (str): The URL of the venue to extract information from.
-
- Returns:
- dict: A dictionary containing the venue name and a list of papers with their titles and URLs.
- """
- html_doc = requests.get(venue_url, timeout=10).text
- soup = BeautifulSoup(html_doc, "html.parser")
-
- venue_name = soup.find("h2", id="title").text.strip()
- paper_elements = soup.find_all("p")
- papers = []
- for paper in paper_elements:
- links = paper.find_all("a")
- # Filter out a with text pdf and bib
- links = [
- l for l in links if l.text.strip() not in ["pdf", "bib", "abs"]
- ]
- if not links:
- continue
- title = links[0].text.strip()
- url = "https://aclanthology.org" + links[0]["href"]
- papers.append({"title": title, "url": url})
-
- return {"venue": venue_name, "papers": papers}
-
-
-def determine_page_type(url):
- """
- Determine the type of ACL Anthology page given its URL.
-
- Args:
- url (str): The URL to be checked.
-
- Returns:
- str: "paper", "author", or "venue". Returns None if the type can't be determined.
- """
- # Extract last segments from the URL
- segments = [segment for segment in url.split("/") if segment]
-
- # Check if the URL points to an event (venue)
- if "events" in url or "volumes" in url:
- return "venue"
-
- # If URL ends in a pattern like "2023.acl-long.1" it's a paper
- if len(segments) > 1 and segments[-2].isnumeric() and "." in segments[-1]:
- return "paper"
-
- if "people" in url:
- return "author"
-
- # If none of the above rules apply, fetch the page and check its content
- try:
- html_doc = requests.get(url, timeout=10).text
- soup = BeautifulSoup(html_doc, "html.parser")
-
- # Check for unique elements specific to each page type
- if soup.find("h2", id="title"):
- return (
- "author"
- if soup.find("a", href=True, text="Google Scholar")
- else "paper"
- )
- elif soup.find("h1", text="Anthology Volume"):
- return "venue"
- except Exception as e:
- print(f"Error determining page type: {e}")
-
- return None
-
-
-if __name__ == "__main__":
- loop = asyncio.get_event_loop()
-
- urls = [
- "https://aclanthology.org/2023.acl-long.1/",
- "https://aclanthology.org/people/a/anna-rogers/",
- "https://aclanthology.org/events/acl-2022/",
- ]
-
- for url in urls:
- if determine_page_type(url) == "paper":
- print(f"Paper: {url}")
- res = extract_paper_info(url)
- paper = loop.run_until_complete(
- async_match_acl_id_to_s2_paper(res["acl_id"])
- )
- print(paper)
-
- elif determine_page_type(url) == "author":
- print(f"Author: {url}")
- res = extract_author_info(url)
- tasks = [
- async_match_acl_id_to_s2_paper(paper["url"].split("/")[-2])
- for paper in res["papers"]
- ]
- s2_ids = loop.run_until_complete(asyncio.gather(*tasks))
- for paper, s2_id in zip(res["papers"], s2_ids):
-                print(s2_id["paperId"])
-
- elif determine_page_type(url) == "venue":
- print(f"Venue: {url}")
- res = extract_venue_info(url)
- tasks = [
- async_match_acl_id_to_s2_paper(paper["url"].split("/")[-2])
- for paper in res["papers"]
- ]
- s2_ids = loop.run_until_complete(asyncio.gather(*tasks))
- for paper, s2_id in zip(res["papers"], s2_ids):
-                print(s2_id["paperId"])
diff --git a/spaces/jyseo/3DFuse/ldm/models/diffusion/ddpm.py b/spaces/jyseo/3DFuse/ldm/models/diffusion/ddpm.py
deleted file mode 100644
index f52edbb91720ecf276238761754064c5a43a4ed0..0000000000000000000000000000000000000000
--- a/spaces/jyseo/3DFuse/ldm/models/diffusion/ddpm.py
+++ /dev/null
@@ -1,1796 +0,0 @@
-"""
-wild mixture of
-https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
-https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py
-https://github.com/CompVis/taming-transformers
--- merci
-"""
-
-import torch
-import torch.nn as nn
-import numpy as np
-import pytorch_lightning as pl
-from torch.optim.lr_scheduler import LambdaLR
-from einops import rearrange, repeat
-from contextlib import contextmanager, nullcontext
-from functools import partial
-import itertools
-from tqdm import tqdm
-from torchvision.utils import make_grid
-from pytorch_lightning.utilities.distributed import rank_zero_only
-from omegaconf import ListConfig
-
-from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
-from ldm.modules.ema import LitEma
-from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
-from ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL
-from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
-from ldm.models.diffusion.ddim import DDIMSampler
-
-
-__conditioning_keys__ = {'concat': 'c_concat',
- 'crossattn': 'c_crossattn',
- 'adm': 'y'}
-
-
-def disabled_train(self, mode=True):
- """Overwrite model.train with this function to make sure train/eval mode
- does not change anymore."""
- return self
-
-
-def uniform_on_device(r1, r2, shape, device):
- return (r1 - r2) * torch.rand(*shape, device=device) + r2
-
-
-class DDPM(pl.LightningModule):
- # classic DDPM with Gaussian diffusion, in image space
- def __init__(self,
- unet_config,
- timesteps=1000,
- beta_schedule="linear",
- loss_type="l2",
- ckpt_path=None,
- ignore_keys=[],
- load_only_unet=False,
- monitor="val/loss",
- use_ema=True,
- first_stage_key="image",
- image_size=256,
- channels=3,
- log_every_t=100,
- clip_denoised=True,
- linear_start=1e-4,
- linear_end=2e-2,
- cosine_s=8e-3,
- given_betas=None,
- original_elbo_weight=0.,
- v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta
- l_simple_weight=1.,
- conditioning_key=None,
- parameterization="eps", # all assuming fixed variance schedules
- scheduler_config=None,
- use_positional_encodings=False,
- learn_logvar=False,
- logvar_init=0.,
- make_it_fit=False,
- ucg_training=None,
- reset_ema=False,
- reset_num_ema_updates=False,
- ):
- super().__init__()
- assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"'
- self.parameterization = parameterization
- print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode")
- self.cond_stage_model = None
- self.clip_denoised = clip_denoised
- self.log_every_t = log_every_t
- self.first_stage_key = first_stage_key
- self.image_size = image_size # try conv?
- self.channels = channels
- self.use_positional_encodings = use_positional_encodings
- self.model = DiffusionWrapper(unet_config, conditioning_key)
- count_params(self.model, verbose=True)
- self.use_ema = use_ema
- if self.use_ema:
- self.model_ema = LitEma(self.model)
- print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")
-
- self.use_scheduler = scheduler_config is not None
- if self.use_scheduler:
- self.scheduler_config = scheduler_config
-
- self.v_posterior = v_posterior
- self.original_elbo_weight = original_elbo_weight
- self.l_simple_weight = l_simple_weight
-
- if monitor is not None:
- self.monitor = monitor
- self.make_it_fit = make_it_fit
- if reset_ema: assert exists(ckpt_path)
- if ckpt_path is not None:
- self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet)
- if reset_ema:
- assert self.use_ema
- print(f"Resetting ema to pure model weights. This is useful when restoring from an ema-only checkpoint.")
- self.model_ema = LitEma(self.model)
- if reset_num_ema_updates:
- print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ")
- assert self.use_ema
- self.model_ema.reset_num_updates()
-
- self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps,
- linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s)
-
- self.loss_type = loss_type
-
- self.learn_logvar = learn_logvar
- logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,))
- if self.learn_logvar:
- self.logvar = nn.Parameter(self.logvar, requires_grad=True)
- else:
- self.register_buffer('logvar', logvar)
-
- self.ucg_training = ucg_training or dict()
- if self.ucg_training:
- self.ucg_prng = np.random.RandomState()
-
- def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000,
- linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
- if exists(given_betas):
- betas = given_betas
- else:
- betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end,
- cosine_s=cosine_s)
- alphas = 1. - betas
- alphas_cumprod = np.cumprod(alphas, axis=0)
- alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1])
-
- timesteps, = betas.shape
- self.num_timesteps = int(timesteps)
- self.linear_start = linear_start
- self.linear_end = linear_end
- assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep'
-
- to_torch = partial(torch.tensor, dtype=torch.float32)
-
- self.register_buffer('betas', to_torch(betas))
- self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
- self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev))
-
- # calculations for diffusion q(x_t | x_{t-1}) and others
- self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod)))
- self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod)))
- self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod)))
- self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod)))
- self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1)))
-
- # calculations for posterior q(x_{t-1} | x_t, x_0)
- posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / (
- 1. - alphas_cumprod) + self.v_posterior * betas
- # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)
- self.register_buffer('posterior_variance', to_torch(posterior_variance))
- # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain
- self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20))))
- self.register_buffer('posterior_mean_coef1', to_torch(
- betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod)))
- self.register_buffer('posterior_mean_coef2', to_torch(
- (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod)))
-
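-        # lvlb_weights convert the per-timestep simple loss into the variational
-        # bound; for "eps" this is beta_t^2 / (2 * sigma_t^2 * alpha_t * (1 - alpha_bar_t)).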
- if self.parameterization == "eps":
- lvlb_weights = self.betas ** 2 / (
- 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))
- elif self.parameterization == "x0":
- lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. * 1 - torch.Tensor(alphas_cumprod))
- elif self.parameterization == "v":
- lvlb_weights = torch.ones_like(self.betas ** 2 / (
- 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)))
- else:
- raise NotImplementedError("mu not supported")
- lvlb_weights[0] = lvlb_weights[1]
- self.register_buffer('lvlb_weights', lvlb_weights, persistent=False)
-        assert not torch.isnan(self.lvlb_weights).any()
-
- @contextmanager
- def ema_scope(self, context=None):
- if self.use_ema:
- self.model_ema.store(self.model.parameters())
- self.model_ema.copy_to(self.model)
- if context is not None:
- print(f"{context}: Switched to EMA weights")
- try:
- yield None
- finally:
- if self.use_ema:
- self.model_ema.restore(self.model.parameters())
- if context is not None:
- print(f"{context}: Restored training weights")
-
- @torch.no_grad()
- def init_from_ckpt(self, path, ignore_keys=list(), only_model=False):
- sd = torch.load(path, map_location="cpu")
- if "state_dict" in list(sd.keys()):
- sd = sd["state_dict"]
- keys = list(sd.keys())
- for k in keys:
- for ik in ignore_keys:
- if k.startswith(ik):
- print("Deleting key {} from state_dict.".format(k))
- del sd[k]
- if self.make_it_fit:
- n_params = len([name for name, _ in
- itertools.chain(self.named_parameters(),
- self.named_buffers())])
- for name, param in tqdm(
- itertools.chain(self.named_parameters(),
- self.named_buffers()),
- desc="Fitting old weights to new weights",
- total=n_params
- ):
- if not name in sd:
- continue
- old_shape = sd[name].shape
- new_shape = param.shape
- assert len(old_shape) == len(new_shape)
- if len(new_shape) > 2:
- # we only modify first two axes
- assert new_shape[2:] == old_shape[2:]
- # assumes first axis corresponds to output dim
- if not new_shape == old_shape:
- new_param = param.clone()
- old_param = sd[name]
- if len(new_shape) == 1:
- for i in range(new_param.shape[0]):
- new_param[i] = old_param[i % old_shape[0]]
- elif len(new_shape) >= 2:
- for i in range(new_param.shape[0]):
- for j in range(new_param.shape[1]):
- new_param[i, j] = old_param[i % old_shape[0], j % old_shape[1]]
-
- n_used_old = torch.ones(old_shape[1])
- for j in range(new_param.shape[1]):
- n_used_old[j % old_shape[1]] += 1
- n_used_new = torch.zeros(new_shape[1])
- for j in range(new_param.shape[1]):
- n_used_new[j] = n_used_old[j % old_shape[1]]
-
- n_used_new = n_used_new[None, :]
- while len(n_used_new.shape) < len(new_shape):
- n_used_new = n_used_new.unsqueeze(-1)
- new_param /= n_used_new
-
- sd[name] = new_param
-
- missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(
- sd, strict=False)
- print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
- if len(missing) > 0:
- print(f"Missing Keys:\n {missing}")
- if len(unexpected) > 0:
- print(f"\nUnexpected Keys:\n {unexpected}")
-
- def q_mean_variance(self, x_start, t):
- """
- Get the distribution q(x_t | x_0).
- :param x_start: the [N x C x ...] tensor of noiseless inputs.
- :param t: the number of diffusion steps (minus 1). Here, 0 means one step.
- :return: A tuple (mean, variance, log_variance), all of x_start's shape.
- """
- mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start)
- variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape)
- log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape)
- return mean, variance, log_variance
-
- def predict_start_from_noise(self, x_t, t, noise):
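-        # Inverts the forward marginal:
-        # x_0 = sqrt(1 / alpha_bar_t) * x_t - sqrt(1 / alpha_bar_t - 1) * eps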
- return (
- extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t -
- extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise
- )
-
- def predict_start_from_z_and_v(self, x_t, t, v):
- # self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod)))
- # self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod)))
- return (
- extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * x_t -
- extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * v
- )
-
- def predict_eps_from_z_and_v(self, x_t, t, v):
- return (
- extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * v +
- extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * x_t
- )
-
- def q_posterior(self, x_start, x_t, t):
- posterior_mean = (
- extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start +
- extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t
- )
- posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape)
- posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape)
- return posterior_mean, posterior_variance, posterior_log_variance_clipped
-
- def p_mean_variance(self, x, t, clip_denoised: bool):
- model_out = self.model(x, t)
- if self.parameterization == "eps":
- x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)
- elif self.parameterization == "x0":
- x_recon = model_out
- if clip_denoised:
- x_recon.clamp_(-1., 1.)
-
- model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
- return model_mean, posterior_variance, posterior_log_variance
-
- @torch.no_grad()
- def p_sample(self, x, t, clip_denoised=True, repeat_noise=False):
- b, *_, device = *x.shape, x.device
- model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised)
- noise = noise_like(x.shape, device, repeat_noise)
- # no noise when t == 0
- nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
- return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
-
- @torch.no_grad()
- def p_sample_loop(self, shape, return_intermediates=False):
- device = self.betas.device
- b = shape[0]
- img = torch.randn(shape, device=device)
- intermediates = [img]
- for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps):
- img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long),
- clip_denoised=self.clip_denoised)
- if i % self.log_every_t == 0 or i == self.num_timesteps - 1:
- intermediates.append(img)
- if return_intermediates:
- return img, intermediates
- return img
-
- @torch.no_grad()
- def sample(self, batch_size=16, return_intermediates=False):
- image_size = self.image_size
- channels = self.channels
- return self.p_sample_loop((batch_size, channels, image_size, image_size),
- return_intermediates=return_intermediates)
-
- def q_sample(self, x_start, t, noise=None):
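-        # Forward diffusion in closed form:
-        # x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise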
- noise = default(noise, lambda: torch.randn_like(x_start))
- return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start +
- extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise)
-
- def get_v(self, x, noise, t):
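-        # v-parameterization target (Salimans & Ho, 2022):
-        # v = sqrt(alpha_bar_t) * eps - sqrt(1 - alpha_bar_t) * x_0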
- return (
- extract_into_tensor(self.sqrt_alphas_cumprod, t, x.shape) * noise -
- extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x.shape) * x
- )
-
- def get_loss(self, pred, target, mean=True):
- if self.loss_type == 'l1':
- loss = (target - pred).abs()
- if mean:
- loss = loss.mean()
- elif self.loss_type == 'l2':
- if mean:
- loss = torch.nn.functional.mse_loss(target, pred)
- else:
- loss = torch.nn.functional.mse_loss(target, pred, reduction='none')
- else:
- raise NotImplementedError("unknown loss type '{loss_type}'")
-
- return loss
-
- def p_losses(self, x_start, t, noise=None):
- noise = default(noise, lambda: torch.randn_like(x_start))
- x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
- model_out = self.model(x_noisy, t)
-
- loss_dict = {}
- if self.parameterization == "eps":
- target = noise
- elif self.parameterization == "x0":
- target = x_start
- elif self.parameterization == "v":
- target = self.get_v(x_start, noise, t)
- else:
- raise NotImplementedError(f"Parameterization {self.parameterization} not yet supported")
-
- loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3])
-
- log_prefix = 'train' if self.training else 'val'
-
- loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()})
- loss_simple = loss.mean() * self.l_simple_weight
-
- loss_vlb = (self.lvlb_weights[t] * loss).mean()
- loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb})
-
- loss = loss_simple + self.original_elbo_weight * loss_vlb
-
- loss_dict.update({f'{log_prefix}/loss': loss})
-
- return loss, loss_dict
-
- def forward(self, x, *args, **kwargs):
- # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size
- # assert h == img_size and w == img_size, f'height and width of image must be {img_size}'
- t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long()
- return self.p_losses(x, t, *args, **kwargs)
-
- def get_input(self, batch, k):
- x = batch[k]
- if len(x.shape) == 3:
- x = x[..., None]
- x = rearrange(x, 'b h w c -> b c h w')
- x = x.to(memory_format=torch.contiguous_format).float()
- return x
-
- def shared_step(self, batch):
- x = self.get_input(batch, self.first_stage_key)
- loss, loss_dict = self(x)
- return loss, loss_dict
-
- def training_step(self, batch, batch_idx):
- for k in self.ucg_training:
- p = self.ucg_training[k]["p"]
- val = self.ucg_training[k]["val"]
- if val is None:
- val = ""
- for i in range(len(batch[k])):
- if self.ucg_prng.choice(2, p=[1 - p, p]):
- batch[k][i] = val
-
- loss, loss_dict = self.shared_step(batch)
-
- self.log_dict(loss_dict, prog_bar=True,
- logger=True, on_step=True, on_epoch=True)
-
- self.log("global_step", self.global_step,
- prog_bar=True, logger=True, on_step=True, on_epoch=False)
-
- if self.use_scheduler:
- lr = self.optimizers().param_groups[0]['lr']
- self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False)
-
- return loss
-
- @torch.no_grad()
- def validation_step(self, batch, batch_idx):
- _, loss_dict_no_ema = self.shared_step(batch)
- with self.ema_scope():
- _, loss_dict_ema = self.shared_step(batch)
- loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema}
- self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True)
- self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True)
-
- def on_train_batch_end(self, *args, **kwargs):
- if self.use_ema:
- self.model_ema(self.model)
-
- def _get_rows_from_list(self, samples):
- n_imgs_per_row = len(samples)
- denoise_grid = rearrange(samples, 'n b c h w -> b n c h w')
- denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w')
- denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)
- return denoise_grid
-
- @torch.no_grad()
- def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs):
- log = dict()
- x = self.get_input(batch, self.first_stage_key)
- N = min(x.shape[0], N)
- n_row = min(x.shape[0], n_row)
- x = x.to(self.device)[:N]
- log["inputs"] = x
-
- # get diffusion row
- diffusion_row = list()
- x_start = x[:n_row]
-
- for t in range(self.num_timesteps):
- if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
- t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
- t = t.to(self.device).long()
- noise = torch.randn_like(x_start)
- x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
- diffusion_row.append(x_noisy)
-
- log["diffusion_row"] = self._get_rows_from_list(diffusion_row)
-
- if sample:
- # get denoise row
- with self.ema_scope("Plotting"):
- samples, denoise_row = self.sample(batch_size=N, return_intermediates=True)
-
- log["samples"] = samples
- log["denoise_row"] = self._get_rows_from_list(denoise_row)
-
- if return_keys:
- if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:
- return log
- else:
- return {key: log[key] for key in return_keys}
- return log
-
- def configure_optimizers(self):
- lr = self.learning_rate
- params = list(self.model.parameters())
- if self.learn_logvar:
- params = params + [self.logvar]
- opt = torch.optim.AdamW(params, lr=lr)
- return opt
-
-
-class LatentDiffusion(DDPM):
- """main class"""
-
- def __init__(self,
- first_stage_config,
- cond_stage_config,
- num_timesteps_cond=None,
- cond_stage_key="image",
- cond_stage_trainable=False,
- concat_mode=True,
- cond_stage_forward=None,
- conditioning_key=None,
- scale_factor=1.0,
- scale_by_std=False,
- force_null_conditioning=False,
- *args, **kwargs):
- self.force_null_conditioning = force_null_conditioning
- self.num_timesteps_cond = default(num_timesteps_cond, 1)
- self.scale_by_std = scale_by_std
- assert self.num_timesteps_cond <= kwargs['timesteps']
- # for backwards compatibility after implementation of DiffusionWrapper
- if conditioning_key is None:
- conditioning_key = 'concat' if concat_mode else 'crossattn'
- if cond_stage_config == '__is_unconditional__' and not self.force_null_conditioning:
- conditioning_key = None
- ckpt_path = kwargs.pop("ckpt_path", None)
- reset_ema = kwargs.pop("reset_ema", False)
- reset_num_ema_updates = kwargs.pop("reset_num_ema_updates", False)
- ignore_keys = kwargs.pop("ignore_keys", [])
- super().__init__(conditioning_key=conditioning_key, *args, **kwargs)
- self.concat_mode = concat_mode
- self.cond_stage_trainable = cond_stage_trainable
- self.cond_stage_key = cond_stage_key
- try:
- self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1
-        except Exception:
- self.num_downs = 0
- if not scale_by_std:
- self.scale_factor = scale_factor
- else:
- self.register_buffer('scale_factor', torch.tensor(scale_factor))
- self.instantiate_first_stage(first_stage_config)
- self.instantiate_cond_stage(cond_stage_config)
- self.cond_stage_forward = cond_stage_forward
- self.clip_denoised = False
- self.bbox_tokenizer = None
-
- self.restarted_from_ckpt = False
- if ckpt_path is not None:
- self.init_from_ckpt(ckpt_path, ignore_keys)
- self.restarted_from_ckpt = True
- if reset_ema:
- assert self.use_ema
- print(
- f"Resetting ema to pure model weights. This is useful when restoring from an ema-only checkpoint.")
- self.model_ema = LitEma(self.model)
- if reset_num_ema_updates:
- print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ")
- assert self.use_ema
- self.model_ema.reset_num_updates()
-
- def make_cond_schedule(self, ):
- self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long)
- ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long()
- self.cond_ids[:self.num_timesteps_cond] = ids
-
- @rank_zero_only
- @torch.no_grad()
- def on_train_batch_start(self, batch, batch_idx, dataloader_idx):
- # only for very first batch
- if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt:
- assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously'
- # set rescale weight to 1./std of encodings
- print("### USING STD-RESCALING ###")
- x = super().get_input(batch, self.first_stage_key)
- x = x.to(self.device)
- encoder_posterior = self.encode_first_stage(x)
- z = self.get_first_stage_encoding(encoder_posterior).detach()
- del self.scale_factor
- self.register_buffer('scale_factor', 1. / z.flatten().std())
- print(f"setting self.scale_factor to {self.scale_factor}")
- print("### USING STD-RESCALING ###")
-
- def register_schedule(self,
- given_betas=None, beta_schedule="linear", timesteps=1000,
- linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
- super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s)
-
- self.shorten_cond_schedule = self.num_timesteps_cond > 1
- if self.shorten_cond_schedule:
- self.make_cond_schedule()
-
- def instantiate_first_stage(self, config):
- model = instantiate_from_config(config)
- self.first_stage_model = model.eval()
- self.first_stage_model.train = disabled_train
- for param in self.first_stage_model.parameters():
- param.requires_grad = False
-
- def instantiate_cond_stage(self, config):
- if not self.cond_stage_trainable:
- if config == "__is_first_stage__":
- print("Using first stage also as cond stage.")
- self.cond_stage_model = self.first_stage_model
- elif config == "__is_unconditional__":
- print(f"Training {self.__class__.__name__} as an unconditional model.")
- self.cond_stage_model = None
- # self.be_unconditional = True
- else:
- model = instantiate_from_config(config)
- self.cond_stage_model = model.eval()
- self.cond_stage_model.train = disabled_train
- for param in self.cond_stage_model.parameters():
- param.requires_grad = False
- else:
- assert config != '__is_first_stage__'
- assert config != '__is_unconditional__'
- model = instantiate_from_config(config)
- self.cond_stage_model = model
-
- def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False):
- denoise_row = []
- for zd in tqdm(samples, desc=desc):
- denoise_row.append(self.decode_first_stage(zd.to(self.device),
- force_not_quantize=force_no_decoder_quantization))
- n_imgs_per_row = len(denoise_row)
- denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W
- denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w')
- denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w')
- denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)
- return denoise_grid
-
- def get_first_stage_encoding(self, encoder_posterior):
- if isinstance(encoder_posterior, DiagonalGaussianDistribution):
- z = encoder_posterior.sample()
- elif isinstance(encoder_posterior, torch.Tensor):
- z = encoder_posterior
- else:
- raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented")
- return self.scale_factor * z
-
- def get_learned_conditioning(self, c):
- if self.cond_stage_forward is None:
- if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode):
- c = self.cond_stage_model.encode(c)
- if isinstance(c, DiagonalGaussianDistribution):
- c = c.mode()
- else:
- c = self.cond_stage_model(c)
- else:
- assert hasattr(self.cond_stage_model, self.cond_stage_forward)
- c = getattr(self.cond_stage_model, self.cond_stage_forward)(c)
- return c
-
- def meshgrid(self, h, w):
- y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1)
- x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1)
-
- arr = torch.cat([y, x], dim=-1)
- return arr
-
- def delta_border(self, h, w):
- """
- :param h: height
- :param w: width
- :return: normalized distance to image border,
-         with min distance = 0 at border and max dist = 0.5 at image center
- """
- lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2)
- arr = self.meshgrid(h, w) / lower_right_corner
- dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0]
- dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0]
- edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0]
- return edge_dist
-
- def get_weighting(self, h, w, Ly, Lx, device):
- weighting = self.delta_border(h, w)
- weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"],
- self.split_input_params["clip_max_weight"], )
- weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device)
-
- if self.split_input_params["tie_braker"]:
- L_weighting = self.delta_border(Ly, Lx)
- L_weighting = torch.clip(L_weighting,
- self.split_input_params["clip_min_tie_weight"],
- self.split_input_params["clip_max_tie_weight"])
-
- L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device)
- weighting = weighting * L_weighting
- return weighting
-
- def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code
- """
- :param x: img of size (bs, c, h, w)
- :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1])
- """
- bs, nc, h, w = x.shape
-
- # number of crops in image
- Ly = (h - kernel_size[0]) // stride[0] + 1
- Lx = (w - kernel_size[1]) // stride[1] + 1
-
- if uf == 1 and df == 1:
- fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
- unfold = torch.nn.Unfold(**fold_params)
-
- fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params)
-
- weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype)
- normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap
- weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx))
-
- elif uf > 1 and df == 1:
- fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
- unfold = torch.nn.Unfold(**fold_params)
-
- fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf),
- dilation=1, padding=0,
- stride=(stride[0] * uf, stride[1] * uf))
- fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2)
-
- weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype)
- normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap
- weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx))
-
- elif df > 1 and uf == 1:
- fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
- unfold = torch.nn.Unfold(**fold_params)
-
- fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df),
- dilation=1, padding=0,
- stride=(stride[0] // df, stride[1] // df))
- fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2)
-
- weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype)
- normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap
- weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx))
-
- else:
- raise NotImplementedError
-
- return fold, unfold, normalization, weighting
-
- @torch.no_grad()
- def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False,
- cond_key=None, return_original_cond=False, bs=None, return_x=False):
- x = super().get_input(batch, k)
- if bs is not None:
- x = x[:bs]
- x = x.to(self.device)
- encoder_posterior = self.encode_first_stage(x)
- z = self.get_first_stage_encoding(encoder_posterior).detach()
-
- if self.model.conditioning_key is not None and not self.force_null_conditioning:
- if cond_key is None:
- cond_key = self.cond_stage_key
- if cond_key != self.first_stage_key:
- if cond_key in ['caption', 'coordinates_bbox', "txt"]:
- xc = batch[cond_key]
- elif cond_key in ['class_label', 'cls']:
- xc = batch
- else:
- xc = super().get_input(batch, cond_key).to(self.device)
- else:
- xc = x
- if not self.cond_stage_trainable or force_c_encode:
- if isinstance(xc, dict) or isinstance(xc, list):
- c = self.get_learned_conditioning(xc)
- else:
- c = self.get_learned_conditioning(xc.to(self.device))
- else:
- c = xc
- if bs is not None:
- c = c[:bs]
-
- if self.use_positional_encodings:
- pos_x, pos_y = self.compute_latent_shifts(batch)
- ckey = __conditioning_keys__[self.model.conditioning_key]
- c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y}
-
- else:
- c = None
- xc = None
- if self.use_positional_encodings:
- pos_x, pos_y = self.compute_latent_shifts(batch)
- c = {'pos_x': pos_x, 'pos_y': pos_y}
- out = [z, c]
- if return_first_stage_outputs:
- xrec = self.decode_first_stage(z)
- out.extend([x, xrec])
- if return_x:
- out.extend([x])
- if return_original_cond:
- out.append(xc)
- return out
-
- @torch.no_grad()
- def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False):
- if predict_cids:
- if z.dim() == 4:
- z = torch.argmax(z.exp(), dim=1).long()
- z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None)
- z = rearrange(z, 'b h w c -> b c h w').contiguous()
-
- z = 1. / self.scale_factor * z
- return self.first_stage_model.decode(z)
-
- @torch.no_grad()
- def encode_first_stage(self, x):
- return self.first_stage_model.encode(x)
-
- def shared_step(self, batch, **kwargs):
- x, c = self.get_input(batch, self.first_stage_key)
- loss = self(x, c)
- return loss
-
- def forward(self, x, c, *args, **kwargs):
- t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long()
- if self.model.conditioning_key is not None:
- assert c is not None
- if self.cond_stage_trainable:
- c = self.get_learned_conditioning(c)
- if self.shorten_cond_schedule: # TODO: drop this option
- tc = self.cond_ids[t].to(self.device)
- c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float()))
- return self.p_losses(x, c, t, *args, **kwargs)
-
- def apply_model(self, x_noisy, t, cond, return_ids=False):
- if isinstance(cond, dict):
- # hybrid case, cond is expected to be a dict
- pass
- else:
- if not isinstance(cond, list):
- cond = [cond]
- key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn'
- cond = {key: cond}
- x_recon = self.model(x_noisy, t, **cond)
-
- if isinstance(x_recon, tuple) and not return_ids:
- return x_recon[0]
- else:
- return x_recon
-
- def _predict_eps_from_xstart(self, x_t, t, pred_xstart):
- return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \
- extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
-
- def _prior_bpd(self, x_start):
- """
- Get the prior KL term for the variational lower-bound, measured in
- bits-per-dim.
- This term can't be optimized, as it only depends on the encoder.
- :param x_start: the [N x C x ...] tensor of inputs.
- :return: a batch of [N] KL values (in bits), one per batch element.
- """
- batch_size = x_start.shape[0]
- t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)
- qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)
- kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)
- return mean_flat(kl_prior) / np.log(2.0)
-
- def p_losses(self, x_start, cond, t, noise=None):
- noise = default(noise, lambda: torch.randn_like(x_start))
- x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
- model_output = self.apply_model(x_noisy, t, cond)
-
- loss_dict = {}
- prefix = 'train' if self.training else 'val'
-
- if self.parameterization == "x0":
- target = x_start
- elif self.parameterization == "eps":
- target = noise
- elif self.parameterization == "v":
- target = self.get_v(x_start, noise, t)
- else:
- raise NotImplementedError()
-
- loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3])
- loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()})
-
- logvar_t = self.logvar[t].to(self.device)
- loss = loss_simple / torch.exp(logvar_t) + logvar_t
- # loss = loss_simple / torch.exp(self.logvar) + self.logvar
- if self.learn_logvar:
- loss_dict.update({f'{prefix}/loss_gamma': loss.mean()})
- loss_dict.update({'logvar': self.logvar.data.mean()})
-
- loss = self.l_simple_weight * loss.mean()
-
- loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3))
- loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean()
- loss_dict.update({f'{prefix}/loss_vlb': loss_vlb})
- loss += (self.original_elbo_weight * loss_vlb)
- loss_dict.update({f'{prefix}/loss': loss})
-
- return loss, loss_dict
-
- def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False,
- return_x0=False, score_corrector=None, corrector_kwargs=None):
- t_in = t
- model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids)
-
- if score_corrector is not None:
- assert self.parameterization == "eps"
- model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs)
-
- if return_codebook_ids:
- model_out, logits = model_out
-
- if self.parameterization == "eps":
- x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)
- elif self.parameterization == "x0":
- x_recon = model_out
- else:
- raise NotImplementedError()
-
- if clip_denoised:
- x_recon.clamp_(-1., 1.)
- if quantize_denoised:
- x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon)
- model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
- if return_codebook_ids:
- return model_mean, posterior_variance, posterior_log_variance, logits
- elif return_x0:
- return model_mean, posterior_variance, posterior_log_variance, x_recon
- else:
- return model_mean, posterior_variance, posterior_log_variance
-
- @torch.no_grad()
- def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False,
- return_codebook_ids=False, quantize_denoised=False, return_x0=False,
- temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None):
- b, *_, device = *x.shape, x.device
- outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised,
- return_codebook_ids=return_codebook_ids,
- quantize_denoised=quantize_denoised,
- return_x0=return_x0,
- score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)
- if return_codebook_ids:
- raise DeprecationWarning("Support dropped.")
- model_mean, _, model_log_variance, logits = outputs
- elif return_x0:
- model_mean, _, model_log_variance, x0 = outputs
- else:
- model_mean, _, model_log_variance = outputs
-
- noise = noise_like(x.shape, device, repeat_noise) * temperature
- if noise_dropout > 0.:
- noise = torch.nn.functional.dropout(noise, p=noise_dropout)
- # no noise when t == 0
- nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
-
- if return_codebook_ids:
- return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1)
- if return_x0:
- return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0
- else:
- return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
-
- @torch.no_grad()
- def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False,
- img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0.,
- score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None,
- log_every_t=None):
- if not log_every_t:
- log_every_t = self.log_every_t
- timesteps = self.num_timesteps
- if batch_size is not None:
- b = batch_size if batch_size is not None else shape[0]
- shape = [batch_size] + list(shape)
- else:
- b = batch_size = shape[0]
- if x_T is None:
- img = torch.randn(shape, device=self.device)
- else:
- img = x_T
- intermediates = []
- if cond is not None:
- if isinstance(cond, dict):
- cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
- list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
- else:
- cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
-
- if start_T is not None:
- timesteps = min(timesteps, start_T)
- iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation',
- total=timesteps) if verbose else reversed(
- range(0, timesteps))
- if type(temperature) == float:
- temperature = [temperature] * timesteps
-
- for i in iterator:
- ts = torch.full((b,), i, device=self.device, dtype=torch.long)
- if self.shorten_cond_schedule:
- assert self.model.conditioning_key != 'hybrid'
- tc = self.cond_ids[ts].to(cond.device)
- cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))
-
- img, x0_partial = self.p_sample(img, cond, ts,
- clip_denoised=self.clip_denoised,
- quantize_denoised=quantize_denoised, return_x0=True,
- temperature=temperature[i], noise_dropout=noise_dropout,
- score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)
- if mask is not None:
- assert x0 is not None
- img_orig = self.q_sample(x0, ts)
- img = img_orig * mask + (1. - mask) * img
-
- if i % log_every_t == 0 or i == timesteps - 1:
- intermediates.append(x0_partial)
- if callback: callback(i)
- if img_callback: img_callback(img, i)
- return img, intermediates
-
- @torch.no_grad()
- def p_sample_loop(self, cond, shape, return_intermediates=False,
- x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False,
- mask=None, x0=None, img_callback=None, start_T=None,
- log_every_t=None):
-
- if not log_every_t:
- log_every_t = self.log_every_t
- device = self.betas.device
- b = shape[0]
- if x_T is None:
- img = torch.randn(shape, device=device)
- else:
- img = x_T
-
- intermediates = [img]
- if timesteps is None:
- timesteps = self.num_timesteps
-
- if start_T is not None:
- timesteps = min(timesteps, start_T)
- iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed(
- range(0, timesteps))
-
- if mask is not None:
- assert x0 is not None
- assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match
-
- for i in iterator:
- ts = torch.full((b,), i, device=device, dtype=torch.long)
- if self.shorten_cond_schedule:
- assert self.model.conditioning_key != 'hybrid'
- tc = self.cond_ids[ts].to(cond.device)
- cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))
-
- img = self.p_sample(img, cond, ts,
- clip_denoised=self.clip_denoised,
- quantize_denoised=quantize_denoised)
- if mask is not None:
- img_orig = self.q_sample(x0, ts)
- img = img_orig * mask + (1. - mask) * img
-
- if i % log_every_t == 0 or i == timesteps - 1:
- intermediates.append(img)
- if callback: callback(i)
- if img_callback: img_callback(img, i)
-
- if return_intermediates:
- return img, intermediates
- return img
-
- @torch.no_grad()
- def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None,
- verbose=True, timesteps=None, quantize_denoised=False,
- mask=None, x0=None, shape=None, **kwargs):
- if shape is None:
- shape = (batch_size, self.channels, self.image_size, self.image_size)
- if cond is not None:
- if isinstance(cond, dict):
- cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
- list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
- else:
- cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
- return self.p_sample_loop(cond,
- shape,
- return_intermediates=return_intermediates, x_T=x_T,
- verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised,
- mask=mask, x0=x0)
-
- @torch.no_grad()
- def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs):
- if ddim:
- ddim_sampler = DDIMSampler(self)
- shape = (self.channels, self.image_size, self.image_size)
- samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size,
- shape, cond, verbose=False, **kwargs)
-
- else:
- samples, intermediates = self.sample(cond=cond, batch_size=batch_size,
- return_intermediates=True, **kwargs)
-
- return samples, intermediates
-
- @torch.no_grad()
- def get_unconditional_conditioning(self, batch_size, null_label=None):
- if null_label is not None:
- xc = null_label
- if isinstance(xc, ListConfig):
- xc = list(xc)
- if isinstance(xc, dict) or isinstance(xc, list):
- c = self.get_learned_conditioning(xc)
- else:
- if hasattr(xc, "to"):
- xc = xc.to(self.device)
- c = self.get_learned_conditioning(xc)
- else:
- if self.cond_stage_key in ["class_label", "cls"]:
- xc = self.cond_stage_model.get_unconditional_conditioning(batch_size, device=self.device)
- return self.get_learned_conditioning(xc)
- else:
- raise NotImplementedError("todo")
- if isinstance(c, list): # in case the encoder gives us a list
- for i in range(len(c)):
- c[i] = repeat(c[i], '1 ... -> b ...', b=batch_size).to(self.device)
- else:
- c = repeat(c, '1 ... -> b ...', b=batch_size).to(self.device)
- return c
-
- @torch.no_grad()
- def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=50, ddim_eta=0., return_keys=None,
- quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True,
- plot_diffusion_rows=True, unconditional_guidance_scale=1., unconditional_guidance_label=None,
- use_ema_scope=True,
- **kwargs):
- ema_scope = self.ema_scope if use_ema_scope else nullcontext
- use_ddim = ddim_steps is not None
-
- log = dict()
- z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key,
- return_first_stage_outputs=True,
- force_c_encode=True,
- return_original_cond=True,
- bs=N)
- N = min(x.shape[0], N)
- n_row = min(x.shape[0], n_row)
- log["inputs"] = x
- log["reconstruction"] = xrec
- if self.model.conditioning_key is not None:
- if hasattr(self.cond_stage_model, "decode"):
- xc = self.cond_stage_model.decode(c)
- log["conditioning"] = xc
- elif self.cond_stage_key in ["caption", "txt"]:
- xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25)
- log["conditioning"] = xc
- elif self.cond_stage_key in ['class_label', "cls"]:
- try:
- xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25)
- log['conditioning'] = xc
- except KeyError:
- # probably no "human_label" in batch
- pass
- elif isimage(xc):
- log["conditioning"] = xc
- if ismap(xc):
- log["original_conditioning"] = self.to_rgb(xc)
-
- if plot_diffusion_rows:
- # get diffusion row
- diffusion_row = list()
- z_start = z[:n_row]
- for t in range(self.num_timesteps):
- if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
- t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
- t = t.to(self.device).long()
- noise = torch.randn_like(z_start)
- z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise)
- diffusion_row.append(self.decode_first_stage(z_noisy))
-
- diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W
- diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w')
- diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w')
- diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0])
- log["diffusion_row"] = diffusion_grid
-
- if sample:
- # get denoise row
- with ema_scope("Sampling"):
- samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,
- ddim_steps=ddim_steps, eta=ddim_eta)
- # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True)
- x_samples = self.decode_first_stage(samples)
- log["samples"] = x_samples
- if plot_denoise_rows:
- denoise_grid = self._get_denoise_row_from_list(z_denoise_row)
- log["denoise_row"] = denoise_grid
-
- if quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance(
- self.first_stage_model, IdentityFirstStage):
- # also display when quantizing x0 while sampling
- with ema_scope("Plotting Quantized Denoised"):
- samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,
- ddim_steps=ddim_steps, eta=ddim_eta,
- quantize_denoised=True)
- # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True,
- # quantize_denoised=True)
- x_samples = self.decode_first_stage(samples.to(self.device))
- log["samples_x0_quantized"] = x_samples
-
- if unconditional_guidance_scale > 1.0:
- uc = self.get_unconditional_conditioning(N, unconditional_guidance_label)
- if self.model.conditioning_key == "crossattn-adm":
- uc = {"c_crossattn": [uc], "c_adm": c["c_adm"]}
- with ema_scope("Sampling with classifier-free guidance"):
- samples_cfg, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,
- ddim_steps=ddim_steps, eta=ddim_eta,
- unconditional_guidance_scale=unconditional_guidance_scale,
- unconditional_conditioning=uc,
- )
- x_samples_cfg = self.decode_first_stage(samples_cfg)
- log[f"samples_cfg_scale_{unconditional_guidance_scale:.2f}"] = x_samples_cfg
-
- if inpaint:
- # make a simple center square
- b, h, w = z.shape[0], z.shape[2], z.shape[3]
- mask = torch.ones(N, h, w).to(self.device)
- # zeros will be filled in
- mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0.
- mask = mask[:, None, ...]
- with ema_scope("Plotting Inpaint"):
- samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, eta=ddim_eta,
- ddim_steps=ddim_steps, x0=z[:N], mask=mask)
- x_samples = self.decode_first_stage(samples.to(self.device))
- log["samples_inpainting"] = x_samples
- log["mask"] = mask
-
- # outpaint
- mask = 1. - mask
- with ema_scope("Plotting Outpaint"):
- samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, eta=ddim_eta,
- ddim_steps=ddim_steps, x0=z[:N], mask=mask)
- x_samples = self.decode_first_stage(samples.to(self.device))
- log["samples_outpainting"] = x_samples
-
- if plot_progressive_rows:
- with ema_scope("Plotting Progressives"):
- img, progressives = self.progressive_denoising(c,
- shape=(self.channels, self.image_size, self.image_size),
- batch_size=N)
- prog_row = self._get_denoise_row_from_list(progressives, desc="Progressive Generation")
- log["progressive_row"] = prog_row
-
- if return_keys:
- if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:
- return log
- else:
- return {key: log[key] for key in return_keys}
- return log
-
- def configure_optimizers(self):
- lr = self.learning_rate
- params = list(self.model.parameters())
- if self.cond_stage_trainable:
- print(f"{self.__class__.__name__}: Also optimizing conditioner params!")
- params = params + list(self.cond_stage_model.parameters())
- if self.learn_logvar:
- print('Diffusion model optimizing logvar')
- params.append(self.logvar)
- opt = torch.optim.AdamW(params, lr=lr)
- if self.use_scheduler:
- assert 'target' in self.scheduler_config
- scheduler = instantiate_from_config(self.scheduler_config)
-
- print("Setting up LambdaLR scheduler...")
- scheduler = [
- {
- 'scheduler': LambdaLR(opt, lr_lambda=scheduler.schedule),
- 'interval': 'step',
- 'frequency': 1
- }]
- return [opt], scheduler
- return opt
-
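- # Illustrative sketch (not part of the original class): configure_optimizers
- # above expects `scheduler_config` to be an instantiate_from_config-style dict
- # whose target exposes a `schedule(step)` callable, which LambdaLR multiplies
- # into the base learning rate each optimizer step. A hypothetical config
- # (target path and values are assumptions, not confirmed by this file):
- #
- # scheduler_config = {
- # "target": "ldm.lr_scheduler.LambdaLinearScheduler",
- # "params": {"warm_up_steps": [1000], "f_start": [1e-6], "f_max": [1.0],
- # "f_min": [1.0], "cycle_lengths": [10000000]},
- # }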
- @torch.no_grad()
- def to_rgb(self, x):
- x = x.float()
- if not hasattr(self, "colorize"):
- self.colorize = torch.randn(3, x.shape[1], 1, 1).to(x)
- x = nn.functional.conv2d(x, weight=self.colorize)
- x = 2. * (x - x.min()) / (x.max() - x.min()) - 1.
- return x
-
-
-class DiffusionWrapper(pl.LightningModule):
- def __init__(self, diff_model_config, conditioning_key):
- super().__init__()
- self.sequential_cross_attn = diff_model_config.pop("sequential_crossattn", False)
- self.diffusion_model = instantiate_from_config(diff_model_config)
- self.conditioning_key = conditioning_key
- assert self.conditioning_key in [None, 'concat', 'crossattn', 'hybrid', 'adm', 'hybrid-adm', 'crossattn-adm']
-
- def forward(self, x, t, c_concat: list = None, c_crossattn: list = None, c_adm=None):
- if self.conditioning_key is None:
- out = self.diffusion_model(x, t)
- elif self.conditioning_key == 'concat':
- xc = torch.cat([x] + c_concat, dim=1)
- out = self.diffusion_model(xc, t)
- elif self.conditioning_key == 'crossattn':
- if not self.sequential_cross_attn:
- cc = torch.cat(c_crossattn, 1)
- else:
- cc = c_crossattn
- out = self.diffusion_model(x, t, context=cc)
- elif self.conditioning_key == 'hybrid':
- xc = torch.cat([x] + c_concat, dim=1)
- cc = torch.cat(c_crossattn, 1)
- out = self.diffusion_model(xc, t, context=cc)
- elif self.conditioning_key == 'hybrid-adm':
- assert c_adm is not None
- xc = torch.cat([x] + c_concat, dim=1)
- cc = torch.cat(c_crossattn, 1)
- out = self.diffusion_model(xc, t, context=cc, y=c_adm)
- elif self.conditioning_key == 'crossattn-adm':
- assert c_adm is not None
- cc = torch.cat(c_crossattn, 1)
- out = self.diffusion_model(x, t, context=cc, y=c_adm)
- elif self.conditioning_key == 'adm':
- cc = c_crossattn[0]
- out = self.diffusion_model(x, t, y=cc)
- else:
- raise NotImplementedError()
-
- return out
-
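- # Hedged usage sketch (added for illustration; variable names are assumptions):
- # the wrapper routes conditioning tensors to the inner UNet according to
- # `conditioning_key`. A text-conditioned model would be driven roughly as:
- #
- # wrapper = DiffusionWrapper(unet_config, conditioning_key='crossattn')
- # noise_pred = wrapper(x_noisy, t, c_crossattn=[text_embeddings])
- #
- # For 'hybrid', both a channel-concat tensor and a cross-attention context are
- # passed: wrapper(x_noisy, t, c_concat=[mask_latents], c_crossattn=[text_embeddings]).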
-
-class LatentUpscaleDiffusion(LatentDiffusion):
- def __init__(self, *args, low_scale_config, low_scale_key="LR", noise_level_key=None, **kwargs):
- super().__init__(*args, **kwargs)
- # assumes that neither the cond_stage nor the low_scale_model contain trainable params
- assert not self.cond_stage_trainable
- self.instantiate_low_stage(low_scale_config)
- self.low_scale_key = low_scale_key
- self.noise_level_key = noise_level_key
-
- def instantiate_low_stage(self, config):
- model = instantiate_from_config(config)
- self.low_scale_model = model.eval()
- self.low_scale_model.train = disabled_train
- for param in self.low_scale_model.parameters():
- param.requires_grad = False
-
- @torch.no_grad()
- def get_input(self, batch, k, cond_key=None, bs=None, log_mode=False):
- if not log_mode:
- z, c = super().get_input(batch, k, force_c_encode=True, bs=bs)
- else:
- z, c, x, xrec, xc = super().get_input(batch, self.first_stage_key, return_first_stage_outputs=True,
- force_c_encode=True, return_original_cond=True, bs=bs)
- x_low = batch[self.low_scale_key][:bs]
- x_low = rearrange(x_low, 'b h w c -> b c h w')
- x_low = x_low.to(memory_format=torch.contiguous_format).float()
- zx, noise_level = self.low_scale_model(x_low)
- if self.noise_level_key is not None:
- # get noise level from batch instead, e.g. when extracting a custom noise level for bsr
- raise NotImplementedError('TODO')
-
- all_conds = {"c_concat": [zx], "c_crossattn": [c], "c_adm": noise_level}
- if log_mode:
- # TODO: maybe disable if too expensive
- x_low_rec = self.low_scale_model.decode(zx)
- return z, all_conds, x, xrec, xc, x_low, x_low_rec, noise_level
- return z, all_conds
-
- @torch.no_grad()
- def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None,
- plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True,
- unconditional_guidance_scale=1., unconditional_guidance_label=None, use_ema_scope=True,
- **kwargs):
- ema_scope = self.ema_scope if use_ema_scope else nullcontext
- use_ddim = ddim_steps is not None
-
- log = dict()
- z, c, x, xrec, xc, x_low, x_low_rec, noise_level = self.get_input(batch, self.first_stage_key, bs=N,
- log_mode=True)
- N = min(x.shape[0], N)
- n_row = min(x.shape[0], n_row)
- log["inputs"] = x
- log["reconstruction"] = xrec
- log["x_lr"] = x_low
- log[f"x_lr_rec_@noise_levels{'-'.join(map(lambda x: str(x), list(noise_level.cpu().numpy())))}"] = x_low_rec
- if self.model.conditioning_key is not None:
- if hasattr(self.cond_stage_model, "decode"):
- xc = self.cond_stage_model.decode(c)
- log["conditioning"] = xc
- elif self.cond_stage_key in ["caption", "txt"]:
- xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25)
- log["conditioning"] = xc
- elif self.cond_stage_key in ['class_label', 'cls']:
- xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25)
- log['conditioning'] = xc
- elif isimage(xc):
- log["conditioning"] = xc
- if ismap(xc):
- log["original_conditioning"] = self.to_rgb(xc)
-
- if plot_diffusion_rows:
- # get diffusion row
- diffusion_row = list()
- z_start = z[:n_row]
- for t in range(self.num_timesteps):
- if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
- t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
- t = t.to(self.device).long()
- noise = torch.randn_like(z_start)
- z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise)
- diffusion_row.append(self.decode_first_stage(z_noisy))
-
- diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W
- diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w')
- diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w')
- diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0])
- log["diffusion_row"] = diffusion_grid
-
- if sample:
- # get denoise row
- with ema_scope("Sampling"):
- samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,
- ddim_steps=ddim_steps, eta=ddim_eta)
- # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True)
- x_samples = self.decode_first_stage(samples)
- log["samples"] = x_samples
- if plot_denoise_rows:
- denoise_grid = self._get_denoise_row_from_list(z_denoise_row)
- log["denoise_row"] = denoise_grid
-
- if unconditional_guidance_scale > 1.0:
- uc_tmp = self.get_unconditional_conditioning(N, unconditional_guidance_label)
- # TODO explore better "unconditional" choices for the other keys
- # maybe guide away from empty text label and highest noise level and maximally degraded zx?
- uc = dict()
- for k in c:
- if k == "c_crossattn":
- assert isinstance(c[k], list) and len(c[k]) == 1
- uc[k] = [uc_tmp]
- elif k == "c_adm": # todo: only run with text-based guidance?
- assert isinstance(c[k], torch.Tensor)
- #uc[k] = torch.ones_like(c[k]) * self.low_scale_model.max_noise_level
- uc[k] = c[k]
- elif isinstance(c[k], list):
- uc[k] = [c[k][i] for i in range(len(c[k]))]
- else:
- uc[k] = c[k]
-
- with ema_scope("Sampling with classifier-free guidance"):
- samples_cfg, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,
- ddim_steps=ddim_steps, eta=ddim_eta,
- unconditional_guidance_scale=unconditional_guidance_scale,
- unconditional_conditioning=uc,
- )
- x_samples_cfg = self.decode_first_stage(samples_cfg)
- log[f"samples_cfg_scale_{unconditional_guidance_scale:.2f}"] = x_samples_cfg
-
- if plot_progressive_rows:
- with ema_scope("Plotting Progressives"):
- img, progressives = self.progressive_denoising(c,
- shape=(self.channels, self.image_size, self.image_size),
- batch_size=N)
- prog_row = self._get_denoise_row_from_list(progressives, desc="Progressive Generation")
- log["progressive_row"] = prog_row
-
- return log
-
-
-class LatentFinetuneDiffusion(LatentDiffusion):
- """
- Basis for different finetuning tasks, such as inpainting or depth2image
- To disable finetuning mode, set finetune_keys to None
- """
-
- def __init__(self,
- concat_keys: tuple,
- finetune_keys=("model.diffusion_model.input_blocks.0.0.weight",
- "model_ema.diffusion_modelinput_blocks00weight"
- ),
- keep_finetune_dims=4,
- # if model was trained without concat mode before and we would like to keep these channels
- c_concat_log_start=None, # to log reconstruction of c_concat codes
- c_concat_log_end=None,
- *args, **kwargs
- ):
- ckpt_path = kwargs.pop("ckpt_path", None)
- ignore_keys = kwargs.pop("ignore_keys", list())
- super().__init__(*args, **kwargs)
- self.finetune_keys = finetune_keys
- self.concat_keys = concat_keys
- self.keep_dims = keep_finetune_dims
- self.c_concat_log_start = c_concat_log_start
- self.c_concat_log_end = c_concat_log_end
- if exists(self.finetune_keys): assert exists(ckpt_path), 'can only finetune from a given checkpoint'
- if exists(ckpt_path):
- self.init_from_ckpt(ckpt_path, ignore_keys)
-
- def init_from_ckpt(self, path, ignore_keys=list(), only_model=False):
- sd = torch.load(path, map_location="cpu")
- if "state_dict" in list(sd.keys()):
- sd = sd["state_dict"]
- keys = list(sd.keys())
- for k in keys:
- for ik in ignore_keys:
- if k.startswith(ik):
- print("Deleting key {} from state_dict.".format(k))
- del sd[k]
-
- # make it explicit, finetune by including extra input channels
- if exists(self.finetune_keys) and k in self.finetune_keys:
- new_entry = None
- for name, param in self.named_parameters():
- if name in self.finetune_keys:
- print(
- f"modifying key '{name}' and keeping its original {self.keep_dims} (channels) dimensions only")
- new_entry = torch.zeros_like(param) # zero init
- assert exists(new_entry), 'did not find matching parameter to modify'
- new_entry[:, :self.keep_dims, ...] = sd[k]
- sd[k] = new_entry
-
- missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(
- sd, strict=False)
- print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
- if len(missing) > 0:
- print(f"Missing Keys: {missing}")
- if len(unexpected) > 0:
- print(f"Unexpected Keys: {unexpected}")
-
- @torch.no_grad()
- def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None,
- quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True,
- plot_diffusion_rows=True, unconditional_guidance_scale=1., unconditional_guidance_label=None,
- use_ema_scope=True,
- **kwargs):
- ema_scope = self.ema_scope if use_ema_scope else nullcontext
- use_ddim = ddim_steps is not None
-
- log = dict()
- z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, bs=N, return_first_stage_outputs=True)
- c_cat, c = c["c_concat"][0], c["c_crossattn"][0]
- N = min(x.shape[0], N)
- n_row = min(x.shape[0], n_row)
- log["inputs"] = x
- log["reconstruction"] = xrec
- if self.model.conditioning_key is not None:
- if hasattr(self.cond_stage_model, "decode"):
- xc = self.cond_stage_model.decode(c)
- log["conditioning"] = xc
- elif self.cond_stage_key in ["caption", "txt"]:
- xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25)
- log["conditioning"] = xc
- elif self.cond_stage_key in ['class_label', 'cls']:
- xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25)
- log['conditioning'] = xc
- elif isimage(xc):
- log["conditioning"] = xc
- if ismap(xc):
- log["original_conditioning"] = self.to_rgb(xc)
-
- if not (self.c_concat_log_start is None and self.c_concat_log_end is None):
- log["c_concat_decoded"] = self.decode_first_stage(c_cat[:, self.c_concat_log_start:self.c_concat_log_end])
-
- if plot_diffusion_rows:
- # get diffusion row
- diffusion_row = list()
- z_start = z[:n_row]
- for t in range(self.num_timesteps):
- if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
- t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
- t = t.to(self.device).long()
- noise = torch.randn_like(z_start)
- z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise)
- diffusion_row.append(self.decode_first_stage(z_noisy))
-
- diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W
- diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w')
- diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w')
- diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0])
- log["diffusion_row"] = diffusion_grid
-
- if sample:
- # get denoise row
- with ema_scope("Sampling"):
- samples, z_denoise_row = self.sample_log(cond={"c_concat": [c_cat], "c_crossattn": [c]},
- batch_size=N, ddim=use_ddim,
- ddim_steps=ddim_steps, eta=ddim_eta)
- # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True)
- x_samples = self.decode_first_stage(samples)
- log["samples"] = x_samples
- if plot_denoise_rows:
- denoise_grid = self._get_denoise_row_from_list(z_denoise_row)
- log["denoise_row"] = denoise_grid
-
- if unconditional_guidance_scale > 1.0:
- uc_cross = self.get_unconditional_conditioning(N, unconditional_guidance_label)
- uc_cat = c_cat
- uc_full = {"c_concat": [uc_cat], "c_crossattn": [uc_cross]}
- with ema_scope("Sampling with classifier-free guidance"):
- samples_cfg, _ = self.sample_log(cond={"c_concat": [c_cat], "c_crossattn": [c]},
- batch_size=N, ddim=use_ddim,
- ddim_steps=ddim_steps, eta=ddim_eta,
- unconditional_guidance_scale=unconditional_guidance_scale,
- unconditional_conditioning=uc_full,
- )
- x_samples_cfg = self.decode_first_stage(samples_cfg)
- log[f"samples_cfg_scale_{unconditional_guidance_scale:.2f}"] = x_samples_cfg
-
- return log
-
-
-class LatentInpaintDiffusion(LatentFinetuneDiffusion):
- """
- can either run as pure inpainting model (only concat mode) or with mixed conditionings,
- e.g. mask as concat and text via cross-attn.
- To disable finetuning mode, set finetune_keys to None
- """
-
- def __init__(self,
- concat_keys=("mask", "masked_image"),
- masked_image_key="masked_image",
- *args, **kwargs
- ):
- super().__init__(concat_keys, *args, **kwargs)
- self.masked_image_key = masked_image_key
- assert self.masked_image_key in concat_keys
-
- @torch.no_grad()
- def get_input(self, batch, k, cond_key=None, bs=None, return_first_stage_outputs=False):
- # note: restricted to non-trainable encoders currently
- assert not self.cond_stage_trainable, 'trainable cond stages not yet supported for inpainting'
- z, c, x, xrec, xc = super().get_input(batch, self.first_stage_key, return_first_stage_outputs=True,
- force_c_encode=True, return_original_cond=True, bs=bs)
-
- assert exists(self.concat_keys)
- c_cat = list()
- for ck in self.concat_keys:
- cc = rearrange(batch[ck], 'b h w c -> b c h w').to(memory_format=torch.contiguous_format).float()
- if bs is not None:
- cc = cc[:bs]
- cc = cc.to(self.device)
- bchw = z.shape
- if ck != self.masked_image_key:
- cc = torch.nn.functional.interpolate(cc, size=bchw[-2:])
- else:
- cc = self.get_first_stage_encoding(self.encode_first_stage(cc))
- c_cat.append(cc)
- c_cat = torch.cat(c_cat, dim=1)
- all_conds = {"c_concat": [c_cat], "c_crossattn": [c]}
- if return_first_stage_outputs:
- return z, all_conds, x, xrec, xc
- return z, all_conds
-
- @torch.no_grad()
- def log_images(self, *args, **kwargs):
- log = super(LatentInpaintDiffusion, self).log_images(*args, **kwargs)
- log["masked_image"] = rearrange(args[0]["masked_image"],
- 'b h w c -> b c h w').to(memory_format=torch.contiguous_format).float()
- return log
-
-
-class LatentDepth2ImageDiffusion(LatentFinetuneDiffusion):
- """
- condition on monocular depth estimation
- """
-
- def __init__(self, depth_stage_config, concat_keys=("midas_in",), *args, **kwargs):
- super().__init__(concat_keys=concat_keys, *args, **kwargs)
- self.depth_model = instantiate_from_config(depth_stage_config)
- self.depth_stage_key = concat_keys[0]
-
- @torch.no_grad()
- def get_input(self, batch, k, cond_key=None, bs=None, return_first_stage_outputs=False):
- # note: restricted to non-trainable encoders currently
- assert not self.cond_stage_trainable, 'trainable cond stages not yet supported for depth2img'
- z, c, x, xrec, xc = super().get_input(batch, self.first_stage_key, return_first_stage_outputs=True,
- force_c_encode=True, return_original_cond=True, bs=bs)
-
- assert exists(self.concat_keys)
- assert len(self.concat_keys) == 1
- c_cat = list()
- for ck in self.concat_keys:
- cc = batch[ck]
- if bs is not None:
- cc = cc[:bs]
- cc = cc.to(self.device)
- cc = self.depth_model(cc)
- cc = torch.nn.functional.interpolate(
- cc,
- size=z.shape[2:],
- mode="bicubic",
- align_corners=False,
- )
-
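- # rescale the predicted depth map to [-1, 1]; the + 0.001 below guards against division by zero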
- depth_min, depth_max = torch.amin(cc, dim=[1, 2, 3], keepdim=True), torch.amax(cc, dim=[1, 2, 3],
- keepdim=True)
- cc = 2. * (cc - depth_min) / (depth_max - depth_min + 0.001) - 1.
- c_cat.append(cc)
- c_cat = torch.cat(c_cat, dim=1)
- all_conds = {"c_concat": [c_cat], "c_crossattn": [c]}
- if return_first_stage_outputs:
- return z, all_conds, x, xrec, xc
- return z, all_conds
-
- @torch.no_grad()
- def log_images(self, *args, **kwargs):
- log = super().log_images(*args, **kwargs)
- depth = self.depth_model(args[0][self.depth_stage_key])
- depth_min, depth_max = torch.amin(depth, dim=[1, 2, 3], keepdim=True), \
- torch.amax(depth, dim=[1, 2, 3], keepdim=True)
- log["depth"] = 2. * (depth - depth_min) / (depth_max - depth_min) - 1.
- return log
-
-
-class LatentUpscaleFinetuneDiffusion(LatentFinetuneDiffusion):
- """
- condition on low-res image (and optionally on some spatial noise augmentation)
- """
- def __init__(self, concat_keys=("lr",), reshuffle_patch_size=None,
- low_scale_config=None, low_scale_key=None, *args, **kwargs):
- super().__init__(concat_keys=concat_keys, *args, **kwargs)
- self.reshuffle_patch_size = reshuffle_patch_size
- self.low_scale_model = None
- if low_scale_config is not None:
- print("Initializing a low-scale model")
- assert exists(low_scale_key)
- self.instantiate_low_stage(low_scale_config)
- self.low_scale_key = low_scale_key
-
- def instantiate_low_stage(self, config):
- model = instantiate_from_config(config)
- self.low_scale_model = model.eval()
- self.low_scale_model.train = disabled_train
- for param in self.low_scale_model.parameters():
- param.requires_grad = False
-
- @torch.no_grad()
- def get_input(self, batch, k, cond_key=None, bs=None, return_first_stage_outputs=False):
- # note: restricted to non-trainable encoders currently
- assert not self.cond_stage_trainable, 'trainable cond stages not yet supported for upscaling-ft'
- z, c, x, xrec, xc = super().get_input(batch, self.first_stage_key, return_first_stage_outputs=True,
- force_c_encode=True, return_original_cond=True, bs=bs)
-
- assert exists(self.concat_keys)
- assert len(self.concat_keys) == 1
- # optionally make spatial noise_level here
- c_cat = list()
- noise_level = None
- for ck in self.concat_keys:
- cc = batch[ck]
- cc = rearrange(cc, 'b h w c -> b c h w')
- if exists(self.reshuffle_patch_size):
- assert isinstance(self.reshuffle_patch_size, int)
- cc = rearrange(cc, 'b c (p1 h) (p2 w) -> b (p1 p2 c) h w',
- p1=self.reshuffle_patch_size, p2=self.reshuffle_patch_size)
- if bs is not None:
- cc = cc[:bs]
- cc = cc.to(self.device)
- if exists(self.low_scale_model) and ck == self.low_scale_key:
- cc, noise_level = self.low_scale_model(cc)
- c_cat.append(cc)
- c_cat = torch.cat(c_cat, dim=1)
- if exists(noise_level):
- all_conds = {"c_concat": [c_cat], "c_crossattn": [c], "c_adm": noise_level}
- else:
- all_conds = {"c_concat": [c_cat], "c_crossattn": [c]}
- if return_first_stage_outputs:
- return z, all_conds, x, xrec, xc
- return z, all_conds
-
- @torch.no_grad()
- def log_images(self, *args, **kwargs):
- log = super().log_images(*args, **kwargs)
- log["lr"] = rearrange(args[0]["lr"], 'b h w c -> b c h w')
- return log
diff --git a/spaces/k1ngtai/MMS/vits/data_utils.py b/spaces/k1ngtai/MMS/vits/data_utils.py
deleted file mode 100644
index 4855699d23d5dee36d4a12e875c7465265caac0f..0000000000000000000000000000000000000000
--- a/spaces/k1ngtai/MMS/vits/data_utils.py
+++ /dev/null
@@ -1,392 +0,0 @@
-import time
-import os
-import random
-import numpy as np
-import torch
-import torch.utils.data
-
-import commons
-from mel_processing import spectrogram_torch
-from utils import load_wav_to_torch, load_filepaths_and_text
-from text import text_to_sequence, cleaned_text_to_sequence
-
-
-class TextAudioLoader(torch.utils.data.Dataset):
- """
- 1) loads audio, text pairs
- 2) normalizes text and converts them to sequences of integers
- 3) computes spectrograms from audio files.
- """
- def __init__(self, audiopaths_and_text, hparams):
- self.audiopaths_and_text = load_filepaths_and_text(audiopaths_and_text)
- self.text_cleaners = hparams.text_cleaners
- self.max_wav_value = hparams.max_wav_value
- self.sampling_rate = hparams.sampling_rate
- self.filter_length = hparams.filter_length
- self.hop_length = hparams.hop_length
- self.win_length = hparams.win_length
- self.sampling_rate = hparams.sampling_rate
-
- self.cleaned_text = getattr(hparams, "cleaned_text", False)
-
- self.add_blank = hparams.add_blank
- self.min_text_len = getattr(hparams, "min_text_len", 1)
- self.max_text_len = getattr(hparams, "max_text_len", 190)
-
- random.seed(1234)
- random.shuffle(self.audiopaths_and_text)
- self._filter()
-
-
- def _filter(self):
- """
- Filter text & store spec lengths
- """
- # Store spectrogram lengths for Bucketing
- # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2)
- # spec_length = wav_length // hop_length
-
- audiopaths_and_text_new = []
- lengths = []
- for audiopath, text in self.audiopaths_and_text:
- if self.min_text_len <= len(text) <= self.max_text_len:
- audiopaths_and_text_new.append([audiopath, text])
- lengths.append(os.path.getsize(audiopath) // (2 * self.hop_length))
- self.audiopaths_and_text = audiopaths_and_text_new
- self.lengths = lengths
-
- def get_audio_text_pair(self, audiopath_and_text):
- # separate filename and text
- audiopath, text = audiopath_and_text[0], audiopath_and_text[1]
- text = self.get_text(text)
- spec, wav = self.get_audio(audiopath)
- return (text, spec, wav)
-
- def get_audio(self, filename):
- audio, sampling_rate = load_wav_to_torch(filename)
- if sampling_rate != self.sampling_rate:
- raise ValueError("{} {} SR doesn't match target {} SR".format(
- sampling_rate, self.sampling_rate))
- audio_norm = audio / self.max_wav_value
- audio_norm = audio_norm.unsqueeze(0)
- spec_filename = filename.replace(".wav", ".spec.pt")
- if os.path.exists(spec_filename):
- spec = torch.load(spec_filename)
- else:
- spec = spectrogram_torch(audio_norm, self.filter_length,
- self.sampling_rate, self.hop_length, self.win_length,
- center=False)
- spec = torch.squeeze(spec, 0)
- torch.save(spec, spec_filename)
- return spec, audio_norm
-
- def get_text(self, text):
- if self.cleaned_text:
- text_norm = cleaned_text_to_sequence(text)
- else:
- text_norm = text_to_sequence(text, self.text_cleaners)
- if self.add_blank:
- text_norm = commons.intersperse(text_norm, 0)
- text_norm = torch.LongTensor(text_norm)
- return text_norm
-
- def __getitem__(self, index):
- return self.get_audio_text_pair(self.audiopaths_and_text[index])
-
- def __len__(self):
- return len(self.audiopaths_and_text)
-
-
-class TextAudioCollate():
- """ Zero-pads model inputs and targets
- """
- def __init__(self, return_ids=False):
- self.return_ids = return_ids
-
- def __call__(self, batch):
- """Collate's training batch from normalized text and aduio
- PARAMS
- ------
- batch: [text_normalized, spec_normalized, wav_normalized]
- """
- # Right zero-pad all one-hot text sequences to max input length
- _, ids_sorted_decreasing = torch.sort(
- torch.LongTensor([x[1].size(1) for x in batch]),
- dim=0, descending=True)
-
- max_text_len = max([len(x[0]) for x in batch])
- max_spec_len = max([x[1].size(1) for x in batch])
- max_wav_len = max([x[2].size(1) for x in batch])
-
- text_lengths = torch.LongTensor(len(batch))
- spec_lengths = torch.LongTensor(len(batch))
- wav_lengths = torch.LongTensor(len(batch))
-
- text_padded = torch.LongTensor(len(batch), max_text_len)
- spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len)
- wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len)
- text_padded.zero_()
- spec_padded.zero_()
- wav_padded.zero_()
- for i in range(len(ids_sorted_decreasing)):
- row = batch[ids_sorted_decreasing[i]]
-
- text = row[0]
- text_padded[i, :text.size(0)] = text
- text_lengths[i] = text.size(0)
-
- spec = row[1]
- spec_padded[i, :, :spec.size(1)] = spec
- spec_lengths[i] = spec.size(1)
-
- wav = row[2]
- wav_padded[i, :, :wav.size(1)] = wav
- wav_lengths[i] = wav.size(1)
-
- if self.return_ids:
- return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths, ids_sorted_decreasing
- return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths
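-
- # Illustrative usage (not in the original file; the filelist path is hypothetical):
- # the collate object is handed to a PyTorch DataLoader so that variable-length
- # text/spectrogram/waveform items get zero-padded per batch:
- #
- # dataset = TextAudioLoader("filelists/train.txt", hparams)
- # loader = torch.utils.data.DataLoader(dataset, batch_size=16, shuffle=True,
- # collate_fn=TextAudioCollate())
- # text, text_len, spec, spec_len, wav, wav_len = next(iter(loader))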
-
-
-"""Multi speaker version"""
-class TextAudioSpeakerLoader(torch.utils.data.Dataset):
- """
- 1) loads audio, speaker_id, text pairs
- 2) normalizes text and converts them to sequences of integers
- 3) computes spectrograms from audio files.
- """
- def __init__(self, audiopaths_sid_text, hparams):
- self.audiopaths_sid_text = load_filepaths_and_text(audiopaths_sid_text)
- self.text_cleaners = hparams.text_cleaners
- self.max_wav_value = hparams.max_wav_value
- self.sampling_rate = hparams.sampling_rate
- self.filter_length = hparams.filter_length
- self.hop_length = hparams.hop_length
- self.win_length = hparams.win_length
- self.sampling_rate = hparams.sampling_rate
-
- self.cleaned_text = getattr(hparams, "cleaned_text", False)
-
- self.add_blank = hparams.add_blank
- self.min_text_len = getattr(hparams, "min_text_len", 1)
- self.max_text_len = getattr(hparams, "max_text_len", 190)
-
- random.seed(1234)
- random.shuffle(self.audiopaths_sid_text)
- self._filter()
-
- def _filter(self):
- """
- Filter text & store spec lengths
- """
- # Store spectrogram lengths for Bucketing
- # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2)
- # spec_length = wav_length // hop_length
-
- audiopaths_sid_text_new = []
- lengths = []
- for audiopath, sid, text in self.audiopaths_sid_text:
- if self.min_text_len <= len(text) <= self.max_text_len:
- audiopaths_sid_text_new.append([audiopath, sid, text])
- lengths.append(os.path.getsize(audiopath) // (2 * self.hop_length))
- self.audiopaths_sid_text = audiopaths_sid_text_new
- self.lengths = lengths
-
- def get_audio_text_speaker_pair(self, audiopath_sid_text):
- # separate filename, speaker_id and text
- audiopath, sid, text = audiopath_sid_text[0], audiopath_sid_text[1], audiopath_sid_text[2]
- text = self.get_text(text)
- spec, wav = self.get_audio(audiopath)
- sid = self.get_sid(sid)
- return (text, spec, wav, sid)
-
- def get_audio(self, filename):
- audio, sampling_rate = load_wav_to_torch(filename)
- if sampling_rate != self.sampling_rate:
- raise ValueError("{} {} SR doesn't match target {} SR".format(
- sampling_rate, self.sampling_rate))
- audio_norm = audio / self.max_wav_value
- audio_norm = audio_norm.unsqueeze(0)
- spec_filename = filename.replace(".wav", ".spec.pt")
- if os.path.exists(spec_filename):
- spec = torch.load(spec_filename)
- else:
- spec = spectrogram_torch(audio_norm, self.filter_length,
- self.sampling_rate, self.hop_length, self.win_length,
- center=False)
- spec = torch.squeeze(spec, 0)
- torch.save(spec, spec_filename)
- return spec, audio_norm
-
- def get_text(self, text):
- if self.cleaned_text:
- text_norm = cleaned_text_to_sequence(text)
- else:
- text_norm = text_to_sequence(text, self.text_cleaners)
- if self.add_blank:
- text_norm = commons.intersperse(text_norm, 0)
- text_norm = torch.LongTensor(text_norm)
- return text_norm
-
- def get_sid(self, sid):
- sid = torch.LongTensor([int(sid)])
- return sid
-
- def __getitem__(self, index):
- return self.get_audio_text_speaker_pair(self.audiopaths_sid_text[index])
-
- def __len__(self):
- return len(self.audiopaths_sid_text)
-
-
-class TextAudioSpeakerCollate():
- """ Zero-pads model inputs and targets
- """
- def __init__(self, return_ids=False):
- self.return_ids = return_ids
-
- def __call__(self, batch):
- """Collate's training batch from normalized text, audio and speaker identities
- PARAMS
- ------
- batch: [text_normalized, spec_normalized, wav_normalized, sid]
- """
- # Right zero-pad all one-hot text sequences to max input length
- _, ids_sorted_decreasing = torch.sort(
- torch.LongTensor([x[1].size(1) for x in batch]),
- dim=0, descending=True)
-
- max_text_len = max([len(x[0]) for x in batch])
- max_spec_len = max([x[1].size(1) for x in batch])
- max_wav_len = max([x[2].size(1) for x in batch])
-
- text_lengths = torch.LongTensor(len(batch))
- spec_lengths = torch.LongTensor(len(batch))
- wav_lengths = torch.LongTensor(len(batch))
- sid = torch.LongTensor(len(batch))
-
- text_padded = torch.LongTensor(len(batch), max_text_len)
- spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len)
- wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len)
- text_padded.zero_()
- spec_padded.zero_()
- wav_padded.zero_()
- for i in range(len(ids_sorted_decreasing)):
- row = batch[ids_sorted_decreasing[i]]
-
- text = row[0]
- text_padded[i, :text.size(0)] = text
- text_lengths[i] = text.size(0)
-
- spec = row[1]
- spec_padded[i, :, :spec.size(1)] = spec
- spec_lengths[i] = spec.size(1)
-
- wav = row[2]
- wav_padded[i, :, :wav.size(1)] = wav
- wav_lengths[i] = wav.size(1)
-
- sid[i] = row[3]
-
- if self.return_ids:
- return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths, sid, ids_sorted_decreasing
- return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths, sid
-
-
-class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler):
- """
- Maintain similar input lengths in a batch.
- Length groups are specified by boundaries.
- Ex) boundaries = [b1, b2, b3] -> any batch is included in either {x | b1 < length(x) <= b2} or {x | b2 < length(x) <= b3}.
-
- It removes samples whose lengths fall outside the boundaries.
- Ex) boundaries = [b1, b2, b3] -> any x s.t. length(x) <= b1 or length(x) > b3 is discarded.
- """
- def __init__(self, dataset, batch_size, boundaries, num_replicas=None, rank=None, shuffle=True):
- super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)
- self.lengths = dataset.lengths
- self.batch_size = batch_size
- self.boundaries = boundaries
-
- self.buckets, self.num_samples_per_bucket = self._create_buckets()
- self.total_size = sum(self.num_samples_per_bucket)
- self.num_samples = self.total_size // self.num_replicas
-
- def _create_buckets(self):
- buckets = [[] for _ in range(len(self.boundaries) - 1)]
- for i in range(len(self.lengths)):
- length = self.lengths[i]
- idx_bucket = self._bisect(length)
- if idx_bucket != -1:
- buckets[idx_bucket].append(i)
-
- for i in range(len(buckets) - 1, 0, -1):
- if len(buckets[i]) == 0:
- buckets.pop(i)
- self.boundaries.pop(i+1)
-
- num_samples_per_bucket = []
- for i in range(len(buckets)):
- len_bucket = len(buckets[i])
- total_batch_size = self.num_replicas * self.batch_size
- rem = (total_batch_size - (len_bucket % total_batch_size)) % total_batch_size
- num_samples_per_bucket.append(len_bucket + rem)
- return buckets, num_samples_per_bucket
-
- def __iter__(self):
- # deterministically shuffle based on epoch
- g = torch.Generator()
- g.manual_seed(self.epoch)
-
- indices = []
- if self.shuffle:
- for bucket in self.buckets:
- indices.append(torch.randperm(len(bucket), generator=g).tolist())
- else:
- for bucket in self.buckets:
- indices.append(list(range(len(bucket))))
-
- batches = []
- for i in range(len(self.buckets)):
- bucket = self.buckets[i]
- len_bucket = len(bucket)
- ids_bucket = indices[i]
- num_samples_bucket = self.num_samples_per_bucket[i]
-
- # add extra samples to make it evenly divisible
- rem = num_samples_bucket - len_bucket
- ids_bucket = ids_bucket + ids_bucket * (rem // len_bucket) + ids_bucket[:(rem % len_bucket)]
-
- # subsample
- ids_bucket = ids_bucket[self.rank::self.num_replicas]
-
- # batching
- for j in range(len(ids_bucket) // self.batch_size):
- batch = [bucket[idx] for idx in ids_bucket[j*self.batch_size:(j+1)*self.batch_size]]
- batches.append(batch)
-
- if self.shuffle:
- batch_ids = torch.randperm(len(batches), generator=g).tolist()
- batches = [batches[i] for i in batch_ids]
- self.batches = batches
-
- assert len(self.batches) * self.batch_size == self.num_samples
- return iter(self.batches)
-
- def _bisect(self, x, lo=0, hi=None):
- if hi is None:
- hi = len(self.boundaries) - 1
-
- if hi > lo:
- mid = (hi + lo) // 2
- if self.boundaries[mid] < x and x <= self.boundaries[mid+1]:
- return mid
- elif x <= self.boundaries[mid]:
- return self._bisect(x, lo, mid)
- else:
- return self._bisect(x, mid + 1, hi)
- else:
- return -1
-
- def __len__(self):
- return self.num_samples // self.batch_size
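-
- # Illustrative usage sketch (added; batch size and boundaries are assumptions):
- # since the sampler yields whole batches, it plugs into DataLoader as a
- # batch_sampler rather than a sampler:
- #
- # sampler = DistributedBucketSampler(dataset, batch_size=16,
- # boundaries=[32, 300, 400, 500, 700, 900],
- # num_replicas=world_size, rank=rank)
- # loader = torch.utils.data.DataLoader(dataset, num_workers=4,
- # collate_fn=TextAudioSpeakerCollate(),
- # batch_sampler=sampler)
- # sampler.set_epoch(epoch) # call once per epoch for a fresh deterministic shuffle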
diff --git a/spaces/kTonpa/Text2Cryptopunks/text2punks/tokenizer.py b/spaces/kTonpa/Text2Cryptopunks/text2punks/tokenizer.py
deleted file mode 100644
index 530ef297d43e056f41877b53da8db8028405a292..0000000000000000000000000000000000000000
--- a/spaces/kTonpa/Text2Cryptopunks/text2punks/tokenizer.py
+++ /dev/null
@@ -1,233 +0,0 @@
-import os
-import html
-import ftfy
-import regex as re
-from pathlib import Path
-
-import torch
-
-from functools import lru_cache
-
-import youtokentome as yttm
-from tokenizers import Tokenizer
-from tokenizers.processors import ByteLevel
-
-
-# OpenAI simple tokenizer
-
-@lru_cache()
-def default_bpe(bpe_path = "data/bpe_simple_vocab_16e6.txt"):
- return os.path.join(os.path.dirname(os.path.abspath(__file__)), bpe_path)
-
-@lru_cache()
-def bytes_to_unicode():
- bs = list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
- cs = bs[:]
- n = 0
- for b in range(2 ** 8):
- if b not in bs:
- bs.append(b)
- cs.append(2 ** 8 + n)
- n += 1
- cs = [chr(n) for n in cs]
- return dict(zip(bs, cs))
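- # Added note: this builds a reversible byte->unicode map. Printable bytes map to
- # themselves (bytes_to_unicode()[ord('a')] == 'a') while non-printable ones are
- # shifted past 255, e.g. byte 0 maps to chr(256), so BPE can treat any byte
- # stream as visible characters.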
-
-def get_pairs(word):
- pairs = set()
- prev_char = word[0]
- for char in word[1:]:
- pairs.add((prev_char, char))
- prev_char = char
- return pairs
-
-def basic_clean(text):
- text = ftfy.fix_text(text)
- text = html.unescape(html.unescape(text))
- return text.strip()
-
-def whitespace_clean(text):
- text = re.sub(r'\s+', ' ', text)
- text = text.strip()
- return text
-
-
-class SimpleTokenizer(object):
- def __init__(self, bpe_path = default_bpe()):
- self.byte_encoder = bytes_to_unicode()
- self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
- merges = Path(bpe_path).read_text(encoding='utf8').split('\n')
- merges = merges[1:49152 - 256 - 2 + 1]
- merges = [tuple(merge.split()) for merge in merges]
- vocab = list(bytes_to_unicode().values())
- vocab = vocab + [v + '</w>' for v in vocab]
- for merge in merges:
- vocab.append(''.join(merge))
- vocab.extend(['<|startoftext|>', '<|endoftext|>'])
-
- self.vocab_size = 49408
-
- self.encoder = dict(zip(vocab, range(len(vocab))))
- self.decoder = {v: k for k, v in self.encoder.items()}
- self.bpe_ranks = dict(zip(merges, range(len(merges))))
- self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
- self.pat = re.compile(
- r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""",
- re.IGNORECASE)
-
- def bpe(self, token):
- if token in self.cache:
- return self.cache[token]
- word = tuple(token[:-1]) + (token[-1] + '</w>',)
- pairs = get_pairs(word)
-
- if not pairs:
- return token + '</w>'
-
- while True:
- bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
- if bigram not in self.bpe_ranks:
- break
- first, second = bigram
- new_word = []
- i = 0
- while i < len(word):
- try:
- j = word.index(first, i)
- new_word.extend(word[i:j])
- i = j
- except ValueError:
- new_word.extend(word[i:])
- break
-
- if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
- new_word.append(first + second)
- i += 2
- else:
- new_word.append(word[i])
- i += 1
- new_word = tuple(new_word)
- word = new_word
- if len(word) == 1:
- break
- else:
- pairs = get_pairs(word)
- word = ' '.join(word)
- self.cache[token] = word
- return word
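- # Worked example (illustrative): with the CLIP merge table, bpe('hello') first
- # forms the word ('h', 'e', 'l', 'l', 'o</w>') and repeatedly merges the
- # highest-ranked adjacent pair until none is mergeable; a common word typically
- # collapses to the single unit 'hello</w>'.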
-
- def encode(self, text):
- bpe_tokens = []
- text = whitespace_clean(basic_clean(text)).lower()
- for token in re.findall(self.pat, text):
- token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
- bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
- return bpe_tokens
-
- def decode(self, tokens, remove_start_end = True):
- if torch.is_tensor(tokens):
- tokens = tokens.tolist()
-
- if remove_start_end:
- tokens = [token for token in tokens if token not in (49406, 49407, 0)]
- text = ''.join([self.decoder[token] for token in tokens])
- text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('</w>', ' ')
- return text
-
- def tokenize(self, texts, context_length = 256, truncate_text = False):
- if isinstance(texts, str):
- texts = [texts]
-
- all_tokens = [self.encode(text) for text in texts]
- result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
-
- for i, tokens in enumerate(all_tokens):
- if len(tokens) > context_length:
- if truncate_text:
- tokens = tokens[:context_length]
- else:
- raise RuntimeError(f"Input {texts[i]} is too long for context length {context_length}")
- result[i, :len(tokens)] = torch.tensor(tokens)
-
- return result
-
-# txt_tokenizer = SimpleTokenizer()
-
-# huggingface tokenizer
-
-class HugTokenizer:
- def __init__(self, bpe_path):
- bpe_path = Path(default_bpe(bpe_path = bpe_path))
- assert bpe_path.exists(), f'BPE json path {str(bpe_path)} does not exist'
- tokenizer = Tokenizer.from_file(str(bpe_path))
- tokenizer.post_processor = ByteLevel(trim_offsets = True)
- self.tokenizer = tokenizer
- self.vocab_size = tokenizer.get_vocab_size()
-
- def decode(self, tokens):
- if torch.is_tensor(tokens):
- tokens = tokens.tolist()
-
- tokens = [token for token in tokens if token not in (0,)]
- return self.tokenizer.decode(tokens, skip_special_tokens = True)
-
- def encode(self, text):
- return self.tokenizer.encode(text).ids
-
- def tokenize(self, texts, context_length = 256, truncate_text = False):
- if isinstance(texts, str):
- texts = [texts]
-
- all_tokens = [self.encode(text) for text in texts]
-
- result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
- for i, tokens in enumerate(all_tokens):
- if len(tokens) > context_length:
- if truncate_text:
- tokens = tokens[:context_length]
- else:
- raise RuntimeError(f"Input {texts[i]} is too long for context length {context_length}")
- result[i, :len(tokens)] = torch.tensor(tokens)
-
- return result
-
-txt_tokenizer = HugTokenizer(bpe_path = "data/byte-level-bpe_4k.tokenizer.json")
-
-# yttm tokenizer
-
-class YttmTokenizer:
- def __init__(self, bpe_path = None):
- bpe_path = Path(default_bpe(bpe_path = bpe_path))
- assert bpe_path.exists(), f'BPE json path {str(bpe_path)} does not exist'
-
- tokenizer = yttm.BPE(model = str(bpe_path))
- self.tokenizer = tokenizer
- self.vocab_size = tokenizer.vocab_size()
-
- def decode(self, tokens):
- if torch.is_tensor(tokens):
- tokens = tokens.tolist()
-
- return self.tokenizer.decode(tokens, ignore_ids = [0])
-
- def encode(self, texts):
- encoded = self.tokenizer.encode(texts, output_type = yttm.OutputType.ID)
- return list(map(torch.tensor, encoded))
-
- def tokenize(self, texts, context_length = 256, truncate_text = False):
- if isinstance(texts, str):
- texts = [texts]
-
- all_tokens = self.encode(texts)
-
- result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
- for i, tokens in enumerate(all_tokens):
- if len(tokens) > context_length:
- if truncate_text:
- tokens = tokens[:context_length]
- else:
- raise RuntimeError(f"Input {texts[i]} is too long for context length {context_length}")
- result[i, :len(tokens)] = tokens.detach().clone()
-
- return result
-
-# txt_tokenizer = YttmTokenizer(bpe_path = "data/byte-level-bpe.tokenizer.json")
diff --git a/spaces/kadirnar/yolor/yolor/utils/datasets.py b/spaces/kadirnar/yolor/yolor/utils/datasets.py
deleted file mode 100644
index c261cce5e8afd4b7742a220615de4ac0f47c224e..0000000000000000000000000000000000000000
--- a/spaces/kadirnar/yolor/yolor/utils/datasets.py
+++ /dev/null
@@ -1,1292 +0,0 @@
-# Dataset utils and dataloaders
-
-import glob
-import math
-import os
-import random
-import shutil
-import time
-from itertools import repeat
-from multiprocessing.pool import ThreadPool
-from pathlib import Path
-from threading import Thread
-
-import cv2
-import numpy as np
-import torch
-from PIL import Image, ExifTags
-from torch.utils.data import Dataset
-from tqdm import tqdm
-
-from yolor.utils.general import xyxy2xywh, xywh2xyxy
-from yolor.utils.torch_utils import torch_distributed_zero_first
-
-# Parameters
-help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
-img_formats = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng'] # acceptable image suffixes
-vid_formats = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv'] # acceptable video suffixes
-
-# Get orientation exif tag
-for orientation in ExifTags.TAGS.keys():
- if ExifTags.TAGS[orientation] == 'Orientation':
- break
-
-
-def get_hash(files):
- # Returns a single hash value of a list of files
- return sum(os.path.getsize(f) for f in files if os.path.isfile(f))
-
-
-def exif_size(img):
- # Returns exif-corrected PIL size
- s = img.size # (width, height)
- try:
- rotation = dict(img._getexif().items())[orientation]
- if rotation == 6: # rotation 270
- s = (s[1], s[0])
- elif rotation == 8: # rotation 90
- s = (s[1], s[0])
- except (AttributeError, KeyError):
- pass
-
- return s
-
-
-def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False,
- rank=-1, world_size=1, workers=8):
- # Make sure only the first process in DDP process the dataset first, and the following others can use the cache
- with torch_distributed_zero_first(rank):
- dataset = LoadImagesAndLabels(path, imgsz, batch_size,
- augment=augment, # augment images
- hyp=hyp, # augmentation hyperparameters
- rect=rect, # rectangular training
- cache_images=cache,
- single_cls=opt.single_cls,
- stride=int(stride),
- pad=pad,
- rank=rank)
-
- batch_size = min(batch_size, len(dataset))
- nw = min([os.cpu_count() // world_size, batch_size if batch_size > 1 else 0, workers]) # number of workers
- sampler = torch.utils.data.distributed.DistributedSampler(dataset) if rank != -1 else None
- dataloader = InfiniteDataLoader(dataset,
- batch_size=batch_size,
- num_workers=nw,
- sampler=sampler,
- pin_memory=True,
- collate_fn=LoadImagesAndLabels.collate_fn) # torch.utils.data.DataLoader()
- return dataloader, dataset
-
-
-def create_dataloader9(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False,
- rank=-1, world_size=1, workers=8):
- # Make sure only the first process in DDP process the dataset first, and the following others can use the cache
- with torch_distributed_zero_first(rank):
- dataset = LoadImagesAndLabels9(path, imgsz, batch_size,
- augment=augment, # augment images
- hyp=hyp, # augmentation hyperparameters
- rect=rect, # rectangular training
- cache_images=cache,
- single_cls=opt.single_cls,
- stride=int(stride),
- pad=pad,
- rank=rank)
-
- batch_size = min(batch_size, len(dataset))
- nw = min([os.cpu_count() // world_size, batch_size if batch_size > 1 else 0, workers]) # number of workers
- sampler = torch.utils.data.distributed.DistributedSampler(dataset) if rank != -1 else None
- dataloader = InfiniteDataLoader(dataset,
- batch_size=batch_size,
- num_workers=nw,
- sampler=sampler,
- pin_memory=True,
- collate_fn=LoadImagesAndLabels9.collate_fn) # torch.utils.data.DataLoader()
- return dataloader, dataset
-
-
-class InfiniteDataLoader(torch.utils.data.dataloader.DataLoader):
- """ Dataloader that reuses workers
-
- Uses same syntax as vanilla DataLoader
- """
-
- def __init__(self, *args, **kwargs):
- super().__init__(*args, **kwargs)
- object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler))
- self.iterator = super().__iter__()
-
- def __len__(self):
- return len(self.batch_sampler.sampler)
-
- def __iter__(self):
- for i in range(len(self)):
- yield next(self.iterator)
-
-
-class _RepeatSampler(object):
- """ Sampler that repeats forever
-
- Args:
- sampler (Sampler)
- """
-
- def __init__(self, sampler):
- self.sampler = sampler
-
- def __iter__(self):
- while True:
- yield from iter(self.sampler)
-
-
-class LoadImages: # for inference
- def __init__(self, path, img_size=640, auto_size=32):
- p = str(Path(path)) # os-agnostic
- p = os.path.abspath(p) # absolute path
- if '*' in p:
- files = sorted(glob.glob(p, recursive=True)) # glob
- elif os.path.isdir(p):
- files = sorted(glob.glob(os.path.join(p, '*.*'))) # dir
- elif os.path.isfile(p):
- files = [p] # files
- else:
- raise Exception('ERROR: %s does not exist' % p)
-
- images = [x for x in files if x.split('.')[-1].lower() in img_formats]
- videos = [x for x in files if x.split('.')[-1].lower() in vid_formats]
- ni, nv = len(images), len(videos)
-
- self.img_size = img_size
- self.auto_size = auto_size
- self.files = images + videos
- self.nf = ni + nv # number of files
- self.video_flag = [False] * ni + [True] * nv
- self.mode = 'images'
- if any(videos):
- self.new_video(videos[0]) # new video
- else:
- self.cap = None
- assert self.nf > 0, 'No images or videos found in %s. Supported formats are:\nimages: %s\nvideos: %s' % \
- (p, img_formats, vid_formats)
-
- def __iter__(self):
- self.count = 0
- return self
-
- def __next__(self):
- if self.count == self.nf:
- raise StopIteration
- path = self.files[self.count]
-
- if self.video_flag[self.count]:
- # Read video
- self.mode = 'video'
- ret_val, img0 = self.cap.read()
- if not ret_val:
- self.count += 1
- self.cap.release()
- if self.count == self.nf: # last video
- raise StopIteration
- else:
- path = self.files[self.count]
- self.new_video(path)
- ret_val, img0 = self.cap.read()
-
- self.frame += 1
- print('video %g/%g (%g/%g) %s: ' % (self.count + 1, self.nf, self.frame, self.nframes, path), end='')
-
- else:
- # Read image
- self.count += 1
- img0 = cv2.imread(path) # BGR
- assert img0 is not None, 'Image Not Found ' + path
- print('image %g/%g %s: ' % (self.count, self.nf, path), end='')
-
- # Padded resize
- img = letterbox(img0, new_shape=self.img_size, auto_size=self.auto_size)[0]
-
- # Convert
- img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
- img = np.ascontiguousarray(img)
-
- return path, img, img0, self.cap
-
- def new_video(self, path):
- self.frame = 0
- self.cap = cv2.VideoCapture(path)
- self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
-
- def __len__(self):
- return self.nf # number of files
-
-
-class LoadWebcam: # for inference
- def __init__(self, pipe='0', img_size=640):
- self.img_size = img_size
-
- if pipe.isnumeric():
- pipe = eval(pipe) # local camera
- # pipe = 'rtsp://192.168.1.64/1' # IP camera
- # pipe = 'rtsp://username:password@192.168.1.64/1' # IP camera with login
- # pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg' # IP golf camera
-
- self.pipe = pipe
- self.cap = cv2.VideoCapture(pipe) # video capture object
- self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size
-
- def __iter__(self):
- self.count = -1
- return self
-
- def __next__(self):
- self.count += 1
- if cv2.waitKey(1) == ord('q'): # q to quit
- self.cap.release()
- cv2.destroyAllWindows()
- raise StopIteration
-
- # Read frame
- if self.pipe == 0: # local camera
- ret_val, img0 = self.cap.read()
- img0 = cv2.flip(img0, 1) # flip left-right
- else: # IP camera
- n = 0
- while True:
- n += 1
- self.cap.grab()
- if n % 30 == 0: # skip frames
- ret_val, img0 = self.cap.retrieve()
- if ret_val:
- break
-
- # Print
- assert ret_val, 'Camera Error %s' % self.pipe
- img_path = 'webcam.jpg'
- print('webcam %g: ' % self.count, end='')
-
- # Padded resize
- img = letterbox(img0, new_shape=self.img_size)[0]
-
- # Convert
- img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
- img = np.ascontiguousarray(img)
-
- return img_path, img, img0, None
-
- def __len__(self):
- return 0
-
-
-class LoadStreams: # multiple IP or RTSP cameras
- def __init__(self, sources='streams.txt', img_size=640):
- self.mode = 'images'
- self.img_size = img_size
-
- if os.path.isfile(sources):
- with open(sources, 'r') as f:
- sources = [x.strip() for x in f.read().splitlines() if len(x.strip())]
- else:
- sources = [sources]
-
- n = len(sources)
- self.imgs = [None] * n
- self.sources = sources
- for i, s in enumerate(sources):
- # Start the thread to read frames from the video stream
- print('%g/%g: %s... ' % (i + 1, n, s), end='')
- cap = cv2.VideoCapture(int(s) if s.isnumeric() else s)
- assert cap.isOpened(), 'Failed to open %s' % s
- w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
- h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
- fps = cap.get(cv2.CAP_PROP_FPS) % 100
- _, self.imgs[i] = cap.read() # guarantee first frame
- thread = Thread(target=self.update, args=([i, cap]), daemon=True)
- print(' success (%gx%g at %.2f FPS).' % (w, h, fps))
- thread.start()
- print('') # newline
-
- # check for common shapes
- s = np.stack([letterbox(x, new_shape=self.img_size)[0].shape for x in self.imgs], 0) # inference shapes
- self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal
- if not self.rect:
- print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')
-
- def update(self, index, cap):
- # Read next stream frame in a daemon thread
- n = 0
- while cap.isOpened():
- n += 1
- # _, self.imgs[index] = cap.read()
- cap.grab()
- if n == 4: # read every 4th frame
- _, self.imgs[index] = cap.retrieve()
- n = 0
- time.sleep(0.01) # wait time
-
- def __iter__(self):
- self.count = -1
- return self
-
- def __next__(self):
- self.count += 1
- img0 = self.imgs.copy()
- if cv2.waitKey(1) == ord('q'): # q to quit
- cv2.destroyAllWindows()
- raise StopIteration
-
- # Letterbox
- img = [letterbox(x, new_shape=self.img_size, auto=self.rect)[0] for x in img0]
-
- # Stack
- img = np.stack(img, 0)
-
- # Convert
- img = img[:, :, :, ::-1].transpose(0, 3, 1, 2) # BGR to RGB, to bsx3x416x416
- img = np.ascontiguousarray(img)
-
- return self.sources, img, img0, None
-
- def __len__(self):
- return 0 # 1E12 frames = 32 streams at 30 FPS for 30 years
-
-
-class LoadImagesAndLabels(Dataset): # for training/testing
- def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
- cache_images=False, single_cls=False, stride=32, pad=0.0, rank=-1):
- self.img_size = img_size
- self.augment = augment
- self.hyp = hyp
- self.image_weights = image_weights
- self.rect = False if image_weights else rect
- self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training)
- self.mosaic_border = [-img_size // 2, -img_size // 2]
- self.stride = stride
-
- def img2label_paths(img_paths):
- # Define label paths as a function of image paths
- sa, sb = os.sep + 'images' + os.sep, os.sep + 'labels' + os.sep # /images/, /labels/ substrings
- return [x.replace(sa, sb, 1).replace(x.split('.')[-1], 'txt') for x in img_paths]
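- # e.g. '/data/images/train/0001.jpg' -> '/data/labels/train/0001.txt'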
-
- try:
- f = [] # image files
- for p in path if isinstance(path, list) else [path]:
- p = Path(p) # os-agnostic
- if p.is_dir(): # dir
- f += glob.glob(str(p / '**' / '*.*'), recursive=True)
- elif p.is_file(): # file
- with open(p, 'r') as t:
- t = t.read().splitlines()
- parent = str(p.parent) + os.sep
- f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path
- else:
- raise Exception('%s does not exist' % p)
- self.img_files = sorted([x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in img_formats])
- assert self.img_files, 'No images found'
- except Exception as e:
- raise Exception('Error loading data from %s: %s\nSee %s' % (path, e, help_url))
-
- # Check cache
- self.label_files = img2label_paths(self.img_files) # labels
- cache_path = str(Path(self.label_files[0]).parent) + '.cache3' # cached labels
- if os.path.isfile(cache_path):
- cache = torch.load(cache_path) # load
- if cache['hash'] != get_hash(self.label_files + self.img_files): # dataset changed
- cache = self.cache_labels(cache_path) # re-cache
- else:
- cache = self.cache_labels(cache_path) # cache
-
- # Read cache
- cache.pop('hash') # remove hash
- labels, shapes = zip(*cache.values())
- self.labels = list(labels)
- self.shapes = np.array(shapes, dtype=np.float64)
- self.img_files = list(cache.keys()) # update
- self.label_files = img2label_paths(cache.keys()) # update
-
- n = len(shapes) # number of images
- bi = np.floor(np.arange(n) / batch_size).astype(int) # batch index (np.int is removed in modern NumPy)
- nb = bi[-1] + 1 # number of batches
- self.batch = bi # batch index of image
- self.n = n
-
- # Rectangular Training
- if self.rect:
- # Sort by aspect ratio
- s = self.shapes # wh
- ar = s[:, 1] / s[:, 0] # aspect ratio
- irect = ar.argsort()
- self.img_files = [self.img_files[i] for i in irect]
- self.label_files = [self.label_files[i] for i in irect]
- self.labels = [self.labels[i] for i in irect]
- self.shapes = s[irect] # wh
- ar = ar[irect]
-
- # Set training image shapes
- shapes = [[1, 1]] * nb
- for i in range(nb):
- ari = ar[bi == i]
- mini, maxi = ari.min(), ari.max()
- if maxi < 1:
- shapes[i] = [maxi, 1]
- elif mini > 1:
- shapes[i] = [1, 1 / mini]
-
- self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(int) * stride
-
- # Check labels
- create_datasubset, extract_bounding_boxes, labels_loaded = False, False, False
- nm, nf, ne, ns, nd = 0, 0, 0, 0, 0 # number missing, found, empty, datasubset, duplicate
- pbar = enumerate(self.label_files)
- if rank in [-1, 0]:
- pbar = tqdm(pbar)
- for i, file in pbar:
- l = self.labels[i] # label
- if l is not None and l.shape[0]:
- assert l.shape[1] == 5, '> 5 label columns: %s' % file
- assert (l >= 0).all(), 'negative labels: %s' % file
- assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels: %s' % file
- if np.unique(l, axis=0).shape[0] < l.shape[0]: # duplicate rows
- nd += 1 # print('WARNING: duplicate rows in %s' % self.label_files[i]) # duplicate rows
- if single_cls:
- l[:, 0] = 0 # force dataset into single-class mode
- self.labels[i] = l
- nf += 1 # file found
-
- # Create subdataset (a smaller dataset)
- if create_datasubset and ns < 1E4:
- if ns == 0:
- create_folder(path='./datasubset')
- os.makedirs('./datasubset/images')
- exclude_classes = 43
- if exclude_classes not in l[:, 0]:
- ns += 1
- # shutil.copy(src=self.img_files[i], dst='./datasubset/images/') # copy image
- with open('./datasubset/images.txt', 'a') as f:
- f.write(self.img_files[i] + '\n')
-
- # Extract object detection boxes for a second stage classifier
- if extract_bounding_boxes:
- p = Path(self.img_files[i])
- img = cv2.imread(str(p))
- h, w = img.shape[:2]
- for j, x in enumerate(l):
- f = '%s%sclassifier%s%g_%g_%s' % (p.parent.parent, os.sep, os.sep, x[0], j, p.name)
- if not os.path.exists(Path(f).parent):
- os.makedirs(Path(f).parent) # make new output folder
-
- b = x[1:] * [w, h, w, h] # box
- b[2:] = b[2:].max() # rectangle to square
- b[2:] = b[2:] * 1.3 + 30 # pad
- b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(int)
-
- b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image
- b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
- assert cv2.imwrite(f, img[b[1]:b[3], b[0]:b[2]]), 'Failure extracting classifier boxes'
- else:
- ne += 1 # print('empty labels for image %s' % self.img_files[i]) # file empty
- # os.system("rm '%s' '%s'" % (self.img_files[i], self.label_files[i])) # remove
-
- if rank in [-1, 0]:
- pbar.desc = 'Scanning labels %s (%g found, %g missing, %g empty, %g duplicate, for %g images)' % (
- cache_path, nf, nm, ne, nd, n)
- if nf == 0:
- s = 'WARNING: No labels found in %s. See %s' % (os.path.dirname(file) + os.sep, help_url)
- print(s)
- assert not augment, '%s. Can not train without labels.' % s
-
- # Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
- self.imgs = [None] * n
- if cache_images:
- gb = 0 # Gigabytes of cached images
- self.img_hw0, self.img_hw = [None] * n, [None] * n
- results = ThreadPool(8).imap(lambda x: load_image(*x), zip(repeat(self), range(n))) # 8 threads
- pbar = tqdm(enumerate(results), total=n)
- for i, x in pbar:
- self.imgs[i], self.img_hw0[i], self.img_hw[i] = x # img, hw_original, hw_resized = load_image(self, i)
- gb += self.imgs[i].nbytes
- pbar.desc = 'Caching images (%.1fGB)' % (gb / 1E9)
-
- def cache_labels(self, path='labels.cache3'):
- # Cache dataset labels, check images and read shapes
- x = {} # dict
- pbar = tqdm(zip(self.img_files, self.label_files), desc='Scanning images', total=len(self.img_files))
- for (img, label) in pbar:
- try:
- l = []
- im = Image.open(img)
- im.verify() # PIL verify
- shape = exif_size(im) # image size
- assert (shape[0] > 9) & (shape[1] > 9), 'image size <10 pixels'
- if os.path.isfile(label):
- with open(label, 'r') as f:
- l = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32) # labels
- if len(l) == 0:
- l = np.zeros((0, 5), dtype=np.float32)
- x[img] = [l, shape]
- except Exception as e:
- print('WARNING: Ignoring corrupted image and/or label %s: %s' % (img, e))
-
- x['hash'] = get_hash(self.label_files + self.img_files)
- torch.save(x, path) # save for next time
- return x
-
- def __len__(self):
- return len(self.img_files)
-
- # def __iter__(self):
- # self.count = -1
- # print('ran dataset iter')
- # #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
- # return self
-
- def __getitem__(self, index):
- if self.image_weights:
- index = self.indices[index]
-
- hyp = self.hyp
- mosaic = self.mosaic and random.random() < hyp['mosaic']
- if mosaic:
- # Load mosaic
- img, labels = load_mosaic(self, index)
- #img, labels = load_mosaic9(self, index)
- shapes = None
-
- # MixUp https://arxiv.org/pdf/1710.09412.pdf
- if random.random() < hyp['mixup']:
- img2, labels2 = load_mosaic(self, random.randint(0, len(self.labels) - 1))
- #img2, labels2 = load_mosaic9(self, random.randint(0, len(self.labels) - 1))
- r = np.random.beta(8.0, 8.0) # mixup ratio, alpha=beta=8.0
- img = (img * r + img2 * (1 - r)).astype(np.uint8)
- labels = np.concatenate((labels, labels2), 0)
-
- else:
- # Load image
- img, (h0, w0), (h, w) = load_image(self, index)
-
- # Letterbox
- shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape
- img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
- shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling
-
- # Load labels
- labels = []
- x = self.labels[index]
- if x.size > 0:
- # Normalized xywh to pixel xyxy format
- labels = x.copy()
- labels[:, 1] = ratio[0] * w * (x[:, 1] - x[:, 3] / 2) + pad[0] # pad width
- labels[:, 2] = ratio[1] * h * (x[:, 2] - x[:, 4] / 2) + pad[1] # pad height
- labels[:, 3] = ratio[0] * w * (x[:, 1] + x[:, 3] / 2) + pad[0]
- labels[:, 4] = ratio[1] * h * (x[:, 2] + x[:, 4] / 2) + pad[1]
-
- if self.augment:
- # Augment imagespace
- if not mosaic:
- img, labels = random_perspective(img, labels,
- degrees=hyp['degrees'],
- translate=hyp['translate'],
- scale=hyp['scale'],
- shear=hyp['shear'],
- perspective=hyp['perspective'])
-
- # Augment colorspace
- augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])
-
- # Apply cutouts
- # if random.random() < 0.9:
- # labels = cutout(img, labels)
-
- nL = len(labels) # number of labels
- if nL:
- labels[:, 1:5] = xyxy2xywh(labels[:, 1:5]) # convert xyxy to xywh
- labels[:, [2, 4]] /= img.shape[0] # normalized height 0-1
- labels[:, [1, 3]] /= img.shape[1] # normalized width 0-1
-
- if self.augment:
- # flip up-down
- if random.random() < hyp['flipud']:
- img = np.flipud(img)
- if nL:
- labels[:, 2] = 1 - labels[:, 2]
-
- # flip left-right
- if random.random() < hyp['fliplr']:
- img = np.fliplr(img)
- if nL:
- labels[:, 1] = 1 - labels[:, 1]
-
- labels_out = torch.zeros((nL, 6))
- if nL:
- labels_out[:, 1:] = torch.from_numpy(labels)
-
- # Convert
- img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
- img = np.ascontiguousarray(img)
-
- return torch.from_numpy(img), labels_out, self.img_files[index], shapes
-
- @staticmethod
- def collate_fn(batch):
- img, label, path, shapes = zip(*batch) # transposed
- for i, l in enumerate(label):
- l[:, 0] = i # add target image index for build_targets()
- return torch.stack(img, 0), torch.cat(label, 0), path, shapes
-
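-
-# Editor's usage sketch (not part of the original file): how the collate_fn
-# above is typically wired into a PyTorch DataLoader; the batch size and the
-# `dataset` argument are assumptions for illustration.
-def _demo_dataloader(dataset):
-    from torch.utils.data import DataLoader
-    loader = DataLoader(dataset, batch_size=16, shuffle=True,
-                        collate_fn=dataset.collate_fn)
-    imgs, labels, paths, shapes = next(iter(loader))
-    # imgs: (16, 3, H, W) tensor; labels: (N, 6) rows of [batch_idx, cls, x, y, w, h]
-    return imgs, labels
-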
-
-class LoadImagesAndLabels9(Dataset): # for training/testing
- def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
- cache_images=False, single_cls=False, stride=32, pad=0.0, rank=-1):
- self.img_size = img_size
- self.augment = augment
- self.hyp = hyp
- self.image_weights = image_weights
- self.rect = False if image_weights else rect
- self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training)
- self.mosaic_border = [-img_size // 2, -img_size // 2]
- self.stride = stride
-
- def img2label_paths(img_paths):
- # Define label paths as a function of image paths
- sa, sb = os.sep + 'images' + os.sep, os.sep + 'labels' + os.sep # /images/, /labels/ substrings
- return [x.replace(sa, sb, 1).replace(x.split('.')[-1], 'txt') for x in img_paths]
-
- try:
- f = [] # image files
- for p in path if isinstance(path, list) else [path]:
- p = Path(p) # os-agnostic
- if p.is_dir(): # dir
- f += glob.glob(str(p / '**' / '*.*'), recursive=True)
- elif p.is_file(): # file
- with open(p, 'r') as t:
- t = t.read().splitlines()
- parent = str(p.parent) + os.sep
- f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path
- else:
- raise Exception('%s does not exist' % p)
- self.img_files = sorted([x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in img_formats])
- assert self.img_files, 'No images found'
- except Exception as e:
- raise Exception('Error loading data from %s: %s\nSee %s' % (path, e, help_url))
-
- # Check cache
- self.label_files = img2label_paths(self.img_files) # labels
- cache_path = str(Path(self.label_files[0]).parent) + '.cache3' # cached labels
- if os.path.isfile(cache_path):
- cache = torch.load(cache_path) # load
- if cache['hash'] != get_hash(self.label_files + self.img_files): # dataset changed
- cache = self.cache_labels(cache_path) # re-cache
- else:
- cache = self.cache_labels(cache_path) # cache
-
- # Read cache
- cache.pop('hash') # remove hash
- labels, shapes = zip(*cache.values())
- self.labels = list(labels)
- self.shapes = np.array(shapes, dtype=np.float64)
- self.img_files = list(cache.keys()) # update
- self.label_files = img2label_paths(cache.keys()) # update
-
- n = len(shapes) # number of images
-        bi = np.floor(np.arange(n) / batch_size).astype(int)  # batch index (np.int removed in NumPy 1.24)
- nb = bi[-1] + 1 # number of batches
- self.batch = bi # batch index of image
- self.n = n
-
- # Rectangular Training
- if self.rect:
- # Sort by aspect ratio
- s = self.shapes # wh
- ar = s[:, 1] / s[:, 0] # aspect ratio
- irect = ar.argsort()
- self.img_files = [self.img_files[i] for i in irect]
- self.label_files = [self.label_files[i] for i in irect]
- self.labels = [self.labels[i] for i in irect]
- self.shapes = s[irect] # wh
- ar = ar[irect]
-
- # Set training image shapes
- shapes = [[1, 1]] * nb
- for i in range(nb):
- ari = ar[bi == i]
- mini, maxi = ari.min(), ari.max()
- if maxi < 1:
- shapes[i] = [maxi, 1]
- elif mini > 1:
- shapes[i] = [1, 1 / mini]
-
-        self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(int) * stride  # np.int was removed in NumPy 1.24
-
- # Check labels
- create_datasubset, extract_bounding_boxes, labels_loaded = False, False, False
- nm, nf, ne, ns, nd = 0, 0, 0, 0, 0 # number missing, found, empty, datasubset, duplicate
- pbar = enumerate(self.label_files)
- if rank in [-1, 0]:
- pbar = tqdm(pbar)
- for i, file in pbar:
- l = self.labels[i] # label
- if l is not None and l.shape[0]:
-                assert l.shape[1] == 5, 'labels require 5 columns each: %s' % file
- assert (l >= 0).all(), 'negative labels: %s' % file
- assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels: %s' % file
- if np.unique(l, axis=0).shape[0] < l.shape[0]: # duplicate rows
- nd += 1 # print('WARNING: duplicate rows in %s' % self.label_files[i]) # duplicate rows
- if single_cls:
- l[:, 0] = 0 # force dataset into single-class mode
- self.labels[i] = l
- nf += 1 # file found
-
- # Create subdataset (a smaller dataset)
- if create_datasubset and ns < 1E4:
- if ns == 0:
- create_folder(path='./datasubset')
- os.makedirs('./datasubset/images')
- exclude_classes = 43
- if exclude_classes not in l[:, 0]:
- ns += 1
- # shutil.copy(src=self.img_files[i], dst='./datasubset/images/') # copy image
- with open('./datasubset/images.txt', 'a') as f:
- f.write(self.img_files[i] + '\n')
-
- # Extract object detection boxes for a second stage classifier
- if extract_bounding_boxes:
- p = Path(self.img_files[i])
- img = cv2.imread(str(p))
- h, w = img.shape[:2]
- for j, x in enumerate(l):
- f = '%s%sclassifier%s%g_%g_%s' % (p.parent.parent, os.sep, os.sep, x[0], j, p.name)
- if not os.path.exists(Path(f).parent):
- os.makedirs(Path(f).parent) # make new output folder
-
- b = x[1:] * [w, h, w, h] # box
- b[2:] = b[2:].max() # rectangle to square
- b[2:] = b[2:] * 1.3 + 30 # pad
-                        b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(int)  # np.int removed in NumPy 1.24
-
- b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image
- b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
- assert cv2.imwrite(f, img[b[1]:b[3], b[0]:b[2]]), 'Failure extracting classifier boxes'
- else:
- ne += 1 # print('empty labels for image %s' % self.img_files[i]) # file empty
- # os.system("rm '%s' '%s'" % (self.img_files[i], self.label_files[i])) # remove
-
- if rank in [-1, 0]:
- pbar.desc = 'Scanning labels %s (%g found, %g missing, %g empty, %g duplicate, for %g images)' % (
- cache_path, nf, nm, ne, nd, n)
- if nf == 0:
- s = 'WARNING: No labels found in %s. See %s' % (os.path.dirname(file) + os.sep, help_url)
- print(s)
-                assert not augment, '%s. Cannot train without labels.' % s
-
- # Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
- self.imgs = [None] * n
- if cache_images:
- gb = 0 # Gigabytes of cached images
- self.img_hw0, self.img_hw = [None] * n, [None] * n
- results = ThreadPool(8).imap(lambda x: load_image(*x), zip(repeat(self), range(n))) # 8 threads
- pbar = tqdm(enumerate(results), total=n)
- for i, x in pbar:
- self.imgs[i], self.img_hw0[i], self.img_hw[i] = x # img, hw_original, hw_resized = load_image(self, i)
- gb += self.imgs[i].nbytes
- pbar.desc = 'Caching images (%.1fGB)' % (gb / 1E9)
-
- def cache_labels(self, path='labels.cache3'):
- # Cache dataset labels, check images and read shapes
- x = {} # dict
- pbar = tqdm(zip(self.img_files, self.label_files), desc='Scanning images', total=len(self.img_files))
- for (img, label) in pbar:
- try:
- l = []
- im = Image.open(img)
- im.verify() # PIL verify
- shape = exif_size(im) # image size
- assert (shape[0] > 9) & (shape[1] > 9), 'image size <10 pixels'
- if os.path.isfile(label):
- with open(label, 'r') as f:
- l = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32) # labels
- if len(l) == 0:
- l = np.zeros((0, 5), dtype=np.float32)
- x[img] = [l, shape]
- except Exception as e:
- print('WARNING: Ignoring corrupted image and/or label %s: %s' % (img, e))
-
- x['hash'] = get_hash(self.label_files + self.img_files)
- torch.save(x, path) # save for next time
- return x
-
- def __len__(self):
- return len(self.img_files)
-
- # def __iter__(self):
- # self.count = -1
- # print('ran dataset iter')
- # #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
- # return self
-
- def __getitem__(self, index):
- if self.image_weights:
- index = self.indices[index]
-
- hyp = self.hyp
- mosaic = self.mosaic and random.random() < hyp['mosaic']
- if mosaic:
- # Load mosaic
- #img, labels = load_mosaic(self, index)
- img, labels = load_mosaic9(self, index)
- shapes = None
-
- # MixUp https://arxiv.org/pdf/1710.09412.pdf
- if random.random() < hyp['mixup']:
- #img2, labels2 = load_mosaic(self, random.randint(0, len(self.labels) - 1))
- img2, labels2 = load_mosaic9(self, random.randint(0, len(self.labels) - 1))
- r = np.random.beta(8.0, 8.0) # mixup ratio, alpha=beta=8.0
- img = (img * r + img2 * (1 - r)).astype(np.uint8)
- labels = np.concatenate((labels, labels2), 0)
-
- else:
- # Load image
- img, (h0, w0), (h, w) = load_image(self, index)
-
- # Letterbox
- shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape
- img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
- shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling
-
- # Load labels
- labels = []
- x = self.labels[index]
- if x.size > 0:
- # Normalized xywh to pixel xyxy format
- labels = x.copy()
- labels[:, 1] = ratio[0] * w * (x[:, 1] - x[:, 3] / 2) + pad[0] # pad width
- labels[:, 2] = ratio[1] * h * (x[:, 2] - x[:, 4] / 2) + pad[1] # pad height
- labels[:, 3] = ratio[0] * w * (x[:, 1] + x[:, 3] / 2) + pad[0]
- labels[:, 4] = ratio[1] * h * (x[:, 2] + x[:, 4] / 2) + pad[1]
-
- if self.augment:
- # Augment imagespace
- if not mosaic:
- img, labels = random_perspective(img, labels,
- degrees=hyp['degrees'],
- translate=hyp['translate'],
- scale=hyp['scale'],
- shear=hyp['shear'],
- perspective=hyp['perspective'])
-
- # Augment colorspace
- augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])
-
- # Apply cutouts
- # if random.random() < 0.9:
- # labels = cutout(img, labels)
-
- nL = len(labels) # number of labels
- if nL:
- labels[:, 1:5] = xyxy2xywh(labels[:, 1:5]) # convert xyxy to xywh
- labels[:, [2, 4]] /= img.shape[0] # normalized height 0-1
- labels[:, [1, 3]] /= img.shape[1] # normalized width 0-1
-
- if self.augment:
- # flip up-down
- if random.random() < hyp['flipud']:
- img = np.flipud(img)
- if nL:
- labels[:, 2] = 1 - labels[:, 2]
-
- # flip left-right
- if random.random() < hyp['fliplr']:
- img = np.fliplr(img)
- if nL:
- labels[:, 1] = 1 - labels[:, 1]
-
- labels_out = torch.zeros((nL, 6))
- if nL:
- labels_out[:, 1:] = torch.from_numpy(labels)
-
- # Convert
- img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
- img = np.ascontiguousarray(img)
-
- return torch.from_numpy(img), labels_out, self.img_files[index], shapes
-
- @staticmethod
- def collate_fn(batch):
- img, label, path, shapes = zip(*batch) # transposed
- for i, l in enumerate(label):
- l[:, 0] = i # add target image index for build_targets()
- return torch.stack(img, 0), torch.cat(label, 0), path, shapes
-
-
-# Ancillary functions --------------------------------------------------------------------------------------------------
-def load_image(self, index):
- # loads 1 image from dataset, returns img, original hw, resized hw
- img = self.imgs[index]
- if img is None: # not cached
- path = self.img_files[index]
- img = cv2.imread(path) # BGR
- assert img is not None, 'Image Not Found ' + path
- h0, w0 = img.shape[:2] # orig hw
- r = self.img_size / max(h0, w0) # resize image to img_size
- if r != 1: # always resize down, only resize up if training with augmentation
- interp = cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR
- img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp)
- return img, (h0, w0), img.shape[:2] # img, hw_original, hw_resized
- else:
- return self.imgs[index], self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized
-
-
-def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
- r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains
- hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
- dtype = img.dtype # uint8
-
- x = np.arange(0, 256, dtype=np.int16)
- lut_hue = ((x * r[0]) % 180).astype(dtype)
- lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
- lut_val = np.clip(x * r[2], 0, 255).astype(dtype)
-
- img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype)
- cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed
-
- # Histogram equalization
- # if random.random() < 0.2:
- # for i in range(3):
- # img[:, :, i] = cv2.equalizeHist(img[:, :, i])
-
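-
-# Editor's sketch: augment_hsv jitters the image in place through per-channel
-# lookup tables; the dummy frame and the gain values are assumptions taken
-# from typical hyperparameter files.
-def _demo_augment_hsv():
-    img = np.random.randint(0, 255, (64, 64, 3), dtype=np.uint8)
-    augment_hsv(img, hgain=0.015, sgain=0.7, vgain=0.4)  # modifies img in place
-    return img
-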
-
-def load_mosaic(self, index):
- # loads images in a mosaic
-
- labels4 = []
- s = self.img_size
- yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border] # mosaic center x, y
- indices = [index] + [random.randint(0, len(self.labels) - 1) for _ in range(3)] # 3 additional image indices
- for i, index in enumerate(indices):
- # Load image
- img, _, (h, w) = load_image(self, index)
-
- # place img in img4
- if i == 0: # top left
- img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
- x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image)
- x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image)
- elif i == 1: # top right
- x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
- x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
- elif i == 2: # bottom left
- x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
- x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
- elif i == 3: # bottom right
- x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
- x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
-
- img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
- padw = x1a - x1b
- padh = y1a - y1b
-
- # Labels
- x = self.labels[index]
- labels = x.copy()
- if x.size > 0: # Normalized xywh to pixel xyxy format
- labels[:, 1] = w * (x[:, 1] - x[:, 3] / 2) + padw
- labels[:, 2] = h * (x[:, 2] - x[:, 4] / 2) + padh
- labels[:, 3] = w * (x[:, 1] + x[:, 3] / 2) + padw
- labels[:, 4] = h * (x[:, 2] + x[:, 4] / 2) + padh
- labels4.append(labels)
-
- # Concat/clip labels
- if len(labels4):
- labels4 = np.concatenate(labels4, 0)
- np.clip(labels4[:, 1:], 0, 2 * s, out=labels4[:, 1:]) # use with random_perspective
- # img4, labels4 = replicate(img4, labels4) # replicate
-
- # Augment
- img4, labels4 = random_perspective(img4, labels4,
- degrees=self.hyp['degrees'],
- translate=self.hyp['translate'],
- scale=self.hyp['scale'],
- shear=self.hyp['shear'],
- perspective=self.hyp['perspective'],
- border=self.mosaic_border) # border to remove
-
- return img4, labels4
-
-
-def load_mosaic9(self, index):
- # loads images in a 9-mosaic
-
- labels9 = []
- s = self.img_size
- indices = [index] + [random.randint(0, len(self.labels) - 1) for _ in range(8)] # 8 additional image indices
- for i, index in enumerate(indices):
- # Load image
- img, _, (h, w) = load_image(self, index)
-
- # place img in img9
- if i == 0: # center
-            img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8)  # base image with 9 tiles
- h0, w0 = h, w
- c = s, s, s + w, s + h # xmin, ymin, xmax, ymax (base) coordinates
- elif i == 1: # top
- c = s, s - h, s + w, s
- elif i == 2: # top right
- c = s + wp, s - h, s + wp + w, s
- elif i == 3: # right
- c = s + w0, s, s + w0 + w, s + h
- elif i == 4: # bottom right
- c = s + w0, s + hp, s + w0 + w, s + hp + h
- elif i == 5: # bottom
- c = s + w0 - w, s + h0, s + w0, s + h0 + h
- elif i == 6: # bottom left
- c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h
- elif i == 7: # left
- c = s - w, s + h0 - h, s, s + h0
- elif i == 8: # top left
- c = s - w, s + h0 - hp - h, s, s + h0 - hp
-
- padx, pady = c[:2]
- x1, y1, x2, y2 = [max(x, 0) for x in c] # allocate coords
-
- # Labels
- x = self.labels[index]
- labels = x.copy()
- if x.size > 0: # Normalized xywh to pixel xyxy format
- labels[:, 1] = w * (x[:, 1] - x[:, 3] / 2) + padx
- labels[:, 2] = h * (x[:, 2] - x[:, 4] / 2) + pady
- labels[:, 3] = w * (x[:, 1] + x[:, 3] / 2) + padx
- labels[:, 4] = h * (x[:, 2] + x[:, 4] / 2) + pady
- labels9.append(labels)
-
- # Image
- img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:] # img9[ymin:ymax, xmin:xmax]
- hp, wp = h, w # height, width previous
-
- # Offset
- yc, xc = [int(random.uniform(0, s)) for x in self.mosaic_border] # mosaic center x, y
- img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s]
-
- # Concat/clip labels
- if len(labels9):
- labels9 = np.concatenate(labels9, 0)
- labels9[:, [1, 3]] -= xc
- labels9[:, [2, 4]] -= yc
-
- np.clip(labels9[:, 1:], 0, 2 * s, out=labels9[:, 1:]) # use with random_perspective
- # img9, labels9 = replicate(img9, labels9) # replicate
-
- # Augment
- img9, labels9 = random_perspective(img9, labels9,
- degrees=self.hyp['degrees'],
- translate=self.hyp['translate'],
- scale=self.hyp['scale'],
- shear=self.hyp['shear'],
- perspective=self.hyp['perspective'],
- border=self.mosaic_border) # border to remove
-
- return img9, labels9
-
-
-def replicate(img, labels):
- # Replicate labels
- h, w = img.shape[:2]
- boxes = labels[:, 1:].astype(int)
- x1, y1, x2, y2 = boxes.T
- s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels)
- for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices
- x1b, y1b, x2b, y2b = boxes[i]
- bh, bw = y2b - y1b, x2b - x1b
- yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y
- x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh]
- img[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
- labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0)
-
- return img, labels
-
-
-def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, auto_size=32):
- # Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232
- shape = img.shape[:2] # current shape [height, width]
- if isinstance(new_shape, int):
- new_shape = (new_shape, new_shape)
-
- # Scale ratio (new / old)
- r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
- if not scaleup: # only scale down, do not scale up (for better test mAP)
- r = min(r, 1.0)
-
- # Compute padding
- ratio = r, r # width, height ratios
- new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
- dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding
- if auto: # minimum rectangle
- dw, dh = np.mod(dw, auto_size), np.mod(dh, auto_size) # wh padding
- elif scaleFill: # stretch
- dw, dh = 0.0, 0.0
- new_unpad = (new_shape[1], new_shape[0])
- ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios
-
- dw /= 2 # divide padding into 2 sides
- dh /= 2
-
- if shape[::-1] != new_unpad: # resize
- img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
- top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
- left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
- img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border
- return img, ratio, (dw, dh)
-
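-
-# Editor's worked example: a 720x1280 frame letterboxed toward 640x640 scales
-# by r = min(640/720, 640/1280) = 0.5 to 360x640; with auto=True the height
-# padding is taken modulo 32 (280 % 32 = 24, i.e. 12 px top and bottom),
-# giving a 384x640 output.
-def _demo_letterbox():
-    img = np.zeros((720, 1280, 3), dtype=np.uint8)
-    out, ratio, (dw, dh) = letterbox(img, new_shape=640)
-    assert out.shape[:2] == (384, 640) and ratio == (0.5, 0.5)
-    return out
-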
-
-def random_perspective(img, targets=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0, border=(0, 0)):
- # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
- # targets = [cls, xyxy]
-
- height = img.shape[0] + border[0] * 2 # shape(h,w,c)
- width = img.shape[1] + border[1] * 2
-
- # Center
- C = np.eye(3)
- C[0, 2] = -img.shape[1] / 2 # x translation (pixels)
- C[1, 2] = -img.shape[0] / 2 # y translation (pixels)
-
- # Perspective
- P = np.eye(3)
- P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y)
- P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x)
-
- # Rotation and Scale
- R = np.eye(3)
- a = random.uniform(-degrees, degrees)
- # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations
- s = random.uniform(1 - scale, 1 + scale)
- # s = 2 ** random.uniform(-scale, scale)
- R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)
-
- # Shear
- S = np.eye(3)
- S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg)
- S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg)
-
- # Translation
- T = np.eye(3)
- T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels)
- T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels)
-
- # Combined rotation matrix
- M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT
- if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed
- if perspective:
- img = cv2.warpPerspective(img, M, dsize=(width, height), borderValue=(114, 114, 114))
- else: # affine
- img = cv2.warpAffine(img, M[:2], dsize=(width, height), borderValue=(114, 114, 114))
-
- # Visualize
- # import matplotlib.pyplot as plt
- # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()
- # ax[0].imshow(img[:, :, ::-1]) # base
- # ax[1].imshow(img2[:, :, ::-1]) # warped
-
- # Transform label coordinates
- n = len(targets)
- if n:
- # warp points
- xy = np.ones((n * 4, 3))
- xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1
- xy = xy @ M.T # transform
- if perspective:
- xy = (xy[:, :2] / xy[:, 2:3]).reshape(n, 8) # rescale
- else: # affine
- xy = xy[:, :2].reshape(n, 8)
-
- # create new boxes
- x = xy[:, [0, 2, 4, 6]]
- y = xy[:, [1, 3, 5, 7]]
- xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
-
- # # apply angle-based reduction of bounding boxes
- # radians = a * math.pi / 180
- # reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5
- # x = (xy[:, 2] + xy[:, 0]) / 2
- # y = (xy[:, 3] + xy[:, 1]) / 2
- # w = (xy[:, 2] - xy[:, 0]) * reduction
- # h = (xy[:, 3] - xy[:, 1]) * reduction
- # xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T
-
- # clip boxes
- xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
- xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)
-
- # filter candidates
- i = box_candidates(box1=targets[:, 1:5].T * s, box2=xy.T)
- targets = targets[i]
- targets[:, 1:5] = xy[i]
-
- return img, targets
-
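-
-# Editor's sketch: with every magnitude set to 0 the product T @ S @ R @ P @ C
-# collapses to the identity (the centering in C is exactly undone by the
-# 0.5 * size translation in T), so boxes pass through unchanged; the inputs
-# below are assumptions for illustration.
-def _demo_random_perspective():
-    img = np.zeros((640, 640, 3), dtype=np.uint8)
-    targets = np.array([[0, 100., 100., 200., 200.]])  # [cls, x1, y1, x2, y2]
-    out_img, out_targets = random_perspective(img, targets, degrees=0,
-                                              translate=0, scale=0, shear=0)
-    assert np.allclose(out_targets[:, 1:], targets[:, 1:])
-    return out_img, out_targets
-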
-
-def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1): # box1(4,n), box2(4,n)
- # Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio
- w1, h1 = box1[2] - box1[0], box1[3] - box1[1]
- w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
- ar = np.maximum(w2 / (h2 + 1e-16), h2 / (w2 + 1e-16)) # aspect ratio
- return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + 1e-16) > area_thr) & (ar < ar_thr) # candidates
-
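-
-# Editor's worked example: a 100x50 box shrunk to 40x20 keeps 800/5000 = 16%
-# of its area (> 0.1) with aspect ratio 2 (< 20) and both sides > 2 px, so it
-# survives the candidate filter.
-def _demo_box_candidates():
-    box1 = np.array([[0.], [0.], [100.], [50.]])  # rows of x1, y1, x2, y2
-    box2 = np.array([[0.], [0.], [40.], [20.]])
-    keep = box_candidates(box1, box2)
-    assert keep.all()
-    return keep
-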
-
-def cutout(image, labels):
- # Applies image cutout augmentation https://arxiv.org/abs/1708.04552
- h, w = image.shape[:2]
-
- def bbox_ioa(box1, box2):
- # Returns the intersection over box2 area given box1, box2. box1 is 4, box2 is nx4. boxes are x1y1x2y2
- box2 = box2.transpose()
-
- # Get the coordinates of bounding boxes
- b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
- b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
-
- # Intersection area
- inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \
- (np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0)
-
- # box2 area
- box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16
-
- # Intersection over box2 area
- return inter_area / box2_area
-
- # create random masks
- scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction
- for s in scales:
- mask_h = random.randint(1, int(h * s))
- mask_w = random.randint(1, int(w * s))
-
- # box
- xmin = max(0, random.randint(0, w) - mask_w // 2)
- ymin = max(0, random.randint(0, h) - mask_h // 2)
- xmax = min(w, xmin + mask_w)
- ymax = min(h, ymin + mask_h)
-
- # apply random color mask
- image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]
-
- # return unobscured labels
- if len(labels) and s > 0.03:
- box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
- ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area
- labels = labels[ioa < 0.60] # remove >60% obscured labels
-
- return labels
-
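-
-# Editor's sketch: cutout paints randomly colored rectangles over the image
-# and drops labels whose boxes become more than 60% obscured; the dummy image
-# and the single pixel-space label are assumptions.
-def _demo_cutout():
-    img = np.full((320, 320, 3), 128, dtype=np.uint8)
-    labels = np.array([[0, 10., 10., 100., 100.]], dtype=np.float32)
-    remaining = cutout(img, labels)  # subset of the input labels
-    return img, remaining
-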
-
-def create_folder(path='./new'):
- # Create folder
- if os.path.exists(path):
- shutil.rmtree(path) # delete output folder
- os.makedirs(path) # make new output folder
-
-
-def flatten_recursive(path='../coco128'):
- # Flatten a recursive directory by bringing all files to top level
- new_path = Path(path + '_flat')
- create_folder(new_path)
- for file in tqdm(glob.glob(str(Path(path)) + '/**/*.*', recursive=True)):
- shutil.copyfile(file, new_path / Path(file).name)
-
-
diff --git a/spaces/kaleidoscope-data/data-cleaning-llm/app/util.py b/spaces/kaleidoscope-data/data-cleaning-llm/app/util.py
deleted file mode 100644
index bc18770f90f24153b0c4885aea2308559b9b31ae..0000000000000000000000000000000000000000
--- a/spaces/kaleidoscope-data/data-cleaning-llm/app/util.py
+++ /dev/null
@@ -1,47 +0,0 @@
-import json
-
-def compare_completion_and_prediction(completion, prediction, verbose=False):
- """
- a function that compares the completion and prediction
- separating each string by comma into their respective columns,
- then compare each column and return a DataFrame with the results
-
- Args:
- completion (_type_): str
- prediction (_type_): str
- verbose (bool, optional): bool. Defaults to False.
-
- Returns:
- _type_: json object with completion, prediction, matches, and num_correct
- """
- # if verbose is True, print the completion and prediction strings
- if verbose:
- print("Completion:", completion, f"type({type(completion)}):")
- print("Prediction:", prediction, f"type({type(prediction)}):")
- # split completion and prediction strings on comma character
- completion = completion.split(',')
- prediction = prediction.split(',')
-    # compare element-wise; zip truncates to the shorter list, avoiding an
-    # IndexError when the two strings have different numbers of columns
-    matches = [c == p for c, p in zip(completion, prediction)]
- return {
- "completion": completion,
- "prediction": prediction,
- "matches": matches,
- "num_correct": sum(matches),
- }
-
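-# Editor's usage sketch (the two strings are assumptions for illustration):
-# compare_completion_and_prediction("a,b,c", "a,x,c") returns
-# {'completion': ['a', 'b', 'c'], 'prediction': ['a', 'x', 'c'],
-#  'matches': [True, False, True], 'num_correct': 2}
-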
-def json_to_dict(json_string):
- """function that takes string in the form of json and returns a dictionary"""
- return json.loads(json_string)
-
-def join_dicts(dict1, dict2):
- """function that joins two dictionaries into one dictionary
-
- Args:
- dict1 (_type_): dict
- dict2 (_type_): dict
-
- Returns:
- _type_: dict
- """
- return {key:[dict1[key], dict2[key]] for key in dict1}
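-
-# Editor's usage sketch: join_dicts({'a': 1, 'b': 2}, {'a': 3, 'b': 4})
-# returns {'a': [1, 3], 'b': [2, 4]}; a key missing from dict2 raises KeyError.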
diff --git a/spaces/kdrkdrkdr/HoshinoTTS/README.md b/spaces/kdrkdrkdr/HoshinoTTS/README.md
deleted file mode 100644
index 1b4ae4306aedc8f7b20984f8c2312ca239e3ed99..0000000000000000000000000000000000000000
--- a/spaces/kdrkdrkdr/HoshinoTTS/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: HoshinoTTS
-emoji: 📈
-colorFrom: pink
-colorTo: gray
-sdk: gradio
-sdk_version: 3.11.0
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/keras-io/video-transformers/utils/constants.py b/spaces/keras-io/video-transformers/utils/constants.py
deleted file mode 100644
index a7f7e2a32bef3b18bfab03aa22c7331ef206d818..0000000000000000000000000000000000000000
--- a/spaces/keras-io/video-transformers/utils/constants.py
+++ /dev/null
@@ -1,4 +0,0 @@
-MAX_SEQ_LENGTH = 20
-NUM_FEATURES = 1024
-IMG_SIZE = 128
-CLASS_VOCAB = ['CricketShot', 'PlayingCello', 'Punch', 'ShavingBeard', 'TennisSwing']
\ No newline at end of file
diff --git a/spaces/kevinwang676/Bert-VITS2/text/chinese.py b/spaces/kevinwang676/Bert-VITS2/text/chinese.py
deleted file mode 100644
index 276753880b73de2e8889dcb2101cd98c09e0710b..0000000000000000000000000000000000000000
--- a/spaces/kevinwang676/Bert-VITS2/text/chinese.py
+++ /dev/null
@@ -1,193 +0,0 @@
-import os
-import re
-
-import cn2an
-from pypinyin import lazy_pinyin, Style
-
-from text import symbols
-from text.symbols import punctuation
-from text.tone_sandhi import ToneSandhi
-
-current_file_path = os.path.dirname(__file__)
-pinyin_to_symbol_map = {line.split("\t")[0]: line.strip().split("\t")[1] for line in
- open(os.path.join(current_file_path, 'opencpop-strict.txt')).readlines()}
-
-import jieba.posseg as psg
-
-
-rep_map = {
- ':': ',',
- ';': ',',
- ',': ',',
- '。': '.',
- '!': '!',
- '?': '?',
- '\n': '.',
- "·": ",",
- '、': ",",
- '...': '…',
- '$': '.',
- '“': "'",
- '”': "'",
- '‘': "'",
- '’': "'",
- '(': "'",
- ')': "'",
- '(': "'",
- ')': "'",
- '《': "'",
- '》': "'",
- '【': "'",
- '】': "'",
- '[': "'",
- ']': "'",
- '—': "-",
- '~': "-",
- '~': "-",
- '「': "'",
- '」': "'",
-
-}
-
-tone_modifier = ToneSandhi()
-
-def replace_punctuation(text):
- text = text.replace("嗯", "恩").replace("呣","母")
- pattern = re.compile('|'.join(re.escape(p) for p in rep_map.keys()))
-
- replaced_text = pattern.sub(lambda x: rep_map[x.group()], text)
-
- replaced_text = re.sub(r'[^\u4e00-\u9fa5'+"".join(punctuation)+r']+', '', replaced_text)
-
- return replaced_text
-
-def g2p(text):
- pattern = r'(?<=[{0}])\s*'.format(''.join(punctuation))
- sentences = [i for i in re.split(pattern, text) if i.strip()!='']
- phones, tones, word2ph = _g2p(sentences)
- assert sum(word2ph) == len(phones)
-    assert len(word2ph) == len(text)  # may fail for some inputs; wrap in try/except if needed
- phones = ['_'] + phones + ["_"]
- tones = [0] + tones + [0]
- word2ph = [1] + word2ph + [1]
- return phones, tones, word2ph
-
-
-def _get_initials_finals(word):
- initials = []
- finals = []
- orig_initials = lazy_pinyin(
- word, neutral_tone_with_five=True, style=Style.INITIALS)
- orig_finals = lazy_pinyin(
- word, neutral_tone_with_five=True, style=Style.FINALS_TONE3)
- for c, v in zip(orig_initials, orig_finals):
- initials.append(c)
- finals.append(v)
- return initials, finals
-
-
-def _g2p(segments):
- phones_list = []
- tones_list = []
- word2ph = []
- for seg in segments:
- pinyins = []
- # Replace all English words in the sentence
- seg = re.sub('[a-zA-Z]+', '', seg)
- seg_cut = psg.lcut(seg)
- initials = []
- finals = []
- seg_cut = tone_modifier.pre_merge_for_modify(seg_cut)
- for word, pos in seg_cut:
- if pos == 'eng':
- continue
- sub_initials, sub_finals = _get_initials_finals(word)
- sub_finals = tone_modifier.modified_tone(word, pos,
- sub_finals)
- initials.append(sub_initials)
- finals.append(sub_finals)
-
- # assert len(sub_initials) == len(sub_finals) == len(word)
- initials = sum(initials, [])
- finals = sum(finals, [])
- #
- for c, v in zip(initials, finals):
- raw_pinyin = c+v
- # NOTE: post process for pypinyin outputs
- # we discriminate i, ii and iii
- if c == v:
- assert c in punctuation
- phone = [c]
- tone = '0'
- word2ph.append(1)
- else:
- v_without_tone = v[:-1]
- tone = v[-1]
-
- pinyin = c+v_without_tone
- assert tone in '12345'
-
- if c:
-                    # multi-syllable (pinyin has an initial)
- v_rep_map = {
- "uei": 'ui',
- 'iou': 'iu',
- 'uen': 'un',
- }
- if v_without_tone in v_rep_map.keys():
- pinyin = c+v_rep_map[v_without_tone]
- else:
-                    # single syllable
- pinyin_rep_map = {
- 'ing': 'ying',
- 'i': 'yi',
- 'in': 'yin',
- 'u': 'wu',
- }
- if pinyin in pinyin_rep_map.keys():
- pinyin = pinyin_rep_map[pinyin]
- else:
- single_rep_map = {
- 'v': 'yu',
- 'e': 'e',
- 'i': 'y',
- 'u': 'w',
- }
- if pinyin[0] in single_rep_map.keys():
- pinyin = single_rep_map[pinyin[0]]+pinyin[1:]
-
- assert pinyin in pinyin_to_symbol_map.keys(), (pinyin, seg, raw_pinyin)
- phone = pinyin_to_symbol_map[pinyin].split(' ')
- word2ph.append(len(phone))
-
- phones_list += phone
- tones_list += [int(tone)] * len(phone)
- return phones_list, tones_list, word2ph
-
-
-
-def text_normalize(text):
- numbers = re.findall(r'\d+(?:\.?\d+)?', text)
- for number in numbers:
- text = text.replace(number, cn2an.an2cn(number), 1)
- text = replace_punctuation(text)
- return text
-
-def get_bert_feature(text, word2ph):
- from text import chinese_bert
- return chinese_bert.get_bert_feature(text, word2ph)
-
-if __name__ == '__main__':
- from text.chinese_bert import get_bert_feature
- text = "啊!但是《原神》是由,米哈\游自主, [研发]的一款全.新开放世界.冒险游戏"
- text = text_normalize(text)
- print(text)
- phones, tones, word2ph = g2p(text)
- bert = get_bert_feature(text, word2ph)
-
- print(phones, tones, word2ph, bert.shape)
-
-
-# # Example usage
-# text = "这是一个示例文本:,你好!这是一个测试...."
-# print(g2p_paddle(text))  # output: 这是一个示例文本你好这是一个测试
diff --git a/spaces/kevinwang676/ChatGLM2-VC-SadTalker/src/face3d/models/base_model.py b/spaces/kevinwang676/ChatGLM2-VC-SadTalker/src/face3d/models/base_model.py
deleted file mode 100644
index cfe64a7f739ad8f8cfbf3073a2bf49e1468127fd..0000000000000000000000000000000000000000
--- a/spaces/kevinwang676/ChatGLM2-VC-SadTalker/src/face3d/models/base_model.py
+++ /dev/null
@@ -1,316 +0,0 @@
-"""This script defines the base network model for Deep3DFaceRecon_pytorch
-"""
-
-import os
-import numpy as np
-import torch
-from collections import OrderedDict
-from abc import ABC, abstractmethod
-from . import networks
-
-
-class BaseModel(ABC):
- """This class is an abstract base class (ABC) for models.
- To create a subclass, you need to implement the following five functions:
- -- <__init__>: initialize the class; first call BaseModel.__init__(self, opt).
-        -- <set_input>: unpack data from dataset and apply preprocessing.
-        -- <forward>: produce intermediate results.
-        -- <optimize_parameters>: calculate losses, gradients, and update network weights.
-        -- <modify_commandline_options>: (optionally) add model-specific options and set default options.
- """
-
- def __init__(self, opt):
- """Initialize the BaseModel class.
-
- Parameters:
- opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
-
- When creating your custom class, you need to implement your own initialization.
-        In this function, you should first call <BaseModel.__init__(self, opt)>.
- Then, you need to define four lists:
- -- self.loss_names (str list): specify the training losses that you want to plot and save.
- -- self.model_names (str list): specify the images that you want to display and save.
- -- self.visual_names (str list): define networks used in our training.
- -- self.optimizers (optimizer list): define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example.
- """
- self.opt = opt
- self.isTrain = False
- self.device = torch.device('cpu')
- self.save_dir = " " # os.path.join(opt.checkpoints_dir, opt.name) # save all the checkpoints to save_dir
- self.loss_names = []
- self.model_names = []
- self.visual_names = []
- self.parallel_names = []
- self.optimizers = []
- self.image_paths = []
- self.metric = 0 # used for learning rate policy 'plateau'
-
- @staticmethod
- def dict_grad_hook_factory(add_func=lambda x: x):
- saved_dict = dict()
-
- def hook_gen(name):
- def grad_hook(grad):
- saved_vals = add_func(grad)
- saved_dict[name] = saved_vals
- return grad_hook
- return hook_gen, saved_dict
-
- @staticmethod
- def modify_commandline_options(parser, is_train):
- """Add new model-specific options, and rewrite default values for existing options.
-
- Parameters:
- parser -- original option parser
- is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
-
- Returns:
- the modified parser.
- """
- return parser
-
- @abstractmethod
- def set_input(self, input):
- """Unpack input data from the dataloader and perform necessary pre-processing steps.
-
- Parameters:
- input (dict): includes the data itself and its metadata information.
- """
- pass
-
- @abstractmethod
- def forward(self):
- """Run forward pass; called by both functions and ."""
- pass
-
- @abstractmethod
- def optimize_parameters(self):
- """Calculate losses, gradients, and update network weights; called in every training iteration"""
- pass
-
- def setup(self, opt):
- """Load and print networks; create schedulers
-
- Parameters:
- opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
- """
- if self.isTrain:
- self.schedulers = [networks.get_scheduler(optimizer, opt) for optimizer in self.optimizers]
-
- if not self.isTrain or opt.continue_train:
- load_suffix = opt.epoch
- self.load_networks(load_suffix)
-
-
- # self.print_networks(opt.verbose)
-
- def parallelize(self, convert_sync_batchnorm=True):
- if not self.opt.use_ddp:
- for name in self.parallel_names:
- if isinstance(name, str):
- module = getattr(self, name)
- setattr(self, name, module.to(self.device))
- else:
- for name in self.model_names:
- if isinstance(name, str):
- module = getattr(self, name)
- if convert_sync_batchnorm:
- module = torch.nn.SyncBatchNorm.convert_sync_batchnorm(module)
- setattr(self, name, torch.nn.parallel.DistributedDataParallel(module.to(self.device),
- device_ids=[self.device.index],
- find_unused_parameters=True, broadcast_buffers=True))
-
- # DistributedDataParallel is not needed when a module doesn't have any parameter that requires a gradient.
- for name in self.parallel_names:
- if isinstance(name, str) and name not in self.model_names:
- module = getattr(self, name)
- setattr(self, name, module.to(self.device))
-
- # put state_dict of optimizer to gpu device
- if self.opt.phase != 'test':
- if self.opt.continue_train:
- for optim in self.optimizers:
- for state in optim.state.values():
- for k, v in state.items():
- if isinstance(v, torch.Tensor):
- state[k] = v.to(self.device)
-
- def data_dependent_initialize(self, data):
- pass
-
- def train(self):
- """Make models train mode"""
- for name in self.model_names:
- if isinstance(name, str):
- net = getattr(self, name)
- net.train()
-
- def eval(self):
- """Make models eval mode"""
- for name in self.model_names:
- if isinstance(name, str):
- net = getattr(self, name)
- net.eval()
-
- def test(self):
- """Forward function used in test time.
-
- This function wraps function in no_grad() so we don't save intermediate steps for backprop
- It also calls to produce additional visualization results
- """
- with torch.no_grad():
- self.forward()
- self.compute_visuals()
-
- def compute_visuals(self):
- """Calculate additional output images for visdom and HTML visualization"""
- pass
-
- def get_image_paths(self, name='A'):
- """ Return image paths that are used to load current data"""
-        return self.image_paths if name == 'A' else self.image_paths_B
-
- def update_learning_rate(self):
- """Update learning rates for all the networks; called at the end of every epoch"""
- for scheduler in self.schedulers:
- if self.opt.lr_policy == 'plateau':
- scheduler.step(self.metric)
- else:
- scheduler.step()
-
- lr = self.optimizers[0].param_groups[0]['lr']
- print('learning rate = %.7f' % lr)
-
- def get_current_visuals(self):
- """Return visualization images. train.py will display these images with visdom, and save the images to a HTML"""
- visual_ret = OrderedDict()
- for name in self.visual_names:
- if isinstance(name, str):
- visual_ret[name] = getattr(self, name)[:, :3, ...]
- return visual_ret
-
- def get_current_losses(self):
- """Return traning losses / errors. train.py will print out these errors on console, and save them to a file"""
- errors_ret = OrderedDict()
- for name in self.loss_names:
- if isinstance(name, str):
- errors_ret[name] = float(getattr(self, 'loss_' + name)) # float(...) works for both scalar tensor and float number
- return errors_ret
-
- def save_networks(self, epoch):
- """Save all the networks to the disk.
-
- Parameters:
- epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)
- """
- if not os.path.isdir(self.save_dir):
- os.makedirs(self.save_dir)
-
- save_filename = 'epoch_%s.pth' % (epoch)
- save_path = os.path.join(self.save_dir, save_filename)
-
- save_dict = {}
- for name in self.model_names:
- if isinstance(name, str):
- net = getattr(self, name)
- if isinstance(net, torch.nn.DataParallel) or isinstance(net,
- torch.nn.parallel.DistributedDataParallel):
- net = net.module
- save_dict[name] = net.state_dict()
-
-
- for i, optim in enumerate(self.optimizers):
- save_dict['opt_%02d'%i] = optim.state_dict()
-
- for i, sched in enumerate(self.schedulers):
- save_dict['sched_%02d'%i] = sched.state_dict()
-
- torch.save(save_dict, save_path)
-
- def __patch_instance_norm_state_dict(self, state_dict, module, keys, i=0):
- """Fix InstanceNorm checkpoints incompatibility (prior to 0.4)"""
- key = keys[i]
- if i + 1 == len(keys): # at the end, pointing to a parameter/buffer
- if module.__class__.__name__.startswith('InstanceNorm') and \
- (key == 'running_mean' or key == 'running_var'):
- if getattr(module, key) is None:
- state_dict.pop('.'.join(keys))
- if module.__class__.__name__.startswith('InstanceNorm') and \
- (key == 'num_batches_tracked'):
- state_dict.pop('.'.join(keys))
- else:
- self.__patch_instance_norm_state_dict(state_dict, getattr(module, key), keys, i + 1)
-
- def load_networks(self, epoch):
- """Load all the networks from the disk.
-
- Parameters:
- epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)
- """
- if self.opt.isTrain and self.opt.pretrained_name is not None:
- load_dir = os.path.join(self.opt.checkpoints_dir, self.opt.pretrained_name)
- else:
- load_dir = self.save_dir
- load_filename = 'epoch_%s.pth' % (epoch)
- load_path = os.path.join(load_dir, load_filename)
- state_dict = torch.load(load_path, map_location=self.device)
- print('loading the model from %s' % load_path)
-
- for name in self.model_names:
- if isinstance(name, str):
- net = getattr(self, name)
- if isinstance(net, torch.nn.DataParallel):
- net = net.module
- net.load_state_dict(state_dict[name])
-
- if self.opt.phase != 'test':
- if self.opt.continue_train:
- print('loading the optim from %s' % load_path)
- for i, optim in enumerate(self.optimizers):
- optim.load_state_dict(state_dict['opt_%02d'%i])
-
- try:
- print('loading the sched from %s' % load_path)
- for i, sched in enumerate(self.schedulers):
- sched.load_state_dict(state_dict['sched_%02d'%i])
-                except Exception:
- print('Failed to load schedulers, set schedulers according to epoch count manually')
- for i, sched in enumerate(self.schedulers):
- sched.last_epoch = self.opt.epoch_count - 1
-
-
-
-
- def print_networks(self, verbose):
- """Print the total number of parameters in the network and (if verbose) network architecture
-
- Parameters:
- verbose (bool) -- if verbose: print the network architecture
- """
- print('---------- Networks initialized -------------')
- for name in self.model_names:
- if isinstance(name, str):
- net = getattr(self, name)
- num_params = 0
- for param in net.parameters():
- num_params += param.numel()
- if verbose:
- print(net)
- print('[Network %s] Total number of parameters : %.3f M' % (name, num_params / 1e6))
- print('-----------------------------------------------')
-
- def set_requires_grad(self, nets, requires_grad=False):
- """Set requies_grad=Fasle for all the networks to avoid unnecessary computations
- Parameters:
- nets (network list) -- a list of networks
- requires_grad (bool) -- whether the networks require gradients or not
- """
- if not isinstance(nets, list):
- nets = [nets]
- for net in nets:
- if net is not None:
- for param in net.parameters():
- param.requires_grad = requires_grad
-
- def generate_visuals_for_evaluation(self, data, mode):
- return {}
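-
-# Editor's usage sketch for set_requires_grad (GAN-style subclass; netD,
-# optimizer_G and backward_G are assumptions, not attributes of this base class):
-# model.set_requires_grad(model.netD, False)  # freeze D while updating G
-# model.optimizer_G.zero_grad(); model.backward_G(); model.optimizer_G.step()
-# model.set_requires_grad(model.netD, True)   # unfreeze for the D update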
diff --git a/spaces/kevinwang676/VoiceChanger/src/gradio_demo.py b/spaces/kevinwang676/VoiceChanger/src/gradio_demo.py
deleted file mode 100644
index 1e70005831b9f29dc3c7f39642364bc325a4c8a4..0000000000000000000000000000000000000000
--- a/spaces/kevinwang676/VoiceChanger/src/gradio_demo.py
+++ /dev/null
@@ -1,155 +0,0 @@
-import torch, uuid
-import os, sys, shutil
-from src.utils.preprocess import CropAndExtract
-from src.test_audio2coeff import Audio2Coeff
-from src.facerender.animate import AnimateFromCoeff
-from src.generate_batch import get_data
-from src.generate_facerender_batch import get_facerender_data
-
-from src.utils.init_path import init_path
-
-from pydub import AudioSegment
-
-
-def mp3_to_wav(mp3_filename,wav_filename,frame_rate):
- mp3_file = AudioSegment.from_file(file=mp3_filename)
- mp3_file.set_frame_rate(frame_rate).export(wav_filename,format="wav")
-
-
-class SadTalker():
-
- def __init__(self, checkpoint_path='checkpoints', config_path='src/config', lazy_load=False):
-
- if torch.cuda.is_available() :
- device = "cuda"
- else:
- device = "cpu"
-
- self.device = device
-
- os.environ['TORCH_HOME']= checkpoint_path
-
- self.checkpoint_path = checkpoint_path
- self.config_path = config_path
-
-
- def test(self, source_image, driven_audio, preprocess='crop',
- still_mode=False, use_enhancer=False, batch_size=1, size=256,
- pose_style = 0, exp_scale=1.0,
- use_ref_video = False,
- ref_video = None,
- ref_info = None,
- use_idle_mode = False,
- length_of_audio = 0, use_blink=True,
- result_dir='./results/'):
-
- self.sadtalker_paths = init_path(self.checkpoint_path, self.config_path, size, False, preprocess)
- print(self.sadtalker_paths)
-
- self.audio_to_coeff = Audio2Coeff(self.sadtalker_paths, self.device)
- self.preprocess_model = CropAndExtract(self.sadtalker_paths, self.device)
- self.animate_from_coeff = AnimateFromCoeff(self.sadtalker_paths, self.device)
-
- time_tag = str(uuid.uuid4())
- save_dir = os.path.join(result_dir, time_tag)
- os.makedirs(save_dir, exist_ok=True)
-
- input_dir = os.path.join(save_dir, 'input')
- os.makedirs(input_dir, exist_ok=True)
-
- print(source_image)
- pic_path = os.path.join(input_dir, os.path.basename(source_image))
- shutil.move(source_image, input_dir)
-
- if driven_audio is not None and os.path.isfile(driven_audio):
- audio_path = os.path.join(input_dir, os.path.basename(driven_audio))
-
- #### mp3 to wav
- if '.mp3' in audio_path:
- mp3_to_wav(driven_audio, audio_path.replace('.mp3', '.wav'), 16000)
- audio_path = audio_path.replace('.mp3', '.wav')
- else:
- shutil.move(driven_audio, input_dir)
-
- elif use_idle_mode:
- audio_path = os.path.join(input_dir, 'idlemode_'+str(length_of_audio)+'.wav') ## generate audio from this new audio_path
- from pydub import AudioSegment
- one_sec_segment = AudioSegment.silent(duration=1000*length_of_audio) #duration in milliseconds
- one_sec_segment.export(audio_path, format="wav")
- else:
- print(use_ref_video, ref_info)
-            assert use_ref_video and ref_info == 'all'
-
- if use_ref_video and ref_info == 'all': # full ref mode
- ref_video_videoname = os.path.basename(ref_video)
- audio_path = os.path.join(save_dir, ref_video_videoname+'.wav')
-            print('new audio path:', audio_path)
- # if ref_video contains audio, set the audio from ref_video.
- cmd = r"ffmpeg -y -hide_banner -loglevel error -i %s %s"%(ref_video, audio_path)
- os.system(cmd)
-
- os.makedirs(save_dir, exist_ok=True)
-
-        # crop image and extract 3DMM coefficients from image
- first_frame_dir = os.path.join(save_dir, 'first_frame_dir')
- os.makedirs(first_frame_dir, exist_ok=True)
- first_coeff_path, crop_pic_path, crop_info = self.preprocess_model.generate(pic_path, first_frame_dir, preprocess, True, size)
-
- if first_coeff_path is None:
- raise AttributeError("No face is detected")
-
- if use_ref_video:
-            print('using ref video for generation')
- ref_video_videoname = os.path.splitext(os.path.split(ref_video)[-1])[0]
- ref_video_frame_dir = os.path.join(save_dir, ref_video_videoname)
- os.makedirs(ref_video_frame_dir, exist_ok=True)
- print('3DMM Extraction for the reference video providing pose')
- ref_video_coeff_path, _, _ = self.preprocess_model.generate(ref_video, ref_video_frame_dir, preprocess, source_image_flag=False)
- else:
- ref_video_coeff_path = None
-
- if use_ref_video:
- if ref_info == 'pose':
- ref_pose_coeff_path = ref_video_coeff_path
- ref_eyeblink_coeff_path = None
- elif ref_info == 'blink':
- ref_pose_coeff_path = None
- ref_eyeblink_coeff_path = ref_video_coeff_path
- elif ref_info == 'pose+blink':
- ref_pose_coeff_path = ref_video_coeff_path
- ref_eyeblink_coeff_path = ref_video_coeff_path
- elif ref_info == 'all':
- ref_pose_coeff_path = None
- ref_eyeblink_coeff_path = None
- else:
-                raise ValueError('invalid ref_info value: %s' % ref_info)
- else:
- ref_pose_coeff_path = None
- ref_eyeblink_coeff_path = None
-
-        # audio2coeff
- if use_ref_video and ref_info == 'all':
- coeff_path = ref_video_coeff_path # self.audio_to_coeff.generate(batch, save_dir, pose_style, ref_pose_coeff_path)
- else:
- batch = get_data(first_coeff_path, audio_path, self.device, ref_eyeblink_coeff_path=ref_eyeblink_coeff_path, still=still_mode, idlemode=use_idle_mode, length_of_audio=length_of_audio, use_blink=use_blink) # longer audio?
- coeff_path = self.audio_to_coeff.generate(batch, save_dir, pose_style, ref_pose_coeff_path)
-
- #coeff2video
- data = get_facerender_data(coeff_path, crop_pic_path, first_coeff_path, audio_path, batch_size, still_mode=still_mode, preprocess=preprocess, size=size, expression_scale = exp_scale)
- return_path = self.animate_from_coeff.generate(data, save_dir, pic_path, crop_info, enhancer='gfpgan' if use_enhancer else None, preprocess=preprocess, img_size=size)
- video_name = data['video_name']
- print(f'The generated video is named {video_name} in {save_dir}')
-
- del self.preprocess_model
- del self.audio_to_coeff
- del self.animate_from_coeff
-
- if torch.cuda.is_available():
- torch.cuda.empty_cache()
- torch.cuda.synchronize()
-
- import gc; gc.collect()
-
- return return_path
-
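-# Editor's usage sketch (the file paths below are assumptions):
-# sad_talker = SadTalker(checkpoint_path='checkpoints', config_path='src/config')
-# video_path = sad_talker.test('examples/face.png', 'examples/voice.wav',
-#                              preprocess='crop', still_mode=True, size=256)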
-
\ No newline at end of file
diff --git a/spaces/kevinwang676/web-singer-2/README.md b/spaces/kevinwang676/web-singer-2/README.md
deleted file mode 100644
index d8e8ec25d5d05323df8afde3e13e390082fa918c..0000000000000000000000000000000000000000
--- a/spaces/kevinwang676/web-singer-2/README.md
+++ /dev/null
@@ -1,11 +0,0 @@
----
-title: Web UI
-emoji: 🌍
-colorFrom: pink
-colorTo: pink
-sdk: docker
-pinned: false
-duplicated_from: kevinwang676/web-singer
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
\ No newline at end of file
diff --git a/spaces/kirch/Text2Video-Zero/annotator/uniformer/mmseg/models/utils/res_layer.py b/spaces/kirch/Text2Video-Zero/annotator/uniformer/mmseg/models/utils/res_layer.py
deleted file mode 100644
index b2c07b47007e92e4c3945b989e79f9d50306f5fe..0000000000000000000000000000000000000000
--- a/spaces/kirch/Text2Video-Zero/annotator/uniformer/mmseg/models/utils/res_layer.py
+++ /dev/null
@@ -1,94 +0,0 @@
-from annotator.uniformer.mmcv.cnn import build_conv_layer, build_norm_layer
-from torch import nn as nn
-
-
-class ResLayer(nn.Sequential):
- """ResLayer to build ResNet style backbone.
-
- Args:
- block (nn.Module): block used to build ResLayer.
- inplanes (int): inplanes of block.
- planes (int): planes of block.
- num_blocks (int): number of blocks.
- stride (int): stride of the first block. Default: 1
- avg_down (bool): Use AvgPool instead of stride conv when
- downsampling in the bottleneck. Default: False
- conv_cfg (dict): dictionary to construct and config conv layer.
- Default: None
- norm_cfg (dict): dictionary to construct and config norm layer.
- Default: dict(type='BN')
- multi_grid (int | None): Multi grid dilation rates of last
- stage. Default: None
- contract_dilation (bool): Whether contract first dilation of each layer
- Default: False
- """
-
- def __init__(self,
- block,
- inplanes,
- planes,
- num_blocks,
- stride=1,
- dilation=1,
- avg_down=False,
- conv_cfg=None,
- norm_cfg=dict(type='BN'),
- multi_grid=None,
- contract_dilation=False,
- **kwargs):
- self.block = block
-
- downsample = None
- if stride != 1 or inplanes != planes * block.expansion:
- downsample = []
- conv_stride = stride
- if avg_down:
- conv_stride = 1
- downsample.append(
- nn.AvgPool2d(
- kernel_size=stride,
- stride=stride,
- ceil_mode=True,
- count_include_pad=False))
- downsample.extend([
- build_conv_layer(
- conv_cfg,
- inplanes,
- planes * block.expansion,
- kernel_size=1,
- stride=conv_stride,
- bias=False),
- build_norm_layer(norm_cfg, planes * block.expansion)[1]
- ])
- downsample = nn.Sequential(*downsample)
-
- layers = []
- if multi_grid is None:
- if dilation > 1 and contract_dilation:
- first_dilation = dilation // 2
- else:
- first_dilation = dilation
- else:
- first_dilation = multi_grid[0]
- layers.append(
- block(
- inplanes=inplanes,
- planes=planes,
- stride=stride,
- dilation=first_dilation,
- downsample=downsample,
- conv_cfg=conv_cfg,
- norm_cfg=norm_cfg,
- **kwargs))
- inplanes = planes * block.expansion
- for i in range(1, num_blocks):
- layers.append(
- block(
- inplanes=inplanes,
- planes=planes,
- stride=1,
- dilation=dilation if multi_grid is None else multi_grid[i],
- conv_cfg=conv_cfg,
- norm_cfg=norm_cfg,
- **kwargs))
- super(ResLayer, self).__init__(*layers)
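-
-# Editor's usage sketch: ResLayer is normally consumed by mmseg's ResNet
-# backbone; the BasicBlock import path below is an assumption based on that
-# layout (BasicBlock.expansion == 1, so stride=1 preserves the shape):
-# from annotator.uniformer.mmseg.models.backbones.resnet import BasicBlock
-# layer = ResLayer(BasicBlock, inplanes=64, planes=64, num_blocks=2)
-# out = layer(torch.randn(1, 64, 56, 56))  # -> torch.Size([1, 64, 56, 56])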
diff --git a/spaces/koajoel/PolyFormer/fairseq/examples/speech_synthesis/preprocessing/get_common_voice_audio_manifest.py b/spaces/koajoel/PolyFormer/fairseq/examples/speech_synthesis/preprocessing/get_common_voice_audio_manifest.py
deleted file mode 100644
index a30254604311a488a1d4959f941051890ed32b2e..0000000000000000000000000000000000000000
--- a/spaces/koajoel/PolyFormer/fairseq/examples/speech_synthesis/preprocessing/get_common_voice_audio_manifest.py
+++ /dev/null
@@ -1,140 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import argparse
-import logging
-from pathlib import Path
-from collections import defaultdict
-from typing import List, Dict, Tuple
-
-import pandas as pd
-import numpy as np
-import torchaudio
-from tqdm import tqdm
-
-from examples.speech_to_text.data_utils import load_df_from_tsv, save_df_to_tsv
-
-
-log = logging.getLogger(__name__)
-
-SPLITS = ["train", "dev", "test"]
-
-
-def get_top_n(
- root: Path, n_speakers: int = 10, min_n_tokens: int = 5
-) -> pd.DataFrame:
- df = load_df_from_tsv(root / "validated.tsv")
- df["n_tokens"] = [len(s.split()) for s in df["sentence"]]
- df = df[df["n_tokens"] >= min_n_tokens]
- df["n_frames"] = [
- torchaudio.info((root / "clips" / p).as_posix()).num_frames
- for p in tqdm(df["path"])
- ]
- df["id"] = [Path(p).stem for p in df["path"]]
- # "n_frames" is summed as a proxy for per-speaker duration, despite the _ms name
- total_duration_ms = df.groupby("client_id")["n_frames"].agg(["sum"])
- total_duration_ms = total_duration_ms.sort_values("sum", ascending=False)
-
- top_n_total_duration_ms = total_duration_ms.head(n_speakers)
- top_n_client_ids = set(top_n_total_duration_ms.index.tolist())
- df_top_n = df[df["client_id"].isin(top_n_client_ids)]
- return df_top_n
-
-
-def get_splits(
- df, train_split_ratio=0.99, speaker_in_all_splits=False, rand_seed=0
-) -> Tuple[Dict[str, str], List[str]]:
- np.random.seed(rand_seed)
- dev_split_ratio = (1. - train_split_ratio) / 3
- grouped = list(df.groupby("client_id"))
- id_to_split = {}
- for _, cur_df in tqdm(grouped):
- cur_n_examples = len(cur_df)
- if speaker_in_all_splits and cur_n_examples < 3:
- continue
- cur_n_train = int(cur_n_examples * train_split_ratio)
- cur_n_dev = int(cur_n_examples * dev_split_ratio)
- cur_n_test = cur_n_examples - cur_n_dev - cur_n_train
- if speaker_in_all_splits and cur_n_dev * cur_n_test == 0:
- cur_n_dev, cur_n_test = 1, 1
- cur_n_train = cur_n_examples - cur_n_dev - cur_n_test
- cur_indices = cur_df.index.tolist()
- cur_shuffled_indices = np.random.permutation(cur_n_examples)
- cur_shuffled_indices = [cur_indices[i] for i in cur_shuffled_indices]
- cur_indices_by_split = {
- "train": cur_shuffled_indices[:cur_n_train],
- "dev": cur_shuffled_indices[cur_n_train: cur_n_train + cur_n_dev],
- "test": cur_shuffled_indices[cur_n_train + cur_n_dev:]
- }
- for split in SPLITS:
- for i in cur_indices_by_split[split]:
- id_ = df["id"].loc[i]
- id_to_split[id_] = split
- return id_to_split, sorted(df["client_id"].unique())
-
-
-def convert_to_wav(root: Path, filenames: List[str], target_sr=16_000):
- out_root = root / "wav"
- out_root.mkdir(exist_ok=True, parents=True)
- print("Converting to WAV...")
- for n in tqdm(filenames):
- in_path = (root / "clips" / n).as_posix()
- waveform, sr = torchaudio.load(in_path)
- converted, converted_sr = torchaudio.sox_effects.apply_effects_tensor(
- waveform, sr, [["rate", str(target_sr)], ["channels", "1"]]
- )
- out_path = (out_root / Path(n).with_suffix(".wav").name).as_posix()
- torchaudio.save(out_path, converted, converted_sr, encoding="PCM_S",
- bits_per_sample=16)
-
-
-def process(args):
- data_root = Path(args.data_root).absolute() / args.lang
-
- # Generate TSV manifest
- print("Generating manifest...")
-
- df_top_n = get_top_n(data_root)
- id_to_split, speakers = get_splits(df_top_n)
-
- if args.convert_to_wav:
- convert_to_wav(data_root, df_top_n["path"].tolist())
-
- manifest_by_split = {split: defaultdict(list) for split in SPLITS}
- for sample in tqdm(df_top_n.to_dict(orient="index").values()):
- sample_id = sample["id"]
- split = id_to_split[sample_id]
- manifest_by_split[split]["id"].append(sample_id)
- if args.convert_to_wav:
- audio_path = data_root / "wav" / f"{sample_id}.wav"
- else:
- audio_path = data_root / "clips" / f"{sample_id}.mp3"
- manifest_by_split[split]["audio"].append(audio_path.as_posix())
- manifest_by_split[split]["n_frames"].append(sample["n_frames"])
- manifest_by_split[split]["tgt_text"].append(sample["sentence"])
- manifest_by_split[split]["speaker"].append(sample["client_id"])
- manifest_by_split[split]["src_text"].append(sample["sentence"])
-
- output_root = Path(args.output_manifest_root).absolute()
- output_root.mkdir(parents=True, exist_ok=True)
- for split in SPLITS:
- save_df_to_tsv(
- pd.DataFrame.from_dict(manifest_by_split[split]),
- output_root / f"{split}.audio.tsv"
- )
-
-
-def main():
- parser = argparse.ArgumentParser()
- parser.add_argument("--data-root", "-d", required=True, type=str)
- parser.add_argument("--output-manifest-root", "-m", required=True, type=str)
- parser.add_argument("--lang", "-l", required=True, type=str)
- parser.add_argument("--convert-to-wav", action="store_true")
- args = parser.parse_args()
-
- process(args)
-
-
-if __name__ == "__main__":
- main()
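A quick worked example of the per-speaker split arithmetic in get_splits above, using the default train_split_ratio=0.99; the clip count is hypothetical.

```python
# Per-speaker split sizes, as computed in get_splits above.
train_split_ratio = 0.99
dev_split_ratio = (1.0 - train_split_ratio) / 3  # a third of the remainder -> dev

n_examples = 300  # hypothetical clip count for one speaker
n_train = int(n_examples * train_split_ratio)
n_dev = int(n_examples * dev_split_ratio)
n_test = n_examples - n_dev - n_train
print(n_train, n_dev, n_test)  # 297 1 2 -> this speaker lands in every split
```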
diff --git a/spaces/kodirovshchik/food_classification_api/Dockerfile b/spaces/kodirovshchik/food_classification_api/Dockerfile
deleted file mode 100644
index 1217a78cc9bfcb25112a9ef7aca70a292f70d0d1..0000000000000000000000000000000000000000
--- a/spaces/kodirovshchik/food_classification_api/Dockerfile
+++ /dev/null
@@ -1,12 +0,0 @@
-FROM python:3.9
-
-WORKDIR /code
-
-COPY ./requirements.txt /code/requirements.txt
-
-RUN apt-get update && apt-get install ffmpeg libsm6 libxext6 -y
-RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt
-
-COPY . .
-
-CMD ["gunicorn", "-b", "0.0.0.0:7860", "main:app"]
\ No newline at end of file
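The CMD above expects a WSGI application object named app in main.py. That file is not part of this diff; a minimal hypothetical stand-in that gunicorn could serve looks like this:

```python
# main.py -- minimal WSGI app for `gunicorn -b 0.0.0.0:7860 main:app`
# (hypothetical; the Space's real main.py is not shown in this diff).
from flask import Flask, jsonify

app = Flask(__name__)


@app.route("/")
def health():
    return jsonify(status="ok")
```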
diff --git a/spaces/kukuhtw/AutoGPT/main.py b/spaces/kukuhtw/AutoGPT/main.py
deleted file mode 100644
index 160addc390b94a8b143a3a2e18991a560f9b032e..0000000000000000000000000000000000000000
--- a/spaces/kukuhtw/AutoGPT/main.py
+++ /dev/null
@@ -1 +0,0 @@
-from autogpt import main
diff --git a/spaces/kukuhtw/AutoGPT/tests/test_image_gen.py b/spaces/kukuhtw/AutoGPT/tests/test_image_gen.py
deleted file mode 100644
index 19c57e427d5c1b84aa7f72925733d0056ddf5268..0000000000000000000000000000000000000000
--- a/spaces/kukuhtw/AutoGPT/tests/test_image_gen.py
+++ /dev/null
@@ -1,102 +0,0 @@
-import hashlib
-import os
-import unittest
-
-from PIL import Image
-
-from autogpt.commands.image_gen import generate_image, generate_image_with_sd_webui
-from autogpt.config import Config
-from autogpt.workspace import path_in_workspace
-
-
-def lst(txt):
- return txt.split(":")[1].strip()
-
-
-@unittest.skipIf(os.getenv("CI"), "Skipping image generation tests")
-class TestImageGen(unittest.TestCase):
- def setUp(self):
- self.config = Config()
-
- def test_dalle(self):
- self.config.image_provider = "dalle"
-
- # Test using size 256
- result = lst(generate_image("astronaut riding a horse", 256))
- image_path = path_in_workspace(result)
- self.assertTrue(image_path.exists())
- with Image.open(image_path) as img:
- self.assertEqual(img.size, (256, 256))
- image_path.unlink()
-
- # Test using size 512
- result = lst(generate_image("astronaut riding a horse", 512))
- image_path = path_in_workspace(result)
- with Image.open(image_path) as img:
- self.assertEqual(img.size, (512, 512))
- image_path.unlink()
-
- def test_huggingface(self):
- self.config.image_provider = "huggingface"
-
- # Test using SD 1.4 model and size 512
- self.config.huggingface_image_model = "CompVis/stable-diffusion-v1-4"
- result = lst(generate_image("astronaut riding a horse", 512))
- image_path = path_in_workspace(result)
- self.assertTrue(image_path.exists())
- with Image.open(image_path) as img:
- self.assertEqual(img.size, (512, 512))
- image_path.unlink()
-
- # Test using SD 2.1 768 model and size 768
- self.config.huggingface_image_model = "stabilityai/stable-diffusion-2-1"
- result = lst(generate_image("astronaut riding a horse", 768))
- image_path = path_in_workspace(result)
- with Image.open(image_path) as img:
- self.assertEqual(img.size, (768, 768))
- image_path.unlink()
-
- def test_sd_webui(self):
- self.config.image_provider = "sd_webui"
- return  # skipped by default: needs a running Stable Diffusion WebUI instance
-
- # Test using size 128
- result = lst(generate_image_with_sd_webui("astronaut riding a horse", 128))
- image_path = path_in_workspace(result)
- self.assertTrue(image_path.exists())
- with Image.open(image_path) as img:
- self.assertEqual(img.size, (128, 128))
- image_path.unlink()
-
- # Test using size 64 and negative prompt
- result = lst(
- generate_image_with_sd_webui(
- "astronaut riding a horse",
- negative_prompt="horse",
- size=64,
- extra={"seed": 123},
- )
- )
- image_path = path_in_workspace(result)
- with Image.open(image_path) as img:
- self.assertEqual(img.size, (64, 64))
- neg_image_hash = hashlib.md5(img.tobytes()).hexdigest()
- image_path.unlink()
-
- # Same test as above but without the negative prompt
- result = lst(
- generate_image_with_sd_webui(
- "astronaut riding a horse", image_size=64, size=1, extra={"seed": 123}
- )
- )
- image_path = path_in_workspace(result)
- with Image.open(image_path) as img:
- self.assertEqual(img.size, (64, 64))
- image_hash = hashlib.md5(img.tobytes()).hexdigest()
- image_path.unlink()
-
- self.assertNotEqual(image_hash, neg_image_hash)
-
-
-if __name__ == "__main__":
- unittest.main()
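The lst() helper in the tests above assumes generate_image returns a message of the form "prefix: path" and extracts the path after the first colon; the exact message text below is illustrative, not taken from this diff.

```python
def lst(txt):
    """Extract the path after the first colon, as in the tests above."""
    return txt.split(":")[1].strip()


print(lst("Saved to disk: 1234.png"))  # '1234.png'
```

Note that the split breaks on paths containing a colon, which the tests can get away with because the workspace paths they generate never do.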
diff --git a/spaces/kukuhtw/VToonify/vtoonify/model/raft/download_models.sh b/spaces/kukuhtw/VToonify/vtoonify/model/raft/download_models.sh
deleted file mode 100644
index 7b6ed7e478b74699d3c8db3bd744643c35f7da76..0000000000000000000000000000000000000000
--- a/spaces/kukuhtw/VToonify/vtoonify/model/raft/download_models.sh
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/bin/bash
-wget https://www.dropbox.com/s/4j4z58wuv8o0mfz/models.zip
-unzip models.zip
diff --git a/spaces/kunderabr/ResumoYouTube/README.md b/spaces/kunderabr/ResumoYouTube/README.md
deleted file mode 100644
index c3aaed43857980ca00919943be80de7850eaa352..0000000000000000000000000000000000000000
--- a/spaces/kunderabr/ResumoYouTube/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: ResumoYouTube
-emoji: 🔥
-colorFrom: gray
-colorTo: purple
-sdk: gradio
-sdk_version: 3.47.1
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/feaLib/__main__.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/feaLib/__main__.py
deleted file mode 100644
index a45230e8dbd8399fdd2a5d292bf71fe96c271b78..0000000000000000000000000000000000000000
--- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/feaLib/__main__.py
+++ /dev/null
@@ -1,78 +0,0 @@
-from fontTools.ttLib import TTFont
-from fontTools.feaLib.builder import addOpenTypeFeatures, Builder
-from fontTools.feaLib.error import FeatureLibError
-from fontTools import configLogger
-from fontTools.misc.cliTools import makeOutputFileName
-import sys
-import argparse
-import logging
-
-
-log = logging.getLogger("fontTools.feaLib")
-
-
-def main(args=None):
- """Add features from a feature file (.fea) into an OTF font"""
- parser = argparse.ArgumentParser(
- description="Use fontTools to compile OpenType feature files (*.fea)."
- )
- parser.add_argument(
- "input_fea", metavar="FEATURES", help="Path to the feature file"
- )
- parser.add_argument(
- "input_font", metavar="INPUT_FONT", help="Path to the input font"
- )
- parser.add_argument(
- "-o",
- "--output",
- dest="output_font",
- metavar="OUTPUT_FONT",
- help="Path to the output font.",
- )
- parser.add_argument(
- "-t",
- "--tables",
- metavar="TABLE_TAG",
- choices=Builder.supportedTables,
- nargs="+",
- help="Specify the table(s) to be built.",
- )
- parser.add_argument(
- "-d",
- "--debug",
- action="store_true",
- help="Add source-level debugging information to font.",
- )
- parser.add_argument(
- "-v",
- "--verbose",
- help="Increase the logger verbosity. Multiple -v " "options are allowed.",
- action="count",
- default=0,
- )
- parser.add_argument(
- "--traceback", help="show traceback for exceptions.", action="store_true"
- )
- options = parser.parse_args(args)
-
- levels = ["WARNING", "INFO", "DEBUG"]
- configLogger(level=levels[min(len(levels) - 1, options.verbose)])
-
- output_font = options.output_font or makeOutputFileName(options.input_font)
- log.info("Compiling features to '%s'" % (output_font))
-
- font = TTFont(options.input_font)
- try:
- addOpenTypeFeatures(
- font, options.input_fea, tables=options.tables, debug=options.debug
- )
- except FeatureLibError as e:
- if options.traceback:
- raise
- log.error(e)
- sys.exit(1)
- font.save(output_font)
-
-
-if __name__ == "__main__":
- sys.exit(main())
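The CLI above is a thin wrapper over addOpenTypeFeatures; the same compile step can be driven directly from Python. The font and feature-file paths below are placeholders.

```python
from fontTools.ttLib import TTFont
from fontTools.feaLib.builder import addOpenTypeFeatures

font = TTFont("MyFont.ttf")                # placeholder input font
addOpenTypeFeatures(font, "features.fea")  # compile the .fea into the font
font.save("MyFont.fea.ttf")                # placeholder output path
```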
diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/ttLib/tables/_h_d_m_x.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/ttLib/tables/_h_d_m_x.py
deleted file mode 100644
index b6d56a7e70823e14d790361a844df2c6553ff35f..0000000000000000000000000000000000000000
--- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/ttLib/tables/_h_d_m_x.py
+++ /dev/null
@@ -1,119 +0,0 @@
-from fontTools.misc import sstruct
-from fontTools.misc.textTools import bytechr, byteord, strjoin
-from . import DefaultTable
-import array
-from collections.abc import Mapping
-
-hdmxHeaderFormat = """
- > # big endian!
- version: H
- numRecords: H
- recordSize: l
-"""
-
-
-class _GlyphnamedList(Mapping):
- def __init__(self, reverseGlyphOrder, data):
- self._array = data
- self._map = dict(reverseGlyphOrder)
-
- def __getitem__(self, k):
- return self._array[self._map[k]]
-
- def __len__(self):
- return len(self._map)
-
- def __iter__(self):
- return iter(self._map)
-
- def keys(self):
- return self._map.keys()
-
-
-class table__h_d_m_x(DefaultTable.DefaultTable):
- def decompile(self, data, ttFont):
- numGlyphs = ttFont["maxp"].numGlyphs
- glyphOrder = ttFont.getGlyphOrder()
- dummy, data = sstruct.unpack2(hdmxHeaderFormat, data, self)
- self.hdmx = {}
- for i in range(self.numRecords):
- ppem = byteord(data[0])
- maxSize = byteord(data[1])
- widths = _GlyphnamedList(
- ttFont.getReverseGlyphMap(), array.array("B", data[2 : 2 + numGlyphs])
- )
- self.hdmx[ppem] = widths
- data = data[self.recordSize :]
- assert len(data) == 0, "too much hdmx data"
-
- def compile(self, ttFont):
- self.version = 0
- numGlyphs = ttFont["maxp"].numGlyphs
- glyphOrder = ttFont.getGlyphOrder()
- self.recordSize = 4 * ((2 + numGlyphs + 3) // 4)
- pad = (self.recordSize - 2 - numGlyphs) * b"\0"
- self.numRecords = len(self.hdmx)
- data = sstruct.pack(hdmxHeaderFormat, self)
- items = sorted(self.hdmx.items())
- for ppem, widths in items:
- data = data + bytechr(ppem) + bytechr(max(widths.values()))
- for glyphID in range(len(glyphOrder)):
- width = widths[glyphOrder[glyphID]]
- data = data + bytechr(width)
- data = data + pad
- return data
-
- def toXML(self, writer, ttFont):
- writer.begintag("hdmxData")
- writer.newline()
- ppems = sorted(self.hdmx.keys())
- records = []
- format = ""
- for ppem in ppems:
- widths = self.hdmx[ppem]
- records.append(widths)
- format = format + "%4d"
- glyphNames = ttFont.getGlyphOrder()[:]
- glyphNames.sort()
- maxNameLen = max(map(len, glyphNames))
- format = "%" + repr(maxNameLen) + "s:" + format + " ;"
- writer.write(format % (("ppem",) + tuple(ppems)))
- writer.newline()
- writer.newline()
- for glyphName in glyphNames:
- row = []
- for ppem in ppems:
- widths = self.hdmx[ppem]
- row.append(widths[glyphName])
- if ";" in glyphName:
- glyphName = "\\x3b".join(glyphName.split(";"))
- writer.write(format % ((glyphName,) + tuple(row)))
- writer.newline()
- writer.endtag("hdmxData")
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- if name != "hdmxData":
- return
- content = strjoin(content)
- lines = content.split(";")
- topRow = lines[0].split()
- assert topRow[0] == "ppem:", "illegal hdmx format"
- ppems = list(map(int, topRow[1:]))
- self.hdmx = hdmx = {}
- for ppem in ppems:
- hdmx[ppem] = {}
- lines = (line.split() for line in lines[1:])
- for line in lines:
- if not line:
- continue
- assert line[0][-1] == ":", "illegal hdmx format"
- glyphName = line[0][:-1]
- if "\\" in glyphName:
- from fontTools.misc.textTools import safeEval
-
- glyphName = safeEval('"""' + glyphName + '"""')
- line = list(map(int, line[1:]))
- assert len(line) == len(ppems), "illegal hdmx format"
- for i in range(len(ppems)):
- hdmx[ppems[i]][glyphName] = line[i]
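The recordSize computed in compile() above pads each device-metrics record (one ppem byte, one maxSize byte, then one width byte per glyph) up to a multiple of four bytes. A standalone sketch of that arithmetic:

```python
def hdmx_record_size(num_glyphs: int) -> int:
    """Record size as in table__h_d_m_x.compile: 2 header bytes plus one
    width byte per glyph, rounded up to a 4-byte boundary."""
    return 4 * ((2 + num_glyphs + 3) // 4)


for n in (1, 6, 100):
    print(n, hdmx_record_size(n))  # 1 -> 4, 6 -> 8, 100 -> 104
```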
diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fsspec/conftest.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fsspec/conftest.py
deleted file mode 100644
index 6874a42c4895c3c7b973dc5d63fd4488a4e60b44..0000000000000000000000000000000000000000
--- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fsspec/conftest.py
+++ /dev/null
@@ -1,55 +0,0 @@
-import os
-import shutil
-import subprocess
-import sys
-import time
-
-import pytest
-
-import fsspec
-from fsspec.implementations.cached import CachingFileSystem
-
-
-@pytest.fixture()
-def m():
- """
- Fixture providing a memory filesystem.
- """
- m = fsspec.filesystem("memory")
- m.store.clear()
- m.pseudo_dirs.clear()
- m.pseudo_dirs.append("")
- try:
- yield m
- finally:
- m.store.clear()
- m.pseudo_dirs.clear()
- m.pseudo_dirs.append("")
-
-
-@pytest.fixture
-def ftp_writable(tmpdir):
- """
- Fixture providing a writable FTP filesystem.
- """
- pytest.importorskip("pyftpdlib")
- from fsspec.implementations.ftp import FTPFileSystem
-
- FTPFileSystem.clear_instance_cache() # remove lingering connections
- CachingFileSystem.clear_instance_cache()
- d = str(tmpdir)
- with open(os.path.join(d, "out"), "wb") as f:
- f.write(b"hello" * 10000)
- P = subprocess.Popen(
- [sys.executable, "-m", "pyftpdlib", "-d", d, "-u", "user", "-P", "pass", "-w"]
- )
- try:
- time.sleep(1)
- yield "localhost", 2121, "user", "pass"
- finally:
- P.terminate()
- P.wait()
- try:
- shutil.rmtree(tmpdir)
- except Exception:
- pass
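The m fixture above hands tests a freshly cleared in-memory filesystem; the same object can be exercised directly outside pytest. A minimal sketch:

```python
import fsspec

m = fsspec.filesystem("memory")
with m.open("/demo/hello.txt", "wb") as f:
    f.write(b"hello")
print(m.cat("/demo/hello.txt"))  # b'hello'
m.store.clear()  # reset global state, as the fixture's teardown does
```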
diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/StaticImage.svelte_svelte_type_style_lang-c5ace72f.js b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/StaticImage.svelte_svelte_type_style_lang-c5ace72f.js
deleted file mode 100644
index c6135b2052cc66ff53348fa1553769ba6a2685e6..0000000000000000000000000000000000000000
--- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/StaticImage.svelte_svelte_type_style_lang-c5ace72f.js
+++ /dev/null
@@ -1,11 +0,0 @@
-import{S as bt,i as wt,s as yt,B as P,C as p,g as U,E as Z,F as I,q as j,G as ct,H as je,M as re,p as W,l as jt,t as G,o as Vt,r as Ve,u as Ge,J as qe,a1 as Fe,b as Ke,e as Gt,m as qt,n as Ft,f as Qe}from"./index-7c0e54a6.js";/* empty css */function Ze(a){let t,e,i;return{c(){t=P("svg"),e=P("path"),i=P("circle"),p(e,"d","M23 19a2 2 0 0 1-2 2H3a2 2 0 0 1-2-2V8a2 2 0 0 1 2-2h4l2-3h6l2 3h4a2 2 0 0 1 2 2z"),p(i,"cx","12"),p(i,"cy","13"),p(i,"r","4"),p(t,"xmlns","http://www.w3.org/2000/svg"),p(t,"width","100%"),p(t,"height","100%"),p(t,"viewBox","0 0 24 24"),p(t,"fill","none"),p(t,"stroke","currentColor"),p(t,"stroke-width","1.5"),p(t,"stroke-linecap","round"),p(t,"stroke-linejoin","round"),p(t,"class","feather feather-camera")},m(n,r){U(n,t,r),Z(t,e),Z(t,i)},p:I,i:I,o:I,d(n){n&&j(t)}}}class Je extends bt{constructor(t){super(),wt(this,t,null,Ze,yt,{})}}function $e(a){let t,e;return{c(){t=P("svg"),e=P("circle"),p(e,"cx","12"),p(e,"cy","12"),p(e,"r","10"),p(t,"xmlns","http://www.w3.org/2000/svg"),p(t,"width","100%"),p(t,"height","100%"),p(t,"viewBox","0 0 24 24"),p(t,"fill","red"),p(t,"stroke","red"),p(t,"stroke-width","1.5"),p(t,"stroke-linecap","round"),p(t,"stroke-linejoin","round"),p(t,"class","feather feather-circle")},m(i,n){U(i,t,n),Z(t,e)},p:I,i:I,o:I,d(i){i&&j(t)}}}class ti extends bt{constructor(t){super(),wt(this,t,null,$e,yt,{})}}function ei(a){let t,e;return{c(){t=P("svg"),e=P("rect"),p(e,"x","3"),p(e,"y","3"),p(e,"width","18"),p(e,"height","18"),p(e,"rx","2"),p(e,"ry","2"),p(t,"xmlns","http://www.w3.org/2000/svg"),p(t,"width","100%"),p(t,"height","100%"),p(t,"viewBox","0 0 24 24"),p(t,"fill","red"),p(t,"stroke","red"),p(t,"stroke-width","1.5"),p(t,"stroke-linecap","round"),p(t,"stroke-linejoin","round"),p(t,"class","feather feather-square")},m(i,n){U(i,t,n),Z(t,e)},p:I,i:I,o:I,d(i){i&&j(t)}}}class ii extends bt{constructor(t){super(),wt(this,t,null,ei,yt,{})}}function ai(a){let t,e,i;return{c(){t=P("svg"),e=P("polyline"),i=P("path"),p(e,"points","1 4 1 10 7 10"),p(i,"d","M3.51 15a9 9 0 1 0 2.13-9.36L1 10"),p(t,"xmlns","http://www.w3.org/2000/svg"),p(t,"width","100%"),p(t,"height","100%"),p(t,"viewBox","0 0 24 24"),p(t,"fill","none"),p(t,"stroke","currentColor"),p(t,"stroke-width","1.5"),p(t,"stroke-linecap","round"),p(t,"stroke-linejoin","round"),p(t,"class","feather feather-rotate-ccw")},m(n,r){U(n,t,r),Z(t,e),Z(t,i)},p:I,i:I,o:I,d(n){n&&j(t)}}}class ba extends bt{constructor(t){super(),wt(this,t,null,ai,yt,{})}}/*!
- * Cropper.js v1.5.12
- * https://fengyuanchen.github.io/cropperjs
- *
- * Copyright 2015-present Chen Fengyuan
- * Released under the MIT license
- *
- * Date: 2021-06-12T08:00:17.411Z
- */function ne(a,t){var e=Object.keys(a);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(a);t&&(i=i.filter(function(n){return Object.getOwnPropertyDescriptor(a,n).enumerable})),e.push.apply(e,i)}return e}function Ee(a){for(var t=1;ta.length)&&(t=a.length);for(var e=0,i=new Array(t);e