diff --git a/spaces/101-5/gpt4free/g4f/.v1/gpt4free/__init__.py b/spaces/101-5/gpt4free/g4f/.v1/gpt4free/__init__.py deleted file mode 100644 index bcc03a3b93386abe1089181bb979de6b350dd554..0000000000000000000000000000000000000000 --- a/spaces/101-5/gpt4free/g4f/.v1/gpt4free/__init__.py +++ /dev/null @@ -1,103 +0,0 @@ -from enum import Enum - -from gpt4free import forefront -from gpt4free import quora -from gpt4free import theb -from gpt4free import usesless -from gpt4free import you -from gpt4free import aicolors -from gpt4free import deepai - - -class Provider(Enum): - """An enum representing different providers.""" - - You = "you" - Poe = "poe" - ForeFront = "fore_front" - Theb = "theb" - UseLess = "useless" - AiColors = "ai_colors" - DeepAI = "deepai" - - -class Completion: - """This class will be used for invoking the given provider""" - - @staticmethod - def create(provider: Provider, prompt: str, **kwargs) -> str: - """ - Invokes the given provider with given prompt and addition arguments and returns the string response - - :param provider: an enum representing the provider to use while invoking - :param prompt: input provided by the user - :param kwargs: Additional keyword arguments to pass to the provider while invoking - :return: A string representing the response from the provider - """ - if provider == Provider.Poe: - return Completion.__poe_service(prompt, **kwargs) - elif provider == Provider.You: - return Completion.__you_service(prompt, **kwargs) - elif provider == Provider.ForeFront: - return Completion.__fore_front_service(prompt, **kwargs) - elif provider == Provider.Theb: - return Completion.__theb_service(prompt, **kwargs) - elif provider == Provider.UseLess: - return Completion.__useless_service(prompt, **kwargs) - elif provider == Provider.AiColors: - return Completion.__ai_colors_service(prompt, **kwargs) - elif provider == Provider.DeepAI: - return Completion.__deepai_service(prompt, **kwargs) - else: - raise Exception("Provider not exist, Please try again") - - @staticmethod - def __ai_colors_service(prompt: str): - return aicolors.Completion.create(prompt=prompt) - - @staticmethod - def __useless_service(prompt: str, **kwargs) -> str: - return usesless.Completion.create(prompt=prompt, **kwargs) - - @staticmethod - def __you_service(prompt: str, **kwargs) -> str: - return you.Completion.create(prompt, **kwargs).text - - @staticmethod - def __poe_service(prompt: str, **kwargs) -> str: - return quora.Completion.create(prompt=prompt, **kwargs).text - - @staticmethod - def __fore_front_service(prompt: str, **kwargs) -> str: - return forefront.Completion.create(prompt=prompt, **kwargs).text - - @staticmethod - def __theb_service(prompt: str, **kwargs): - return "".join(theb.Completion.create(prompt=prompt)) - - @staticmethod - def __deepai_service(prompt: str, **kwargs): - return "".join(deepai.Completion.create(prompt=prompt)) - - -class ChatCompletion: - """This class is used to execute a chat completion for a specified provider""" - - @staticmethod - def create(provider: Provider, messages: list, **kwargs) -> str: - """ - Invokes the given provider with given chat messages and addition arguments and returns the string response - - :param provider: an enum representing the provider to use while invoking - :param messages: a list of chat messages, see the OpenAI docs for how to format this (https://platform.openai.com/docs/guides/chat/introduction) - :param kwargs: Additional keyword arguments to pass to the provider while invoking - :return: A string representing the 
response from the provider - """ - if provider == Provider.DeepAI: - return ChatCompletion.__deepai_service(messages, **kwargs) - else: - raise Exception("Provider not exist, Please try again") - - @staticmethod - def __deepai_service(messages: list, **kwargs): - return "".join(deepai.ChatCompletion.create(messages=messages)) diff --git a/spaces/1368565466ki/Satdia/attentions.py b/spaces/1368565466ki/Satdia/attentions.py deleted file mode 100644 index 86bc73b5fe98cc7b443e9078553920346c996707..0000000000000000000000000000000000000000 --- a/spaces/1368565466ki/Satdia/attentions.py +++ /dev/null @@ -1,300 +0,0 @@ -import math -import torch -from torch import nn -from torch.nn import functional as F - -import commons -from modules import LayerNorm - - -class Encoder(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.window_size = window_size - - self.drop = nn.Dropout(p_dropout) - self.attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout)) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask): - attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.attn_layers[i](x, x, attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class Decoder(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - - self.drop = nn.Dropout(p_dropout) - self.self_attn_layers = nn.ModuleList() - self.norm_layers_0 = nn.ModuleList() - self.encdec_attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init)) - self.norm_layers_0.append(LayerNorm(hidden_channels)) - self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True)) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask, h, h_mask): - """ - x: decoder input - h: 
encoder output - """ - self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype) - encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.self_attn_layers[i](x, x, self_attn_mask) - y = self.drop(y) - x = self.norm_layers_0[i](x + y) - - y = self.encdec_attn_layers[i](x, h, encdec_attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class MultiHeadAttention(nn.Module): - def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False): - super().__init__() - assert channels % n_heads == 0 - - self.channels = channels - self.out_channels = out_channels - self.n_heads = n_heads - self.p_dropout = p_dropout - self.window_size = window_size - self.heads_share = heads_share - self.block_length = block_length - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - self.attn = None - - self.k_channels = channels // n_heads - self.conv_q = nn.Conv1d(channels, channels, 1) - self.conv_k = nn.Conv1d(channels, channels, 1) - self.conv_v = nn.Conv1d(channels, channels, 1) - self.conv_o = nn.Conv1d(channels, out_channels, 1) - self.drop = nn.Dropout(p_dropout) - - if window_size is not None: - n_heads_rel = 1 if heads_share else n_heads - rel_stddev = self.k_channels**-0.5 - self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - - nn.init.xavier_uniform_(self.conv_q.weight) - nn.init.xavier_uniform_(self.conv_k.weight) - nn.init.xavier_uniform_(self.conv_v.weight) - if proximal_init: - with torch.no_grad(): - self.conv_k.weight.copy_(self.conv_q.weight) - self.conv_k.bias.copy_(self.conv_q.bias) - - def forward(self, x, c, attn_mask=None): - q = self.conv_q(x) - k = self.conv_k(c) - v = self.conv_v(c) - - x, self.attn = self.attention(q, k, v, mask=attn_mask) - - x = self.conv_o(x) - return x - - def attention(self, query, key, value, mask=None): - # reshape [b, d, t] -> [b, n_h, t, d_k] - b, d, t_s, t_t = (*key.size(), query.size(2)) - query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3) - key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - - scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1)) - if self.window_size is not None: - assert t_s == t_t, "Relative attention is only available for self-attention." - key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) - rel_logits = self._matmul_with_relative_keys(query /math.sqrt(self.k_channels), key_relative_embeddings) - scores_local = self._relative_position_to_absolute_position(rel_logits) - scores = scores + scores_local - if self.proximal_bias: - assert t_s == t_t, "Proximal bias is only available for self-attention." - scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype) - if mask is not None: - scores = scores.masked_fill(mask == 0, -1e4) - if self.block_length is not None: - assert t_s == t_t, "Local attention is only available for self-attention." 
- block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length) - scores = scores.masked_fill(block_mask == 0, -1e4) - p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] - p_attn = self.drop(p_attn) - output = torch.matmul(p_attn, value) - if self.window_size is not None: - relative_weights = self._absolute_position_to_relative_position(p_attn) - value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s) - output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings) - output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t] - return output, p_attn - - def _matmul_with_relative_values(self, x, y): - """ - x: [b, h, l, m] - y: [h or 1, m, d] - ret: [b, h, l, d] - """ - ret = torch.matmul(x, y.unsqueeze(0)) - return ret - - def _matmul_with_relative_keys(self, x, y): - """ - x: [b, h, l, d] - y: [h or 1, m, d] - ret: [b, h, l, m] - """ - ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) - return ret - - def _get_relative_embeddings(self, relative_embeddings, length): - max_relative_position = 2 * self.window_size + 1 - # Pad first before slice to avoid using cond ops. - pad_length = max(length - (self.window_size + 1), 0) - slice_start_position = max((self.window_size + 1) - length, 0) - slice_end_position = slice_start_position + 2 * length - 1 - if pad_length > 0: - padded_relative_embeddings = F.pad( - relative_embeddings, - commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]])) - else: - padded_relative_embeddings = relative_embeddings - used_relative_embeddings = padded_relative_embeddings[:,slice_start_position:slice_end_position] - return used_relative_embeddings - - def _relative_position_to_absolute_position(self, x): - """ - x: [b, h, l, 2*l-1] - ret: [b, h, l, l] - """ - batch, heads, length, _ = x.size() - # Concat columns of pad to shift from relative to absolute indexing. - x = F.pad(x, commons.convert_pad_shape([[0,0],[0,0],[0,0],[0,1]])) - - # Concat extra elements so to add up to shape (len+1, 2*len-1). - x_flat = x.view([batch, heads, length * 2 * length]) - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0,0],[0,0],[0,length-1]])) - - # Reshape and slice out the padded elements. - x_final = x_flat.view([batch, heads, length+1, 2*length-1])[:, :, :length, length-1:] - return x_final - - def _absolute_position_to_relative_position(self, x): - """ - x: [b, h, l, l] - ret: [b, h, l, 2*l-1] - """ - batch, heads, length, _ = x.size() - # padd along column - x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length-1]])) - x_flat = x.view([batch, heads, length**2 + length*(length -1)]) - # add 0's in the beginning that will skew the elements after reshape - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]])) - x_final = x_flat.view([batch, heads, length, 2*length])[:,:,:,1:] - return x_final - - def _attention_bias_proximal(self, length): - """Bias for self-attention to encourage attention to close positions. - Args: - length: an integer scalar. 
- Returns: - a Tensor with shape [1, 1, length, length] - """ - r = torch.arange(length, dtype=torch.float32) - diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) - return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) - - -class FFN(nn.Module): - def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.activation = activation - self.causal = causal - - if causal: - self.padding = self._causal_padding - else: - self.padding = self._same_padding - - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size) - self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size) - self.drop = nn.Dropout(p_dropout) - - def forward(self, x, x_mask): - x = self.conv_1(self.padding(x * x_mask)) - if self.activation == "gelu": - x = x * torch.sigmoid(1.702 * x) - else: - x = torch.relu(x) - x = self.drop(x) - x = self.conv_2(self.padding(x * x_mask)) - return x * x_mask - - def _causal_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = self.kernel_size - 1 - pad_r = 0 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x - - def _same_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = (self.kernel_size - 1) // 2 - pad_r = self.kernel_size // 2 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x diff --git a/spaces/1368565466ki/ZSTRD/monotonic_align/__init__.py b/spaces/1368565466ki/ZSTRD/monotonic_align/__init__.py deleted file mode 100644 index e97eecc595dd3bd97d0104ec62799e2e5efea57c..0000000000000000000000000000000000000000 --- a/spaces/1368565466ki/ZSTRD/monotonic_align/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -from numpy import zeros, int32, float32 -from torch import from_numpy - -from .core import maximum_path_jit - - -def maximum_path(neg_cent, mask): - """ numba optimized version. 
- neg_cent: [b, t_t, t_s] - mask: [b, t_t, t_s] - """ - device = neg_cent.device - dtype = neg_cent.dtype - neg_cent = neg_cent.data.cpu().numpy().astype(float32) - path = zeros(neg_cent.shape, dtype=int32) - - t_t_max = mask.sum(1)[:, 0].data.cpu().numpy().astype(int32) - t_s_max = mask.sum(2)[:, 0].data.cpu().numpy().astype(int32) - maximum_path_jit(path, neg_cent, t_t_max, t_s_max) - return from_numpy(path).to(device=device, dtype=dtype) diff --git a/spaces/17TheWord/RealESRGAN/app.py b/spaces/17TheWord/RealESRGAN/app.py deleted file mode 100644 index 97c59221c429e335c3a2e3413c11cc155d5b6122..0000000000000000000000000000000000000000 --- a/spaces/17TheWord/RealESRGAN/app.py +++ /dev/null @@ -1,68 +0,0 @@ -import os -os.system("pip install gradio==2.9b23") -import random -import gradio as gr -from PIL import Image -import torch -from random import randint -import sys -from subprocess import call -import psutil - - - - -torch.hub.download_url_to_file('http://people.csail.mit.edu/billf/project%20pages/sresCode/Markov%20Random%20Fields%20for%20Super-Resolution_files/100075_lowres.jpg', 'bear.jpg') - - -def run_cmd(command): - try: - print(command) - call(command, shell=True) - except KeyboardInterrupt: - print("Process interrupted") - sys.exit(1) -run_cmd("wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth -P .") -run_cmd("pip install basicsr") -run_cmd("pip freeze") - -os.system("wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth -P .") - - -def inference(img,mode): - _id = randint(1, 10000) - INPUT_DIR = "/tmp/input_image" + str(_id) + "/" - OUTPUT_DIR = "/tmp/output_image" + str(_id) + "/" - run_cmd("rm -rf " + INPUT_DIR) - run_cmd("rm -rf " + OUTPUT_DIR) - run_cmd("mkdir " + INPUT_DIR) - run_cmd("mkdir " + OUTPUT_DIR) - basewidth = 256 - wpercent = (basewidth/float(img.size[0])) - hsize = int((float(img.size[1])*float(wpercent))) - img = img.resize((basewidth,hsize), Image.ANTIALIAS) - img.save(INPUT_DIR + "1.jpg", "JPEG") - if mode == "base": - run_cmd("python inference_realesrgan.py -n RealESRGAN_x4plus -i "+ INPUT_DIR + " -o " + OUTPUT_DIR) - else: - os.system("python inference_realesrgan.py -n RealESRGAN_x4plus_anime_6B -i "+ INPUT_DIR + " -o " + OUTPUT_DIR) - return os.path.join(OUTPUT_DIR, "1_out.jpg") - - - - -title = "Real-ESRGAN" -description = "Gradio demo for Real-ESRGAN. To use it, simply upload your image, or click one of the examples to load them. Read more at the links below. Please click submit only once" -article = "

Real-ESRGAN: Training Real-World Blind Super-Resolution with Pure Synthetic Data | Github Repo

" - -gr.Interface( - inference, - [gr.inputs.Image(type="pil", label="Input"),gr.inputs.Radio(["base","anime"], type="value", default="base", label="model type")], - gr.outputs.Image(type="file", label="Output"), - title=title, - description=description, - article=article, - examples=[ - ['bear.jpg','base'], - ['anime.png','anime'] - ]).launch() \ No newline at end of file diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Cedar Noise Reduction Plugin.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Cedar Noise Reduction Plugin.md deleted file mode 100644 index e9123f835a0c0525f0b3110168da1b664457829a..0000000000000000000000000000000000000000 --- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Cedar Noise Reduction Plugin.md +++ /dev/null @@ -1,7 +0,0 @@ -
      Outline of the article:

      - What is Cedar Noise Reduction Plugin and Why You Need It: overview of the Cedar noise reduction plugin, its features, benefits, and applications
      - How to Use Cedar Noise Reduction Plugin for Audio Restoration
        - Step 1: Choose the right plugin for your needs: compare different Cedar products and their capabilities
        - Step 2: Load the plugin in your DAW or audio editor: show how to install and activate the plugin
        - Step 3: Adjust the settings and parameters: explain how to use the learn function, the noise attenuation knob, and other controls
        - Step 4: Preview and apply the noise reduction: demonstrate the results and tips for fine-tuning
      - How to Use Cedar Noise Reduction Plugin for Live Sound
        - Step 1: Connect the plugin to your audio interface or mixer: show how to use the analog, digital, or Dante inputs and outputs
        - Step 2: Set up the plugin for live mode: explain how to use the adaptive mode, the freeze function, and other features
        - Step 3: Monitor and control the noise reduction: show how to use the meters, indicators, and bypass switch
        - Step 4: Enjoy a cleaner, brighter, and louder sound: demonstrate the benefits and advantages of using the plugin for live sound
      - How to Use Cedar Noise Reduction Plugin for Post Production
        - Step 1: Import your audio file or project into the standalone RX Audio Editor: show how to use the file browser, the waveform display, and the spectral view
        - Step 2: Select the noise reduction module from the RX menu: explain how to choose between DNS 2, DNS 4, or DNS 8D modules
        - Step 3: Edit your audio with advanced tools and features: show how to use the spectral repair, dialogue isolate, de-reverb, de-click, de-hum, and other tools
        - Step 4: Export your audio file or project with improved quality: demonstrate how to use the batch processor, the metadata editor, and the format options
      - How to Compare Cedar Noise Reduction Plugin with Other Software
        - Criteria for comparison: factors to consider when comparing noise reduction software, such as performance, quality, ease of use, price, and support
        - Examples of other software: popular alternatives such as iZotope RX 8, Accusonus ERA, and SoundSoap 5
        - Pros and cons of each software: the strengths and weaknesses of each option based on the criteria
        - Conclusion: which software is best suited for different scenarios and preferences
      - How to Get Cedar Noise Reduction Plugin and Learn More About It
        - Pricing and availability: how to purchase or rent the plugin, plus its system requirements and compatibility
        - Resources and support: links to Cedar's official website, user manual, video tutorials, FAQs, and customer service
        - Testimonials and reviews: quotes from satisfied customers or reputable sources that praise the plugin
      - Conclusion: recap of the main points of the article and a call to action
      

What is Cedar Noise Reduction Plugin and Why You Need It

-

If you are involved in any kind of audio production, whether it is recording, mixing, mastering, broadcasting, live sound, or post production, you know how important it is to have a clean and clear sound. However, you also know how challenging it can be to achieve that sound in real-world situations, where you have to deal with various types of noise that can ruin your audio quality.

-

      Noise can be defined as any unwanted sound that interferes with your desired sound. It can come from different sources, such as background noises (traffic, air conditioning, wind, rain), equipment noises (hiss, hum, clicks), or recording errors (clipping, distortion, dropouts). Noise can affect your audio in different ways, such as reducing clarity and intelligibility, masking quiet details, and lowering the overall perceived quality of the recording.
      
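      The basic idea behind this kind of "learn"-based noise reduction is simple: estimate a per-frequency noise profile from a noise-only stretch of audio, then attenuate the spectral bins that do not rise above that profile. The toy sketch below illustrates only the general technique; Cedar's actual processing is proprietary, and the frame size, hop, and threshold factor here are arbitrary illustrative choices:

      ```python
      import numpy as np

      def spectral_gate(signal, noise_clip, frame=1024, hop=512, factor=1.5):
          """Toy noise reduction: mute STFT bins that stay below a learned noise profile."""
          window = np.hanning(frame)

          def stft(x):
              n = 1 + (len(x) - frame) // hop
              return np.array([np.fft.rfft(window * x[i * hop:i * hop + frame])
                               for i in range(n)])

          # "Learn" step: per-bin noise magnitude estimated from a noise-only clip
          noise_profile = np.abs(stft(noise_clip)).mean(axis=0)

          frames = stft(signal)
          frames *= np.abs(frames) > factor * noise_profile  # keep bins above the profile

          # Overlap-add resynthesis (no window normalization; this is only a sketch)
          out = np.zeros(len(signal))
          for i, f in enumerate(frames):
              out[i * hop:i * hop + frame] += np.fft.irfft(f, frame)
          return out
      ```

      Real products replace this hard on/off mask with much smoother, adaptive attenuation, which is why they avoid the "musical noise" artifacts a crude gate like this produces.
      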

-

      
-
-
\ No newline at end of file diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download and Install Microsoft Office 2013 in Minutes with This Simple Trick.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download and Install Microsoft Office 2013 in Minutes with This Simple Trick.md deleted file mode 100644 index 2386cfa667c325ebc3f4de0ccd7f540cfc5e5ff5..0000000000000000000000000000000000000000 --- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download and Install Microsoft Office 2013 in Minutes with This Simple Trick.md +++ /dev/null @@ -1,40 +0,0 @@ - -

      How to Download and Install Microsoft Office 2013
      

-

      Microsoft Office 2013 is a popular suite of productivity applications that includes Word, Excel, PowerPoint, Outlook, OneNote, Access, and Publisher. If you want to download and install Microsoft Office 2013, you can follow these simple steps:
      

-

      
-
      1. Go to the official Microsoft website and sign in with your Microsoft account. If you don't have one, you can create one for free.
      2. Click on the Office tab and select Office 2013 from the drop-down menu.
      3. Choose your preferred language and edition (32-bit or 64-bit) and click on the Download button.
      4. Save the file to your computer and run it once the download is complete.
      5. Follow the on-screen instructions to complete the installation process. You may need to enter your product key if you have one.
      6. Enjoy using Microsoft Office 2013 on your device.
      
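      Once setup finishes, you can optionally confirm that Office 2013 is registered on the system. Office 2013 identifies itself internally as version 15.0, and a traditional MSI-based install records its install folder under the registry key used below; that key is an assumption on my part and may not exist for Click-to-Run installations. A minimal check in Python:

      ```python
      import winreg

      # Office 2013 = version 15.0; assumed key written by MSI-based installs
      # (Click-to-Run installs may register themselves elsewhere).
      KEY_PATH = r"SOFTWARE\Microsoft\Office\15.0\Common\InstallRoot"

      try:
          with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, KEY_PATH) as key:
              install_dir, _ = winreg.QueryValueEx(key, "Path")
              print("Office 2013 found at:", install_dir)
      except FileNotFoundError:
          print("Office 2013 does not appear to be installed.")
      ```
      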
-

Note: Microsoft Office 2013 is no longer supported by Microsoft and may not receive security updates or bug fixes. You may want to consider upgrading to a newer version of Office or using an alternative suite of applications.

- -

How to Use Microsoft Office 2013

-

      Once you have installed Microsoft Office 2013, you can start using the applications for your personal or professional needs. Here are a few quick tips for the most common features of Office 2013:

      - Use Word for documents, Excel for spreadsheets and charts, PowerPoint for presentations, and Outlook for email and calendars.
      - All the applications share the same ribbon interface, so commands are grouped under tabs such as Home, Insert, and Review.
      - Sign in with your Microsoft account to save files to OneDrive and open them from any device.
      

- -

Microsoft Office 2013 is a powerful and versatile suite of applications that can help you with various tasks and projects. However, it may not be compatible with some newer devices or systems. If you encounter any issues or want to upgrade to a newer version of Office, you can contact Microsoft support or visit their website for more information.

- -

How to Uninstall Microsoft Office 2013

-

      If you want to uninstall Microsoft Office 2013 from your device, you can follow these steps:
      

-
      1. Go to the Control Panel and select Programs and Features.
      2. Find Microsoft Office 2013 in the list of installed programs and click on it.
      3. Click on the Uninstall button and confirm your choice.
      4. Wait for the uninstallation process to finish and restart your device if prompted.
      
-

Note: Uninstalling Microsoft Office 2013 will remove all the applications and files associated with it. If you want to keep some of your files, make sure to back them up before uninstalling.

-

- -

      How to Download Microsoft Office 2013 Again
      

-

      If you want to download Microsoft Office 2013 again, you can use the same link and installer file that you used before. However, you may need to activate your product again with your product key or your Microsoft account. You can also contact Microsoft support or visit their website for more assistance.
      

      
-
-
\ No newline at end of file diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Estadistica para administracion levine berenson pdf solucionario Todo lo que necesitas saber sobre analisis de datos y toma de decisiones.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Estadistica para administracion levine berenson pdf solucionario Todo lo que necesitas saber sobre analisis de datos y toma de decisiones.md deleted file mode 100644 index 531db96d6aed58faae1aa526207511e13fb2ec44..0000000000000000000000000000000000000000 --- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Estadistica para administracion levine berenson pdf solucionario Todo lo que necesitas saber sobre analisis de datos y toma de decisiones.md +++ /dev/null @@ -1,102 +0,0 @@ - -

      Estadística para Administración by Levine, Berenson, and Krehbiel: the Book and Its PDF Solutions Manual
      

-

      Are you interested in learning statistics for management in a practical, simple, and up-to-date way? Do you want a book that explains the concepts, techniques, and applications of statistics in a business context? Would you like access to a solutions manual that helps you work through the book's exercises and reinforce your learning? If your answer is yes, this article is for you.
      

-

      In this article I will talk about the book Estadística para Administración, written by David M. Levine, Mark L. Berenson, and Timothy C. Krehbiel, in its fourth edition. I will cover what statistics for management is, who the book's authors are, what the book contains, what its main features are, what benefits it offers students and management professionals, and how you can get the book's solutions manual in PDF format. At the end of the article you will find a conclusion and some frequently asked questions on the topic.
      

-

      
-

      Introduction
      

-

      What is statistics for management?
      

-

      Statistics for management is a branch of statistics concerned with collecting, organizing, analyzing, and interpreting data related to the functional areas of business, such as accounting, economics and finance, information systems, management, and marketing. Its goal is to turn data into useful information for making rational, well-founded decisions in a business setting.
      

-

      Who are Levine, Berenson, and Krehbiel?
      

-

      What does the book contain?
      

-

      The book contains 18 chapters covering the fundamental topics of descriptive statistics, probability, probability distributions, sampling, estimation, hypothesis testing, analysis of variance, regression, multivariate analysis, and statistical quality control. Each chapter includes an introduction with a business scenario for using statistics, a theoretical exposition with examples and solved exercises, an applications section with real cases and proposed problems, and a summary and review section with key concepts and a self-assessment.
      

-

      Features of the book
      

-

      An applied focus on the functional areas of business
      

-

      One of the book's most notable features is that it presents each statistical topic in an applied context tied to at least one functional area of business. In this way, the book shows students the importance and usefulness of statistics for management and helps them develop an integrated, practical view of statistical concepts and techniques. It also emphasizes interpreting statistical results, presenting and evaluating assumptions, and discussing what to do when the assumptions are violated.
      

-

      Integration of spreadsheet and statistical software
      

-

      Another important feature is that the book integrates spreadsheet software (Excel) and statistical software (Minitab) into every aspect of the course. It recognizes that in the business world this software sits on the decision maker's desktop, which lets the course focus on interpreting results rather than on computation. The book also provides detailed instructions for using the software and illustrative examples with screenshots.
      

-

      A detailed guide to using the software
      

-

      Each chapter offers a detailed guide to using the spreadsheet software (Excel) and the statistical software (Minitab). The book explains how to enter data, run the statistical analyses, interpret the results, and present them in charts or tables, with step-by-step screenshots of the software. Software instruction never dominates the course; it is integrated into the study of statistics.
      
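      To give a flavor of the kind of computation the book walks through in Excel or Minitab, here is a rough equivalent in Python. Python is only a stand-in here (the book itself uses Excel and Minitab), and the sales figures below are invented for illustration:

      ```python
      import math
      import statistics

      # Invented sample: weekly sales (in thousands) for 12 stores
      sales = [23.1, 19.8, 25.4, 22.0, 20.7, 24.3, 21.5, 26.0, 22.8, 19.2, 23.9, 21.1]

      n = len(sales)
      mean = statistics.mean(sales)   # point estimate of the population mean
      s = statistics.stdev(sales)     # sample standard deviation
      se = s / math.sqrt(n)           # standard error of the mean

      # 95% confidence interval, using the t critical value for n - 1 = 11 df
      t_crit = 2.201
      low, high = mean - t_crit * se, mean + t_crit * se

      print(f"mean = {mean:.2f}, s = {s:.2f}")
      print(f"95% CI for the mean: ({low:.2f}, {high:.2f})")
      ```

      As in the book, the emphasis falls on interpretation (the interval is a range of plausible values for the true mean), not on the arithmetic itself.
      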

-

      

-

      Abundant practice for understanding statistics in management
      

-

      The book offers abundant practice for understanding how statistics is used in management: more than 1,000 exercises and proposed problems, classified by difficulty level and by functional area of business, plus real cases and applications that illustrate statistics in real management situations. Solutions to selected exercises appear at the end of the book, and a complete solutions manual in PDF format can be obtained through a QR code or a web link.
      

-

      Benefits of the book
      

-

      Contextual, relevant learning
      

      The first benefit of the book is that it presents each statistical topic in an applied context tied to at least one functional area of business, such as accounting, economics and finance, information systems, management, and marketing. In this way, the book makes learning statistics more interesting, meaningful, and motivating for management students and professionals.
      

-

      Development of analytical and decision-making skills
      

-

      Another benefit is that the book helps management students and professionals develop analytical and decision-making skills. It teaches how to collect, organize, analyze, and interpret business-relevant data using spreadsheet and statistical software; how to present and communicate statistical results clearly and effectively with charts and tables; how to evaluate statistical assumptions and act when they are violated; and how to apply statistics to solve real management problems and make rational, well-founded decisions.
      

-

      Updated data and examples
      

-

      A third benefit is that the book offers updated data and examples that reflect trends and changes in the business world. It uses real, current data from reliable, recognized sources such as the World Bank, the International Monetary Fund, the World Trade Organization, and the Organisation for Economic Co-operation and Development, along with real examples and cases that illustrate how statistics is used across sectors and industries such as e-commerce, social media, healthcare, sports, and tourism. It also incorporates emerging topics relevant to management, such as ethics, social responsibility, sustainability, and diversity.
      

-

      Additional learning resources
      

-

      A fourth benefit is that the book offers additional learning resources that complement and enrich its content. It has a companion website with extra material for students and instructors, such as explanatory videos, PowerPoint presentations, interactive simulations, and additional exercises. It also has a PDF solutions manual with detailed solutions to all of the book's exercises and proposed problems, which can be obtained through a QR code or a web link found at the end of the book.
      

-

      The book's solutions manual
      

-

      What is the solutions manual?
      

-

      The solutions manual is a PDF document containing detailed solutions to all of the book's exercises and proposed problems. It is a very useful resource for management students and professionals who want to verify their answers, review their procedures, correct their mistakes, and reinforce their learning.
      

-

      What is the solutions manual for?
      

      The solutions manual serves several purposes. It helps you check your answers, review your procedures, and correct your mistakes; it also serves to self-assess your level of understanding and mastery of statistics for management. Finally, it helps you prepare for the course exams and assessments.
      

-

      How do you get the solutions manual?
      

-

      The solutions manual can be obtained in two ways, both from the end of the book: by scanning a QR code with a smartphone or tablet, or by copying a web link into an internet browser. Either route leads to the solutions manual in PDF format, which can be downloaded, printed, or consulted online.
      

-

      Conclusion
      

-

      In conclusion, the fourth edition of the statistics for management book by Levine, Berenson, and Krehbiel is an excellent resource for management students and professionals who want to learn statistics in a practical, simple, and up-to-date way. It presents the concepts, techniques, and applications of statistics in a business context using spreadsheet and statistical software; it offers abundant practice, updated data and examples, and additional learning resources; and it includes a PDF solutions manual with detailed solutions to all of the book's exercises and proposed problems, obtainable through the QR code or web link at the end of the book.
      

-

      Frequently asked questions
      

-

      Below are some frequently asked questions about the topic of this article:
      

- -

      
-
-
\ No newline at end of file diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/2021 Free Spokeo Login And Password.md b/spaces/1gistliPinn/ChatGPT4/Examples/2021 Free Spokeo Login And Password.md deleted file mode 100644 index e3c845afd5e1d01ffb2788efab37ed5060d75e19..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/2021 Free Spokeo Login And Password.md +++ /dev/null @@ -1,28 +0,0 @@ -
-

How to Find People Online with Spokeo

-

Spokeo is a people search engine that helps you find information about anyone in the U.S. You can use Spokeo to look up people by name, phone number, email address, or physical address. Spokeo can also help you discover social media profiles, criminal records, wealth data, family members, and more.

-

However, Spokeo is not a free service. You need to pay a monthly or annual fee to access its full features and reports. If you are looking for a free Spokeo login and password, you might be disappointed. There is no official way to get a free Spokeo account without paying.

-

      
-

Some websites claim to offer free Spokeo login and password, but they are usually scams or phishing attempts. They might ask you to download malicious software, fill out surveys, or provide your personal information. These websites are not affiliated with Spokeo and should be avoided at all costs.

-

The only legitimate way to get a free Spokeo login and password is to sign up for a free trial. Spokeo offers a 7-day free trial for new customers who want to try out its service. You can cancel your subscription anytime before the trial ends and you won't be charged.

-

To sign up for a free Spokeo trial, follow these steps:

-
      1. Go to www.spokeo.com
      2. Click on the "Start Your Free Trial" button
      3. Enter your email address and create a password
      4. Enter your payment information (credit card or PayPal)
      5. Click on the "Start Free Trial" button
      
-

You will receive an email confirmation with your login details. You can now use Spokeo to search for anyone in the U.S. Remember to cancel your subscription before the trial ends if you don't want to pay.

-

If you have any questions or issues with your Spokeo account, you can contact their customer care team at 1-888-271-9562 or customercare@spokeo.com.

Spokeo is more than just a people search engine. It can also help you protect yourself from unwanted calls and texts, reconnect with lost friends and relatives, verify online sellers and buyers, and learn more about your own online presence.

-

      Here are some of the benefits of using Spokeo:

      - Protecting yourself from unwanted calls and texts by identifying unknown numbers
      - Reconnecting with lost friends and relatives
      - Verifying online sellers and buyers before you commit to a deal
      - Seeing what your own online presence looks like to others
      

- -

Spokeo is a powerful tool that can help you find anyone in the U.S. and learn more about them. Whether you are looking for an old friend, a new date, a business partner, or a family member, Spokeo can help you find them and connect with them.

-

      
-
-
\ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Bus Simulator Ultimate v1.5.4 MOD APK Hack the Latest Version of the Realistic Bus Simulation Game.md b/spaces/1phancelerku/anime-remove-background/Bus Simulator Ultimate v1.5.4 MOD APK Hack the Latest Version of the Realistic Bus Simulation Game.md deleted file mode 100644 index 99ae36faad9feb4f0870bbe004611136714b4669..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Bus Simulator Ultimate v1.5.4 MOD APK Hack the Latest Version of the Realistic Bus Simulation Game.md +++ /dev/null @@ -1,94 +0,0 @@ -
-

How to Hack Bus Simulator Ultimate 1.5.4 APK for Unlimited Money and More

-

Bus Simulator Ultimate is one of the most realistic and immersive bus driving simulation games for Android devices. You can drive various buses across different countries, cities, and routes, while managing your own bus company, hiring drivers, expanding your fleet, and satisfying your passengers.

-

But what if you want to have more money, unlock all the buses, customize your vehicles, and enjoy other perks without spending real cash or playing for hours? Well, you can hack Bus Simulator Ultimate 1.5.4 APK and get access to unlimited resources and features that will make your game more fun and easy.

-

      
-

In this article, we will show you how to hack Bus Simulator Ultimate 1.5.4 APK using different tools and methods, as well as the benefits and risks of doing so. We will also give you some tips on how to enjoy the game with hacks without getting banned or detected by the game developers or other players.

-

Introduction

-

What is Bus Simulator Ultimate and why you might want to hack it

-

Bus Simulator Ultimate is a popular bus driving simulation game developed by Zuuks Games, the same creators of Truck Simulator 2018: Europe. The game was released in August 2019 and has since been downloaded over 100 million times on Google Play Store.

-

      

-

The game lets you experience what it's like to be a bus driver in various countries, such as Germany, Turkey, Italy, France, Spain, Netherlands, Brazil, Azerbaijan, Russia, etc. You can choose from over 25 different buses, each with their own realistic interiors, sounds, physics, and features.

-

You can also create your own bus company, hire drivers, buy new buses, upgrade your vehicles, set your ticket prices, design your routes, and earn money from your passengers. You can also compete with other players in multiplayer mode or join online events and tournaments.

-

However, as realistic and immersive as the game is, it can also be quite challenging and time-consuming. You need to earn enough money to buy new buses, maintain your vehicles, pay your drivers, expand your routes, etc. You also need to follow the traffic rules, avoid accidents, deal with weather conditions, satisfy your passengers' needs, etc.

-

That's why some players might want to hack Bus Simulator Ultimate 1.5.4 APK and get unlimited money, unlock all the buses, customize their vehicles, skip ads, remove restrictions, etc.

-

What are the benefits of hacking Bus Simulator Ultimate 1.5.4 APK

-

      Hacking Bus Simulator Ultimate 1.5.4 APK can give you many benefits that can enhance your gaming experience and make it more fun and easy. Some of the benefits of hacking Bus Simulator Ultimate 1.5.4 APK are:

      - You can get unlimited money and buy any bus you want, upgrade your vehicles, hire more drivers, etc.
      - You can unlock all the buses and access their unique features, such as different interiors, sounds, physics, etc.
      - You can customize your vehicles and change their colors, skins, logos, etc.
      - You can skip ads and enjoy the game without interruptions or distractions.
      - You can remove restrictions and play the game in any country, city, or route you want.
      - You can cheat and manipulate the game data and settings, such as speed, fuel, traffic, weather, passengers, etc.
      

What are the risks of hacking Bus Simulator Ultimate 1.5.4 APK

-

However, hacking Bus Simulator Ultimate 1.5.4 APK also comes with some risks that you should be aware of and avoid. Some of the risks of hacking Bus Simulator Ultimate 1.5.4 APK are:

      - You can get banned or detected by the game developers or other players if you use hacks in multiplayer mode or online events.
      - You can lose your progress and data if you use an incompatible or corrupted APK file or hack tool.
      - You can damage your device or expose it to malware or viruses if you download an unsafe or untrusted APK file or hack tool.
      - You can ruin the fun and challenge of the game if you use hacks that make it too easy or boring.
      

How to Download Bus Simulator Ultimate 1.5.4 APK

-

Where to find the original APK file and how to install it

-

If you want to hack Bus Simulator Ultimate 1.5.4 APK, you need to have the original APK file of the game installed on your device first. You can find the original APK file of Bus Simulator Ultimate 1.5.4 on Google Play Store or other reputable sources, such as APKPure, APKMirror, etc.

-

To install the original APK file of Bus Simulator Ultimate 1.5.4 on your device, you need to follow these steps:

      - Go to the source where you downloaded the APK file and tap on it.
      - Allow your device to install apps from unknown sources if prompted.
      - Follow the instructions on the screen and wait for the installation to complete.
      - Launch the game and enjoy.
      
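      If you prefer to sideload from a computer instead of tapping through the on-device installer, the same installation can be driven over USB with adb. A minimal sketch in Python, assuming USB debugging is enabled, adb is on your PATH, and the APK filename below (which is hypothetical) matches the file you downloaded:

      ```python
      import subprocess

      # Hypothetical local filename for the downloaded APK
      apk_path = "bus_simulator_ultimate_1.5.4.apk"

      # -r reinstalls/updates the app in place, keeping its existing data
      result = subprocess.run(["adb", "install", "-r", apk_path],
                              capture_output=True, text=True)
      print(result.stdout or result.stderr)
      ```
      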

How to check the version and compatibility of your device

-

Before you hack Bus Simulator Ultimate 1.5.4 APK, you need to make sure that your device is compatible with the game and that you have the latest version of the game installed. To check the version and compatibility of your device, you need to follow these steps:

      - Go to Google Play Store and search for Bus Simulator Ultimate.
      - Tap on the game icon and scroll down to see the details.
      - Check the current version of the game and compare it with the version of your installed APK file.
      - Check the minimum requirements of the game and compare them with your device's specifications.
      - Update the game or your device if needed.
      
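      You can also read the installed version straight from a connected device with adb instead of checking Google Play by hand. The sketch below assumes the game's package id is com.zuuks.bus.simulator.ultimate; that id is an assumption here, so confirm it on your own device first:

      ```python
      import subprocess

      # Assumed package id; confirm with: adb shell pm list packages | grep -i bus
      package = "com.zuuks.bus.simulator.ultimate"

      out = subprocess.run(["adb", "shell", "dumpsys", "package", package],
                           capture_output=True, text=True).stdout

      for line in out.splitlines():
          if "versionName" in line or "minSdk" in line:
              print(line.strip())
      ```
      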

How to Hack Bus Simulator Ultimate 1.5.4 APK

-

What are the tools and methods you need to hack Bus Simulator Ultimate 1.5.4 APK

-

There are different tools and methods that you can use to hack Bus Simulator Ultimate 1.5.4 APK and get unlimited money and other features. Some of the most common and effective tools and methods are:

      - Modded APK file: a modified version of the original APK that hackers have altered to build cheats directly into the game. You can download a modded APK file of Bus Simulator Ultimate 1.5.4 from various sources online, such as ModAPKStore, Rexdl, etc.
      - Game hacker app: an application that lets you modify the game's data and settings on your device using techniques such as memory editing, code injection, and hex editing. Examples include Game Guardian, Lucky Patcher, and Cheat Engine.
      - Cheat engine: software that lets you manipulate the game's memory and values on your device using techniques such as scanning, debugging, and disassembling. Examples include Cheat Engine for Android, SB Game Hacker, and Game Killer.
      

How to use a modded APK file to get unlimited money and other features

-

To use a modded APK file to hack Bus Simulator Ultimate 1.5.4 APK and get unlimited money and other features, you need to follow these steps:

      - Find a reliable source where you can download a modded APK file of Bus Simulator Ultimate 1.5.4 with unlimited money and other features.
      - Uninstall the original APK file of Bus Simulator Ultimate 1.5.4 from your device if you have it.
      - Enable the installation of apps from unknown sources in your device settings if you haven't done so already.
      - Download the modded APK file of Bus Simulator Ultimate 1.5.4 from the source and tap on it.
      - Follow the instructions on the screen and wait for the installation to complete.
      - Launch the game and enjoy the hacks.
      

How to use a game hacker app to modify the game data and settings

-

To use a game hacker app to hack Bus Simulator Ultimate 1.5.4 APK and modify the game data and settings, you need to follow these steps:

      - Install a game hacker app on your device from a trusted source, such as Game Guardian, Lucky Patcher, Cheat Engine, etc.
      - Launch the game hacker app and grant it root access or other permissions if required.
      - Launch Bus Simulator Ultimate 1.5.4 APK and start playing the game.
      - Minimize the game and open the game hacker app.
      - Select Bus Simulator Ultimate 1.5.4 APK from the list of running processes or apps.
      - Search for the value or parameter that you want to change, such as money, speed, fuel, etc.
      - Modify the value or parameter according to your preference and apply the changes.
      - Resume the game and enjoy the hacks.
      

How to use a cheat engine to manipulate the game memory and values

-

To use a cheat engine to hack Bus Simulator Ultimate 1.5.4 APK and manipulate the game memory and values, you need to follow these steps:

      - Install a cheat engine on your device from a reliable source, such as Cheat Engine for Android, SB Game Hacker, Game Killer, etc.
      - Launch the cheat engine and grant it root access or other permissions if needed.
      - Launch Bus Simulator Ultimate 1.5.4 APK and start playing the game.
      - Minimize the game and open the cheat engine.
      - Select Bus Simulator Ultimate 1.5.4 APK from the list of running processes or apps.
      - Scan for the value or address that you want to change, such as money, speed, fuel, etc.
      - Change the value or address according to your desire and apply the changes.
      - Resume the game and enjoy the hacks.
      
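      Whatever the tool, the last two workflows boil down to the same search-and-refine loop: scan memory for the value shown on screen, change that value in-game, rescan among the surviving candidates, and repeat until a single address remains. The sketch below imitates that loop over a plain dictionary standing in for process memory; every address and value in it is a toy stand-in, not a real memory-editing API:

      ```python
      # Toy stand-in for process memory: address -> current value
      memory = {0x1000: 250, 0x1004: 9000, 0x1008: 250, 0x100C: 37}

      def scan(candidates, target):
          """Keep only the addresses whose value currently matches the target."""
          return {addr for addr in candidates if memory[addr] == target}

      # First scan: the money counter shows 250 in-game -> two candidate addresses
      candidates = scan(set(memory), 250)

      # Spend some money in-game so the real counter changes, then rescan
      memory[0x1000] = 180                # the true money address updates
      candidates = scan(candidates, 180)  # now uniquely identifies 0x1000

      for addr in candidates:
          memory[addr] = 999999           # write the desired value
      ```
      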

How to Enjoy Bus Simulator Ultimate 1.5.4 APK with Hacks

-

What are the features and gameplay of Bus Simulator Ultimate 1.5.4 APK

-

Bus Simulator Ultimate 1.5.4 APK is a realistic and immersive bus driving simulation game that offers you many features and gameplay options, such as:

- Driving over 25 different buses with realistic interiors, sounds, physics, and features
- Creating your own bus company: hiring drivers, buying new buses, upgrading your vehicles, setting your ticket prices, designing your routes, and earning money from your passengers
- Competing with other players in multiplayer mode or joining online events and tournaments
- Exploring various countries, cities, and routes with different traffic rules, weather conditions, landmarks, etc.
- Satisfying your passengers' needs, such as comfort, entertainment, and food
- Following the traffic rules, avoiding accidents, dealing with emergencies, etc.

How to use the hacks to enhance your gaming experience and have more fun

-

You can use the hacks that you have applied to Bus Simulator Ultimate 1.5.4 APK to enhance your gaming experience and have more fun in various ways, such as:

- Buying any bus you want and customizing it to your liking
- Expanding your bus company and dominating the market
- Skipping ads and playing without interruptions or distractions
- Playing in any country, city, or route you want without restrictions
- Cheating and manipulating the game data and settings to your advantage

How to avoid getting banned or detected by the game developers or other players

-

However, you should also be careful not to get banned or detected by the game developers or other players when using hacks in Bus Simulator Ultimate 1.5.4 APK. Here are some tips on how to avoid getting banned or detected:

- Use hacks only in offline mode or single-player mode
- Do not use hacks in multiplayer mode or online events
- Do not use hacks that are too obvious or unrealistic
- Do not use hacks that affect other players' gameplay or experience
- Do not brag or boast about using hacks in public forums or chats

Conclusion

-

Hacking Bus Simulator Ultimate 1.5.4 APK can give you unlimited money and other features that make the game more fun and easy. However, it also comes with risks you should be aware of and avoid. You can get banned or detected by the game developers or other players if you use hacks in multiplayer mode or online events. You can lose your progress and data if you use an incompatible or corrupted APK file or hack tool. You can damage your device or expose it to malware or viruses if you download an unsafe or untrusted APK file or hack tool. And you can ruin the fun and challenge of the game if you use hacks that make it too easy or boring.

Therefore, use hacks in Bus Simulator Ultimate 1.5.4 APK with caution and moderation. Respect the game developers and other players: don't abuse the hacks or spoil their gameplay or experience, and follow the tips above on how to avoid getting banned or detected.

We hope this article has helped you learn how to hack Bus Simulator Ultimate 1.5.4 APK and enjoy the game with unlimited money and other features. If you have any questions or feedback, please feel free to leave a comment below. Happy hacking!

FAQs

-

Q: Is hacking Bus Simulator Ultimate 1.5.4 APK legal?

-

A: Hacking Bus Simulator Ultimate 1.5.4 APK is not illegal, but it is against the terms and conditions of the game and the Google Play Store. Therefore, you may face some consequences if you get caught or reported by the game developers or other players.

-

Q: Is hacking Bus Simulator Ultimate 1.5.4 APK safe?

-

A: Hacking Bus Simulator Ultimate 1.5.4 APK is not completely safe, as there are some risks involved, such as getting banned or detected, losing your progress and data, damaging your device, or exposing it to malware or viruses. Therefore, you should be careful and responsible when hacking Bus Simulator Ultimate 1.5.4 APK and only use trusted sources and tools.

-

Q: How can I update Bus Simulator Ultimate 1.5.4 APK after hacking it?

-

A: If you want to update Bus Simulator Ultimate 1.5.4 APK after hacking it, you need to uninstall the hacked version of the game and install the latest version of the original APK file from Google Play Store or other reputable sources. However, you may lose your hacks and progress if you do so.

-

Q: Can I hack Bus Simulator Ultimate 1.5.4 APK without root access?

-

A: Yes, you can hack Bus Simulator Ultimate 1.5.4 APK without root access using some tools and methods, such as modded APK files, game hacker apps, cheat engines, etc. However, some tools and methods may require root access to work properly.

-

Q: Can I hack Bus Simulator Ultimate 1.5.4 APK on iOS devices?

-

A: No. APK files are Android application packages and cannot be installed on iOS devices, so this method of hacking Bus Simulator Ultimate 1.5.4 only applies to Android.

-
-
\ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Download YouTube 4.0 APK for Android - Watch Videos Offline.md b/spaces/1phancelerku/anime-remove-background/Download YouTube 4.0 APK for Android - Watch Videos Offline.md deleted file mode 100644 index 7a76af1e12499f4fdc02f2c0a62f0928f4b17d63..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Download YouTube 4.0 APK for Android - Watch Videos Offline.md +++ /dev/null @@ -1,158 +0,0 @@ - -

Download YouTube 4.0 for Android: How to Enjoy Videos on Your Phone

-

YouTube is the most popular video-sharing platform in the world, with billions of users and hours of content uploaded every minute. Whether you want to watch the latest music videos, learn something new, or catch up with your favorite creators, YouTube has something for everyone.

-




-

But what if you want to enjoy YouTube on your Android phone? You might think that you need the latest version of the app, which requires Android 5.0 or higher. But that's not true! You can still download and use YouTube 4.0 for Android, which works on devices running Android 4.1 or higher.

-

In this article, we will show you how to download and install YouTube 4.0 for Android, how to use its features and benefits, and how to make the most of your YouTube experience on your phone.

-

What is YouTube 4.0 for Android?

-

YouTube 4.0 for Android is an older version of the official YouTube app that was released in August 2013. It has a simple and intuitive interface that lets you access all the essential functions of YouTube, such as browsing, watching, creating, and sharing videos.

-


-

YouTube 4.0 for Android also supports some features that are not available in newer versions of the app, such as:

- Background play
- Manual quality adjustment
- Portrait mode

However, YouTube 4.0 for Android also has some limitations, such as:

- Lack of support for some newer features
- No further updates or bug fixes from Google
- Possible compatibility issues with some devices or videos

How to download and install YouTube 4.0 for Android

-

If you want to download and install YouTube 4.0 for Android, you need to follow these steps:

-
1. Go to [this link](^1^) on your phone's browser and tap on "Download APK". This will download the file "youtube-4-0-8.apk" to your phone.
2. Go to your phone's settings and enable "Unknown sources" under "Security". This will allow you to install apps from sources other than Google Play Store.
3. Go to your phone's file manager and locate the downloaded file "youtube-4-0-8.apk". Tap on it and follow the instructions to install it.
4. Once installed, you can launch the app from your app drawer or home screen.
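
Because the file comes from outside the Play Store, it is prudent to verify the download before installing it. The sketch below computes a SHA-256 checksum to compare against one published by the download source; the file name matches the steps above, but the expected digest is a placeholder you would replace with the value the source provides:

```python
import hashlib

APK_PATH = "youtube-4-0-8.apk"
# Placeholder: substitute the checksum published by the download source.
EXPECTED_SHA256 = "0" * 64

sha256 = hashlib.sha256()
with open(APK_PATH, "rb") as f:
    # Read in chunks so large APKs don't need to fit in memory at once.
    for chunk in iter(lambda: f.read(8192), b""):
        sha256.update(chunk)

digest = sha256.hexdigest()
print(f"sha256: {digest}")
if digest != EXPECTED_SHA256:
    raise SystemExit("Checksum mismatch: do not install this file.")
```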

How to use YouTube 4.0 for Android

-

How to browse and watch videos on YouTube 4.0 for Android

-

To browse and watch videos on YouTube 4.0 for Android, you can use the following options:

- -

To watch a video on YouTube 4.0 for Android, you can simply tap on it and it will start playing. You can also use the following controls:

- -

How to create and upload videos on YouTube 4.0 for Android

-

To create and upload videos on YouTube 4.0 for Android, you can use the following options:

- -

How to connect with the YouTube community on YouTube 4.0 for Android

-

To connect with the YouTube community on YouTube 4.0 for Android, you can use the following options:

- -

Tips and tricks for YouTube 4.0 for Android

-

How to customize your YouTube experience on YouTube 4.0 for Android

-

To customize your YouTube experience on YouTube 4.0 for Android, you can use the following options:

- -

How to save videos for offline viewing on YouTube 4.0 for Android

-

To save videos for offline viewing on YouTube 4.0 for Android, you can use the following option:

- -

How to upgrade to YouTube Premium on YouTube 4.0 for Android

-

To upgrade to YouTube Premium on YouTube 4.0 for Android, you can use the following option:

- -

Conclusion

-

YouTube 4.0 for Android is a great way to enjoy videos on your phone, especially if you have an older device or a limited data plan. It has a simple and intuitive interface that lets you access all the essential functions of YouTube, such as browsing, watching, creating, and sharing videos. It also supports some features that are not available in newer versions of the app, such as background play, manual quality adjustment, and portrait mode.

-

However, YouTube 4.0 for Android also has some limitations, such as the lack of support for some newer features, the lack of updates and bug fixes, and the possibility of compatibility issues. Therefore, you should weigh the pros and cons of using YouTube 4.0 for Android before downloading and installing it.

-

If you want to download YouTube 4.0 for Android, you can follow the steps we have outlined in this article. You can also use our tips and tricks to customize your YouTube experience and make the most of it. We hope you found this article helpful and informative.

-

Now that you know how to download YouTube 4.0 for Android, why not give it a try and see for yourself? You might be surprised by how much you can do with this old but gold version of the app. Happy watching!

-

FAQs

-

Here are some frequently asked questions about YouTube 4.0 for Android:

-
1. Is YouTube 4.0 for Android safe to use?

   YouTube 4.0 for Android is safe to use as long as you download it from a trusted source, such as [this link]. However, you should be aware that Google does not support or update this version of the app anymore, so there might be some security risks or vulnerabilities that are never fixed.

2. Can I use YouTube 4.0 for Android on any device?

   You can use YouTube 4.0 for Android on any device that runs Android 4.1 or higher. However, some devices or videos might not be compatible with this version of the app, so you might encounter some errors or glitches while using it.

3. Can I use YouTube 4.0 for Android along with the latest version of the app?

   You can use YouTube 4.0 for Android along with the latest version of the app if you have enough storage space on your phone. However, you should not run both apps at the same time, as this might cause conflicts or crashes.

4. Can I update YouTube 4.0 for Android to the latest version of the app?

   You can update YouTube 4.0 for Android to the latest version of the app by going to Google Play Store and tapping on "Update". However, this will overwrite your existing version of the app and you will lose the features that are only available in YouTube 4.0 for Android.

5. Can I downgrade from the latest version of the app to YouTube 4.0 for Android?

   You can downgrade from the latest version of the app to YouTube 4.0 for Android by following these steps:

   1. Uninstall the latest version of the app from your phone.
   2. Go to [this link] on your phone's browser and tap on "Download APK". This will download the file "youtube-4-0-8.apk" to your phone.
   3. Go to your phone's settings and enable "Unknown sources" under "Security". This will allow you to install apps from sources other than Google Play Store.
   4. Go to your phone's file manager and locate the downloaded file "youtube-4-0-8.apk". Tap on it and follow the instructions to install it.
   5. Once installed, you can launch the app from your app drawer or home screen.

-
-
\ No newline at end of file diff --git a/spaces/232labs/VToonify/vtoonify/model/raft/evaluate.py b/spaces/232labs/VToonify/vtoonify/model/raft/evaluate.py deleted file mode 100644 index 431a0f58891bede2804454fa7f28e9434c4c8746..0000000000000000000000000000000000000000 --- a/spaces/232labs/VToonify/vtoonify/model/raft/evaluate.py +++ /dev/null @@ -1,197 +0,0 @@ -import sys -sys.path.append('core') - -from PIL import Image -import argparse -import os -import time -import numpy as np -import torch -import torch.nn.functional as F -import matplotlib.pyplot as plt - -import datasets -from utils import flow_viz -from utils import frame_utils - -from raft import RAFT -from utils.utils import InputPadder, forward_interpolate - - -@torch.no_grad() -def create_sintel_submission(model, iters=32, warm_start=False, output_path='sintel_submission'): - """ Create submission for the Sintel leaderboard """ - model.eval() - for dstype in ['clean', 'final']: - test_dataset = datasets.MpiSintel(split='test', aug_params=None, dstype=dstype) - - flow_prev, sequence_prev = None, None - for test_id in range(len(test_dataset)): - image1, image2, (sequence, frame) = test_dataset[test_id] - if sequence != sequence_prev: - flow_prev = None - - padder = InputPadder(image1.shape) - image1, image2 = padder.pad(image1[None].cuda(), image2[None].cuda()) - - flow_low, flow_pr = model(image1, image2, iters=iters, flow_init=flow_prev, test_mode=True) - flow = padder.unpad(flow_pr[0]).permute(1, 2, 0).cpu().numpy() - - if warm_start: - flow_prev = forward_interpolate(flow_low[0])[None].cuda() - - output_dir = os.path.join(output_path, dstype, sequence) - output_file = os.path.join(output_dir, 'frame%04d.flo' % (frame+1)) - - if not os.path.exists(output_dir): - os.makedirs(output_dir) - - frame_utils.writeFlow(output_file, flow) - sequence_prev = sequence - - -@torch.no_grad() -def create_kitti_submission(model, iters=24, output_path='kitti_submission'): - """ Create submission for the Sintel leaderboard """ - model.eval() - test_dataset = datasets.KITTI(split='testing', aug_params=None) - - if not os.path.exists(output_path): - os.makedirs(output_path) - - for test_id in range(len(test_dataset)): - image1, image2, (frame_id, ) = test_dataset[test_id] - padder = InputPadder(image1.shape, mode='kitti') - image1, image2 = padder.pad(image1[None].cuda(), image2[None].cuda()) - - _, flow_pr = model(image1, image2, iters=iters, test_mode=True) - flow = padder.unpad(flow_pr[0]).permute(1, 2, 0).cpu().numpy() - - output_filename = os.path.join(output_path, frame_id) - frame_utils.writeFlowKITTI(output_filename, flow) - - -@torch.no_grad() -def validate_chairs(model, iters=24): - """ Perform evaluation on the FlyingChairs (test) split """ - model.eval() - epe_list = [] - - val_dataset = datasets.FlyingChairs(split='validation') - for val_id in range(len(val_dataset)): - image1, image2, flow_gt, _ = val_dataset[val_id] - image1 = image1[None].cuda() - image2 = image2[None].cuda() - - _, flow_pr = model(image1, image2, iters=iters, test_mode=True) - epe = torch.sum((flow_pr[0].cpu() - flow_gt)**2, dim=0).sqrt() - epe_list.append(epe.view(-1).numpy()) - - epe = np.mean(np.concatenate(epe_list)) - print("Validation Chairs EPE: %f" % epe) - return {'chairs': epe} - - -@torch.no_grad() -def validate_sintel(model, iters=32): - """ Peform validation using the Sintel (train) split """ - model.eval() - results = {} - for dstype in ['clean', 'final']: - val_dataset = datasets.MpiSintel(split='training', dstype=dstype) - epe_list = [] - - for 
val_id in range(len(val_dataset)): - image1, image2, flow_gt, _ = val_dataset[val_id] - image1 = image1[None].cuda() - image2 = image2[None].cuda() - - padder = InputPadder(image1.shape) - image1, image2 = padder.pad(image1, image2) - - flow_low, flow_pr = model(image1, image2, iters=iters, test_mode=True) - flow = padder.unpad(flow_pr[0]).cpu() - - epe = torch.sum((flow - flow_gt)**2, dim=0).sqrt() - epe_list.append(epe.view(-1).numpy()) - - epe_all = np.concatenate(epe_list) - epe = np.mean(epe_all) - px1 = np.mean(epe_all<1) - px3 = np.mean(epe_all<3) - px5 = np.mean(epe_all<5) - - print("Validation (%s) EPE: %f, 1px: %f, 3px: %f, 5px: %f" % (dstype, epe, px1, px3, px5)) - results[dstype] = np.mean(epe_list) - - return results - - -@torch.no_grad() -def validate_kitti(model, iters=24): - """ Peform validation using the KITTI-2015 (train) split """ - model.eval() - val_dataset = datasets.KITTI(split='training') - - out_list, epe_list = [], [] - for val_id in range(len(val_dataset)): - image1, image2, flow_gt, valid_gt = val_dataset[val_id] - image1 = image1[None].cuda() - image2 = image2[None].cuda() - - padder = InputPadder(image1.shape, mode='kitti') - image1, image2 = padder.pad(image1, image2) - - flow_low, flow_pr = model(image1, image2, iters=iters, test_mode=True) - flow = padder.unpad(flow_pr[0]).cpu() - - epe = torch.sum((flow - flow_gt)**2, dim=0).sqrt() - mag = torch.sum(flow_gt**2, dim=0).sqrt() - - epe = epe.view(-1) - mag = mag.view(-1) - val = valid_gt.view(-1) >= 0.5 - - out = ((epe > 3.0) & ((epe/mag) > 0.05)).float() - epe_list.append(epe[val].mean().item()) - out_list.append(out[val].cpu().numpy()) - - epe_list = np.array(epe_list) - out_list = np.concatenate(out_list) - - epe = np.mean(epe_list) - f1 = 100 * np.mean(out_list) - - print("Validation KITTI: %f, %f" % (epe, f1)) - return {'kitti-epe': epe, 'kitti-f1': f1} - - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('--model', help="restore checkpoint") - parser.add_argument('--dataset', help="dataset for evaluation") - parser.add_argument('--small', action='store_true', help='use small model') - parser.add_argument('--mixed_precision', action='store_true', help='use mixed precision') - parser.add_argument('--alternate_corr', action='store_true', help='use efficent correlation implementation') - args = parser.parse_args() - - model = torch.nn.DataParallel(RAFT(args)) - model.load_state_dict(torch.load(args.model)) - - model.cuda() - model.eval() - - # create_sintel_submission(model.module, warm_start=True) - # create_kitti_submission(model.module) - - with torch.no_grad(): - if args.dataset == 'chairs': - validate_chairs(model.module) - - elif args.dataset == 'sintel': - validate_sintel(model.module) - - elif args.dataset == 'kitti': - validate_kitti(model.module) - - diff --git a/spaces/42digital/DeepFashion_Classification/app.py b/spaces/42digital/DeepFashion_Classification/app.py deleted file mode 100644 index ac48d08325baf169d53b8a731905e94adbd3b3b9..0000000000000000000000000000000000000000 --- a/spaces/42digital/DeepFashion_Classification/app.py +++ /dev/null @@ -1,31 +0,0 @@ -import gradio as gr -from huggingface_hub import hf_hub_download -from fastai.learner import load_learner -import os - -print(gr.__version__) - -TOKEN = os.environ["token"] -REPO_ID = "42digital/deepfashion_classification_vit-large-patch14-clip-336" -FILENAME = "model.pkl" -EXAMPLES = ["dress.jpg", "hoodie.jpg", "joggers.jpg", "jumpsuit.jpg", "shorts.jpg", "tee.jpg"] - -learner = load_learner( - 
hf_hub_download(repo_id=REPO_ID, filename=FILENAME, token=TOKEN) -) - -def predict(img): - _, _, probs = learner.predict(img) - probs = [float(p) for p in probs.detach()] - preds = {k: v for k, v in zip(learner.dls.vocab, probs)} - return preds - -gr.Interface(fn=predict, - inputs=gr.Image(type="numpy"), - outputs=gr.Label(num_top_classes=5), - examples=EXAMPLES, - cache_examples=False, - title="Fashion Classification", - description="Recognize clothes in an image. [ViT-L/14](https://arxiv.org/abs/2010.11929) trained on 46 clothing categories from [DeepFashion](https://openaccess.thecvf.com/content_cvpr_2016/html/Liu_DeepFashion_Powering_Robust_CVPR_2016_paper.html) @ 76% Top-1 Accuracy and 92% [Top-3 Accuracy](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.top_k_accuracy_score.html) (many images show more than one clothing item).", - analytics_enabled=False, - ).launch() \ No newline at end of file diff --git a/spaces/52Hz/SUNet_AWGN_denoising/model/SUNet.py b/spaces/52Hz/SUNet_AWGN_denoising/model/SUNet.py deleted file mode 100644 index 4d06371d912d2930e3b1b4ee0dd37c89e0195dff..0000000000000000000000000000000000000000 --- a/spaces/52Hz/SUNet_AWGN_denoising/model/SUNet.py +++ /dev/null @@ -1,30 +0,0 @@ -import torch.nn as nn -from model.SUNet_detail import SUNet - - -class SUNet_model(nn.Module): - def __init__(self, config): - super(SUNet_model, self).__init__() - self.config = config - self.swin_unet = SUNet(img_size=config['SWINUNET']['IMG_SIZE'], - patch_size=config['SWINUNET']['PATCH_SIZE'], - in_chans=3, - out_chans=3, - embed_dim=config['SWINUNET']['EMB_DIM'], - depths=config['SWINUNET']['DEPTH_EN'], - num_heads=config['SWINUNET']['HEAD_NUM'], - window_size=config['SWINUNET']['WIN_SIZE'], - mlp_ratio=config['SWINUNET']['MLP_RATIO'], - qkv_bias=config['SWINUNET']['QKV_BIAS'], - qk_scale=config['SWINUNET']['QK_SCALE'], - drop_rate=config['SWINUNET']['DROP_RATE'], - drop_path_rate=config['SWINUNET']['DROP_PATH_RATE'], - ape=config['SWINUNET']['APE'], - patch_norm=config['SWINUNET']['PATCH_NORM'], - use_checkpoint=config['SWINUNET']['USE_CHECKPOINTS']) - - def forward(self, x): - if x.size()[1] == 1: - x = x.repeat(1, 3, 1, 1) - logits = self.swin_unet(x) - return logits diff --git a/spaces/7hao/bingo/src/state/index.ts b/spaces/7hao/bingo/src/state/index.ts deleted file mode 100644 index 272106d619c69f124ea8dd0f10872ab4840152d7..0000000000000000000000000000000000000000 --- a/spaces/7hao/bingo/src/state/index.ts +++ /dev/null @@ -1,118 +0,0 @@ -import { BingWebBot } from '@/lib/bots/bing' -import { BingConversationStyle, ChatMessageModel, BotId } from '@/lib/bots/bing/types' -import { nanoid } from '@/lib/utils' -import { atom } from 'jotai' -import { atomWithImmer } from 'jotai-immer' -import { atomWithStorage } from 'jotai/utils' -import { atomFamily } from 'jotai/utils' -import { atomWithHash, atomWithLocation } from 'jotai-location' - -const initialMessages: ChatMessageModel[] = [ - { author: 'system', text: 'conversation between user and robot', id: '1' }, - { author: 'user', text: '销量最高的 3 种宠物吸尘器有哪些优点和缺点? 
', id: '2' }, - { - author: 'bot', text: ` -您好,这是必应。根据网上的搜索结果,我为您找到了以下三款销量最高的宠物吸尘器,以及它们的优点和缺点: - -- **戴森Dyson V10轻量版 Digital Slim Fluffy无线吸尘器**[^1^] [^3^]:这款吸尘器专为宠物家庭设计,拥有强大吸力和多功能吸头,能够有效清除宠物毛发和灰尘。 - - 优点:轻便易用,续航时间长,过滤系统先进,能够有效去除过敏原; - - 缺点:价格较高,噪音较大,需要定期清洁滤网。 -- **飞利浦FC6908/82(W7)无线吸尘器**[^2^] [^4^]:这是一款手持式干吸无线吸尘器,专门为宠物家庭设计。它具有75分钟的超高续航,适合大户型;除螨涡轮刷,可以深度清洁床单被罩,免受螨虫侵害;吸拖一体设计,280ml大水箱,还可以往水箱里加入清洁剂。 - - 优点:清洁性能强劲,操作方便,适用多种场景; - - 缺点:噪音较大,达到84dB(A);尘杯容量较小,只有0.6L;充电时长较长,需要5小时。 -- **必胜(Bissell) 宠物吸尘器**[^2^]:这是一款便携式手持真空吸尘器,专为清理宠物毛发而设计。它配备特殊的橡胶喷嘴和软毛刷头,可以轻松拾取宠物毛发和灰尘;几乎静音运行。 - - 优点:轻巧易用,重量只有1.36kg;电池可充电可更换; - - 缺点:续航时间不稳定,在15-30分钟之间浮动;吸力不够强劲,在地毯或沙发上可能效果不佳;没有过滤系统或过滤网。 - -希望这些信息对您有所帮助。如果您还有其他问题,请随时提问。😊 - `, id: '3' }, - { author: 'user', text: '今天的新闻', id: '4' }, - { - author: 'bot', text: `你好,这是Bing。我根据你的搜索查询“今天的新闻”为你找到了以下信息: - - # 中国新闻 - - 《求是》杂志发表习近平总书记重要文章《深化党和国家机构改革 推进国家治理体系和治理能力现代化》[^1^] - - 梦之队第5金! 全红婵陈芋汐女双10米台夺冠[^1^] - - 中央气象台7月16日18时发布台风橙色预警[^1^] - - 贵州石阡:暑期旅游带动乡村振兴[^1^] - - 激活大科学装置的“人才红利”[^1^] - - 聚才用才留才 让希望的田野成为智慧的田野[^1^] - - 科学家发现粮食系统增效可有效支撑我国碳中和[^1^] - - 成都以赛为媒提升城市美誉度[^1^] - - 香江观澜:“抢人才”成效显著 香港竞争力依旧[^1^] - - 浙江建德:新安江上享清凉[^1^] - - 定向资金持续精准滴灌 高质量发展力量不断积累[^1^] - - 企业信心恢复加大中长期投资 为稳增长奠定基础[^1^] - - 武汉渡江节开赛 千名“泳士”竞渡长江[^1^] - - 金融更加重视对实体经济支持 降低融资成本提振市场信心[^1^] - - 大运来了丨成都迎大运 全民健身动起来[^1^] - - 藏在高校里的宝藏博物馆[^1^] - - 中国汽车工业用70年赢得三个“全球第一”[^1^] - - 辽宁阜新市委常委、副市长赖化平涉嫌严重违纪违法接受审查调查[^1^] - - # 国际新闻 - - 联合国秘书长:可持续发展面临风险 各国需加强行动[^2^] - - 国际航运业加快绿色转型[^2^] - - 美企反对收紧对华芯片出口限制[^2^] - - 欧洲加大气候科技领域投资[^2^] - - 中企助力丹麦发展清洁能源[^2^] - - 中国代表呼吁国际社会共同努力防止乌克兰局势失控[^2^] - - 中国和阿尔及利亚共同构建新型国际关系典范[^2^] - - 以上信息仅供参考,具体详情请点击以下链接查看: - - [1] [中国新闻_央视网(cctv.com)](https://news.cctv.com/china/) - [2] [人民网_网上的人民日报](http://www.people.com.cn/)`, id: '5' }, - { author: 'user', text: '写一个快排', id: '6' }, - { author: 'bot', text: "你好,这是Bing。我可以帮你用python写一个快排。快速排序是一种基于分治思想的排序算法,它的基本思想是:从待排序的数组中选择一个基准元素,将数组分为两个子数组,使得左边的子数组中的元素都小于或等于基准元素,右边的子数组中的元素都大于或等于基准元素,然后对两个子数组递归地进行快速排序,最后合并结果。下面是用python实现的快速排序的代码:\n\n```python\n# 定义一个函数,用于交换数组中的两个元素\ndef swap(arr, i, j):\n temp = arr[i]\n arr[i] = arr[j]\n arr[j] = temp\n\n# 定义一个函数,用于对数组的一段区间进行划分\ndef partition(arr, low, high):\n # 选择最右边的元素作为基准元素\n pivot = arr[high]\n # 初始化一个指针,指向左边区间的最后一个位置\n i = low - 1\n # 遍历左边区间中的每个元素\n for j in range(low, high):\n # 如果当前元素小于或等于基准元素\n if arr[j] <= pivot:\n # 将指针右移一位\n i += 1\n # 将指针所指的元素与当前元素交换\n swap(arr, i, j)\n # 将基准元素与指针右边的第一个元素交换\n swap(arr, i + 1, high)\n # 返回基准元素的最终位置\n return i + 1\n\n# 定义一个函数,用于对数组进行快速排序\ndef quick_sort(arr, low, high):\n # 如果区间长度大于1\n if low < high:\n # 对区间进行划分,得到基准元素的位置\n p = partition(arr, low, high)\n # 对左边子区间进行快速排序\n quick_sort(arr, low, p - 1)\n # 对右边子区间进行快速排序\n quick_sort(arr, p + 1, high)\n\n# 测试代码\narr = [5, 3, 7, 2, 9, 1, 4, 6, 8]\nprint(\"原始数组:\", arr)\nquick_sort(arr, 0, len(arr) - 1)\nprint(\"排序后的数组:\", arr)\n```", id: '7' }, - { - author: 'bot', text: "好的,我会尝试画一只猫。\n > ![一只猫](https://tse2.mm.bing.net/th/id/OIG.jz34V0PNVkPC229h9spV?w=270&h=270&c=6&r=0&o=5&dpr=1.5&pid=ImgGn)![一只猫](https://tse1.mm.bing.net/th/id/OIG.6g7d.XLZMP_iwAByLhvo?w=270&h=270&c=6&r=0&o=5&dpr=1.5&pid=ImgGn)![一只猫](https://tse2.mm.bing.net/th/id/OIG.iAxF4ekekYn7sZw9SmU6?w=270&h=270&c=6&r=0&o=5&dpr=1.5&pid=ImgGn)![一只猫](https://tse4.mm.bing.net/th/id/OIG.qDnzeSKzUCeJcrBqc5mX?w=270&h=270&c=6&r=0&o=5&dpr=1.5&pid=ImgGn)", - id: '8' - } -] - -export const GreetMessages = [ - '谢谢你! 
知道你什么时候准备好继续前进总是很有帮助的。我现在能为你回答什么问题?', - '重新开始总是很棒。问我任何问题!', - '当然,我很乐意重新开始。我现在可以为你提供哪些帮助?', - '当然,我已准备好进行新的挑战。我现在可以为你做什么?', - '很好,让我们来更改主题。你在想什么?', - '不用担心,我很高兴尝试一些新内容。我现在可以为你回答什么问题?', - '好的,我准备好了!感谢重置。我们应该了解哪些内容?', - '感谢刷新!你有新的话题吗?', - '明白了,让我们重新开始。接下来应该讨论什么?', - '下一步!我可以为你做什么?', - '好的,我已准备好新话题。我们应该一起了解哪些内容?' -] - -export const bingConversationStyleAtom = atomWithStorage('bingConversationStyle', BingConversationStyle.Creative, undefined, { unstable_getOnInit: true }) -export const voiceAtom = atomWithStorage('enableTTS', false, undefined, { unstable_getOnInit: true }) - -type Param = { botId: BotId; page: string } - -const createBotInstance = () => { - return new BingWebBot({ - cookie: ' ', - ua: ' ', - }) -} - -export const chatFamily = atomFamily( - (param: Param) => { - return atomWithImmer({ - botId: param.botId, - bot: createBotInstance(), - messages: [] as ChatMessageModel[], - generatingMessageId: '', - abortController: undefined as AbortController | undefined, - conversationId: nanoid(), - }) - }, - (a, b) => a.botId === b.botId && a.page === b.page, -) - -export const hashAtom = atomWithHash('dialog', '') - -export const locationAtom = atomWithLocation() - -export const voiceListenAtom = atom(false) diff --git a/spaces/AFlac199/openai-reverse-proxy/README.md b/spaces/AFlac199/openai-reverse-proxy/README.md deleted file mode 100644 index 0075ec72a776fbdb38f0862a429a4a6fe124bcb8..0000000000000000000000000000000000000000 --- a/spaces/AFlac199/openai-reverse-proxy/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Openai Reverse Proxy -emoji: 🐨 -colorFrom: blue -colorTo: green -sdk: docker -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/AIConsultant/MusicGen/audiocraft/utils/samples/manager.py b/spaces/AIConsultant/MusicGen/audiocraft/utils/samples/manager.py deleted file mode 100644 index bf0fb21b2d2867c03f7cce6f27d9524fdb89b51d..0000000000000000000000000000000000000000 --- a/spaces/AIConsultant/MusicGen/audiocraft/utils/samples/manager.py +++ /dev/null @@ -1,386 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -""" -API that can manage the storage and retrieval of generated samples produced by experiments. 
- -It offers the following benefits: -* Samples are stored in a consistent way across epoch -* Metadata about the samples can be stored and retrieved -* Can retrieve audio -* Identifiers are reliable and deterministic for prompted and conditioned samples -* Can request the samples for multiple XPs, grouped by sample identifier -* For no-input samples (not prompt and no conditions), samples across XPs are matched - by sorting their identifiers -""" - -from concurrent.futures import ThreadPoolExecutor -from dataclasses import asdict, dataclass -from functools import lru_cache -import hashlib -import json -import logging -from pathlib import Path -import re -import typing as tp -import unicodedata -import uuid - -import dora -import torch - -from ...data.audio import audio_read, audio_write - - -logger = logging.getLogger(__name__) - - -@dataclass -class ReferenceSample: - id: str - path: str - duration: float - - -@dataclass -class Sample: - id: str - path: str - epoch: int - duration: float - conditioning: tp.Optional[tp.Dict[str, tp.Any]] - prompt: tp.Optional[ReferenceSample] - reference: tp.Optional[ReferenceSample] - generation_args: tp.Optional[tp.Dict[str, tp.Any]] - - def __hash__(self): - return hash(self.id) - - def audio(self) -> tp.Tuple[torch.Tensor, int]: - return audio_read(self.path) - - def audio_prompt(self) -> tp.Optional[tp.Tuple[torch.Tensor, int]]: - return audio_read(self.prompt.path) if self.prompt is not None else None - - def audio_reference(self) -> tp.Optional[tp.Tuple[torch.Tensor, int]]: - return audio_read(self.reference.path) if self.reference is not None else None - - -class SampleManager: - """Audio samples IO handling within a given dora xp. - - The sample manager handles the dumping and loading logic for generated and - references samples across epochs for a given xp, providing a simple API to - store, retrieve and compare audio samples. - - Args: - xp (dora.XP): Dora experiment object. The XP contains information on the XP folder - where all outputs are stored and the configuration of the experiment, - which is useful to retrieve audio-related parameters. - map_reference_to_sample_id (bool): Whether to use the sample_id for all reference samples - instead of generating a dedicated hash id. This is useful to allow easier comparison - with ground truth sample from the files directly without having to read the JSON metadata - to do the mapping (at the cost of potentially dumping duplicate prompts/references - depending on the task). 
- """ - def __init__(self, xp: dora.XP, map_reference_to_sample_id: bool = False): - self.xp = xp - self.base_folder: Path = xp.folder / xp.cfg.generate.path - self.reference_folder = self.base_folder / 'reference' - self.map_reference_to_sample_id = map_reference_to_sample_id - self.samples: tp.List[Sample] = [] - self._load_samples() - - @property - def latest_epoch(self): - """Latest epoch across all samples.""" - return max(self.samples, key=lambda x: x.epoch).epoch if self.samples else 0 - - def _load_samples(self): - """Scan the sample folder and load existing samples.""" - jsons = self.base_folder.glob('**/*.json') - with ThreadPoolExecutor(6) as pool: - self.samples = list(pool.map(self._load_sample, jsons)) - - @staticmethod - @lru_cache(2**26) - def _load_sample(json_file: Path) -> Sample: - with open(json_file, 'r') as f: - data: tp.Dict[str, tp.Any] = json.load(f) - # fetch prompt data - prompt_data = data.get('prompt') - prompt = ReferenceSample(id=prompt_data['id'], path=prompt_data['path'], - duration=prompt_data['duration']) if prompt_data else None - # fetch reference data - reference_data = data.get('reference') - reference = ReferenceSample(id=reference_data['id'], path=reference_data['path'], - duration=reference_data['duration']) if reference_data else None - # build sample object - return Sample(id=data['id'], path=data['path'], epoch=data['epoch'], duration=data['duration'], - prompt=prompt, conditioning=data.get('conditioning'), reference=reference, - generation_args=data.get('generation_args')) - - def _init_hash(self): - return hashlib.sha1() - - def _get_tensor_id(self, tensor: torch.Tensor) -> str: - hash_id = self._init_hash() - hash_id.update(tensor.numpy().data) - return hash_id.hexdigest() - - def _get_sample_id(self, index: int, prompt_wav: tp.Optional[torch.Tensor], - conditions: tp.Optional[tp.Dict[str, str]]) -> str: - """Computes an id for a sample given its input data. - This id is deterministic if prompt and/or conditions are provided by using a sha1 hash on the input. - Otherwise, a random id of the form "noinput_{uuid4().hex}" is returned. - - Args: - index (int): Batch index, Helpful to differentiate samples from the same batch. - prompt_wav (torch.Tensor): Prompt used during generation. - conditions (dict[str, str]): Conditioning used during generation. - """ - # For totally unconditioned generations we will just use a random UUID. - # The function get_samples_for_xps will do a simple ordered match with a custom key. - if prompt_wav is None and not conditions: - return f"noinput_{uuid.uuid4().hex}" - - # Human readable portion - hr_label = "" - # Create a deterministic id using hashing - hash_id = self._init_hash() - hash_id.update(f"{index}".encode()) - if prompt_wav is not None: - hash_id.update(prompt_wav.numpy().data) - hr_label += "_prompted" - else: - hr_label += "_unprompted" - if conditions: - encoded_json = json.dumps(conditions, sort_keys=True).encode() - hash_id.update(encoded_json) - cond_str = "-".join([f"{key}={slugify(value)}" - for key, value in sorted(conditions.items())]) - cond_str = cond_str[:100] # some raw text might be too long to be a valid filename - cond_str = cond_str if len(cond_str) > 0 else "unconditioned" - hr_label += f"_{cond_str}" - else: - hr_label += "_unconditioned" - - return hash_id.hexdigest() + hr_label - - def _store_audio(self, wav: torch.Tensor, stem_path: Path, overwrite: bool = False) -> Path: - """Stores the audio with the given stem path using the XP's configuration. 
- - Args: - wav (torch.Tensor): Audio to store. - stem_path (Path): Path in sample output directory with file stem to use. - overwrite (bool): When False (default), skips storing an existing audio file. - Returns: - Path: The path at which the audio is stored. - """ - existing_paths = [ - path for path in stem_path.parent.glob(stem_path.stem + '.*') - if path.suffix != '.json' - ] - exists = len(existing_paths) > 0 - if exists and overwrite: - logger.warning(f"Overwriting existing audio file with stem path {stem_path}") - elif exists: - return existing_paths[0] - - audio_path = audio_write(stem_path, wav, **self.xp.cfg.generate.audio) - return audio_path - - def add_sample(self, sample_wav: torch.Tensor, epoch: int, index: int = 0, - conditions: tp.Optional[tp.Dict[str, str]] = None, prompt_wav: tp.Optional[torch.Tensor] = None, - ground_truth_wav: tp.Optional[torch.Tensor] = None, - generation_args: tp.Optional[tp.Dict[str, tp.Any]] = None) -> Sample: - """Adds a single sample. - The sample is stored in the XP's sample output directory, under a corresponding epoch folder. - Each sample is assigned an id which is computed using the input data. In addition to the - sample itself, a json file containing associated metadata is stored next to it. - - Args: - sample_wav (torch.Tensor): sample audio to store. Tensor of shape [channels, shape]. - epoch (int): current training epoch. - index (int): helpful to differentiate samples from the same batch. - conditions (dict[str, str], optional): conditioning used during generation. - prompt_wav (torch.Tensor, optional): prompt used during generation. Tensor of shape [channels, shape]. - ground_truth_wav (torch.Tensor, optional): reference audio where prompt was extracted from. - Tensor of shape [channels, shape]. - generation_args (dict[str, any], optional): dictionary of other arguments used during generation. - Returns: - Sample: The saved sample. 
- """ - sample_id = self._get_sample_id(index, prompt_wav, conditions) - reuse_id = self.map_reference_to_sample_id - prompt, ground_truth = None, None - if prompt_wav is not None: - prompt_id = sample_id if reuse_id else self._get_tensor_id(prompt_wav.sum(0, keepdim=True)) - prompt_duration = prompt_wav.shape[-1] / self.xp.cfg.sample_rate - prompt_path = self._store_audio(prompt_wav, self.base_folder / str(epoch) / 'prompt' / prompt_id) - prompt = ReferenceSample(prompt_id, str(prompt_path), prompt_duration) - if ground_truth_wav is not None: - ground_truth_id = sample_id if reuse_id else self._get_tensor_id(ground_truth_wav.sum(0, keepdim=True)) - ground_truth_duration = ground_truth_wav.shape[-1] / self.xp.cfg.sample_rate - ground_truth_path = self._store_audio(ground_truth_wav, self.base_folder / 'reference' / ground_truth_id) - ground_truth = ReferenceSample(ground_truth_id, str(ground_truth_path), ground_truth_duration) - sample_path = self._store_audio(sample_wav, self.base_folder / str(epoch) / sample_id, overwrite=True) - duration = sample_wav.shape[-1] / self.xp.cfg.sample_rate - sample = Sample(sample_id, str(sample_path), epoch, duration, conditions, prompt, ground_truth, generation_args) - self.samples.append(sample) - with open(sample_path.with_suffix('.json'), 'w') as f: - json.dump(asdict(sample), f, indent=2) - return sample - - def add_samples(self, samples_wavs: torch.Tensor, epoch: int, - conditioning: tp.Optional[tp.List[tp.Dict[str, tp.Any]]] = None, - prompt_wavs: tp.Optional[torch.Tensor] = None, - ground_truth_wavs: tp.Optional[torch.Tensor] = None, - generation_args: tp.Optional[tp.Dict[str, tp.Any]] = None) -> tp.List[Sample]: - """Adds a batch of samples. - The samples are stored in the XP's sample output directory, under a corresponding - epoch folder. Each sample is assigned an id which is computed using the input data and their batch index. - In addition to the sample itself, a json file containing associated metadata is stored next to it. - - Args: - sample_wavs (torch.Tensor): Batch of audio wavs to store. Tensor of shape [batch_size, channels, shape]. - epoch (int): Current training epoch. - conditioning (list of dict[str, str], optional): List of conditions used during generation, - one per sample in the batch. - prompt_wavs (torch.Tensor, optional): Prompts used during generation. Tensor of shape - [batch_size, channels, shape]. - ground_truth_wav (torch.Tensor, optional): Reference audio where prompts were extracted from. - Tensor of shape [batch_size, channels, shape]. - generation_args (dict[str, Any], optional): Dictionary of other arguments used during generation. - Returns: - samples (list of Sample): The saved audio samples with prompts, ground truth and metadata. - """ - samples = [] - for idx, wav in enumerate(samples_wavs): - prompt_wav = prompt_wavs[idx] if prompt_wavs is not None else None - gt_wav = ground_truth_wavs[idx] if ground_truth_wavs is not None else None - conditions = conditioning[idx] if conditioning is not None else None - samples.append(self.add_sample(wav, epoch, idx, conditions, prompt_wav, gt_wav, generation_args)) - return samples - - def get_samples(self, epoch: int = -1, max_epoch: int = -1, exclude_prompted: bool = False, - exclude_unprompted: bool = False, exclude_conditioned: bool = False, - exclude_unconditioned: bool = False) -> tp.Set[Sample]: - """Returns a set of samples for this XP. Optionally, you can filter which samples to obtain. 
- Please note that existing samples are loaded during the manager's initialization, and added samples through this - manager are also tracked. Any other external changes are not tracked automatically, so creating a new manager - is the only way detect them. - - Args: - epoch (int): If provided, only return samples corresponding to this epoch. - max_epoch (int): If provided, only return samples corresponding to the latest epoch that is <= max_epoch. - exclude_prompted (bool): If True, does not include samples that used a prompt. - exclude_unprompted (bool): If True, does not include samples that did not use a prompt. - exclude_conditioned (bool): If True, excludes samples that used conditioning. - exclude_unconditioned (bool): If True, excludes samples that did not use conditioning. - Returns: - Samples (set of Sample): The retrieved samples matching the provided filters. - """ - if max_epoch >= 0: - samples_epoch = max(sample.epoch for sample in self.samples if sample.epoch <= max_epoch) - else: - samples_epoch = self.latest_epoch if epoch < 0 else epoch - samples = { - sample - for sample in self.samples - if ( - (sample.epoch == samples_epoch) and - (not exclude_prompted or sample.prompt is None) and - (not exclude_unprompted or sample.prompt is not None) and - (not exclude_conditioned or not sample.conditioning) and - (not exclude_unconditioned or sample.conditioning) - ) - } - return samples - - -def slugify(value: tp.Any, allow_unicode: bool = False): - """Process string for safer file naming. - - Taken from https://github.com/django/django/blob/master/django/utils/text.py - - Convert to ASCII if 'allow_unicode' is False. Convert spaces or repeated - dashes to single dashes. Remove characters that aren't alphanumerics, - underscores, or hyphens. Convert to lowercase. Also strip leading and - trailing whitespace, dashes, and underscores. - """ - value = str(value) - if allow_unicode: - value = unicodedata.normalize("NFKC", value) - else: - value = ( - unicodedata.normalize("NFKD", value) - .encode("ascii", "ignore") - .decode("ascii") - ) - value = re.sub(r"[^\w\s-]", "", value.lower()) - return re.sub(r"[-\s]+", "-", value).strip("-_") - - -def _match_stable_samples(samples_per_xp: tp.List[tp.Set[Sample]]) -> tp.Dict[str, tp.List[Sample]]: - # Create a dictionary of stable id -> sample per XP - stable_samples_per_xp = [{ - sample.id: sample for sample in samples - if sample.prompt is not None or sample.conditioning - } for samples in samples_per_xp] - # Set of all stable ids - stable_ids = {id for samples in stable_samples_per_xp for id in samples.keys()} - # Dictionary of stable id -> list of samples. If an XP does not have it, assign None - stable_samples = {id: [xp.get(id) for xp in stable_samples_per_xp] for id in stable_ids} - # Filter out ids that contain None values (we only want matched samples after all) - # cast is necessary to avoid mypy linter errors. 
- return {id: tp.cast(tp.List[Sample], samples) for id, samples in stable_samples.items() if None not in samples} - - -def _match_unstable_samples(samples_per_xp: tp.List[tp.Set[Sample]]) -> tp.Dict[str, tp.List[Sample]]: - # For unstable ids, we use a sorted list since we'll match them in order - unstable_samples_per_xp = [[ - sample for sample in sorted(samples, key=lambda x: x.id) - if sample.prompt is None and not sample.conditioning - ] for samples in samples_per_xp] - # Trim samples per xp so all samples can have a match - min_len = min([len(samples) for samples in unstable_samples_per_xp]) - unstable_samples_per_xp = [samples[:min_len] for samples in unstable_samples_per_xp] - # Dictionary of index -> list of matched samples - return { - f'noinput_{i}': [samples[i] for samples in unstable_samples_per_xp] for i in range(min_len) - } - - -def get_samples_for_xps(xps: tp.List[dora.XP], **kwargs) -> tp.Dict[str, tp.List[Sample]]: - """Gets a dictionary of matched samples across the given XPs. - Each dictionary entry maps a sample id to a list of samples for that id. The number of samples per id - will always match the number of XPs provided and will correspond to each XP in the same order given. - In other words, only samples that can be match across all provided XPs will be returned - in order to satisfy this rule. - - There are two types of ids that can be returned: stable and unstable. - * Stable IDs are deterministic ids that were computed by the SampleManager given a sample's inputs - (prompts/conditioning). This is why we can match them across XPs. - * Unstable IDs are of the form "noinput_{idx}" and are generated on-the-fly, in order to map samples - that used non-deterministic, random ids. This is the case for samples that did not use prompts or - conditioning for their generation. This function will sort these samples by their id and match them - by their index. - - Args: - xps: a list of XPs to match samples from. - start_epoch (int): If provided, only return samples corresponding to this epoch or newer. - end_epoch (int): If provided, only return samples corresponding to this epoch or older. - exclude_prompted (bool): If True, does not include samples that used a prompt. - exclude_unprompted (bool): If True, does not include samples that did not use a prompt. - exclude_conditioned (bool): If True, excludes samples that used conditioning. - exclude_unconditioned (bool): If True, excludes samples that did not use conditioning. 
- """ - managers = [SampleManager(xp) for xp in xps] - samples_per_xp = [manager.get_samples(**kwargs) for manager in managers] - stable_samples = _match_stable_samples(samples_per_xp) - unstable_samples = _match_unstable_samples(samples_per_xp) - return dict(stable_samples, **unstable_samples) diff --git a/spaces/AIGC-Audio/AudioGPT/NeuralSeq/utils/os_utils.py b/spaces/AIGC-Audio/AudioGPT/NeuralSeq/utils/os_utils.py deleted file mode 100644 index c78a44c04eadc3feb3c35f88c8a074f59ab23778..0000000000000000000000000000000000000000 --- a/spaces/AIGC-Audio/AudioGPT/NeuralSeq/utils/os_utils.py +++ /dev/null @@ -1,20 +0,0 @@ -import os -import subprocess - - -def link_file(from_file, to_file): - subprocess.check_call( - f'ln -s "`realpath --relative-to="{os.path.dirname(to_file)}" "{from_file}"`" "{to_file}"', shell=True) - - -def move_file(from_file, to_file): - subprocess.check_call(f'mv "{from_file}" "{to_file}"', shell=True) - - -def copy_file(from_file, to_file): - subprocess.check_call(f'cp -r "{from_file}" "{to_file}"', shell=True) - - -def remove_file(*fns): - for f in fns: - subprocess.check_call(f'rm -rf "{f}"', shell=True) \ No newline at end of file diff --git a/spaces/AIGC-Audio/AudioGPT/text_to_speech/modules/vocoder/parallel_wavegan/stft_loss.py b/spaces/AIGC-Audio/AudioGPT/text_to_speech/modules/vocoder/parallel_wavegan/stft_loss.py deleted file mode 100644 index 729d694356d0353f3e3dd18730b82db55fc66550..0000000000000000000000000000000000000000 --- a/spaces/AIGC-Audio/AudioGPT/text_to_speech/modules/vocoder/parallel_wavegan/stft_loss.py +++ /dev/null @@ -1,102 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2019 Tomoki Hayashi -# MIT License (https://opensource.org/licenses/MIT) - -"""STFT-based Loss modules.""" -import librosa -import torch - -from text_to_speech.modules.vocoder.parallel_wavegan.losses import LogSTFTMagnitudeLoss, SpectralConvergengeLoss, stft - - -class STFTLoss(torch.nn.Module): - """STFT loss module.""" - - def __init__(self, fft_size=1024, shift_size=120, win_length=600, window="hann_window", - use_mel_loss=False): - """Initialize STFT loss module.""" - super(STFTLoss, self).__init__() - self.fft_size = fft_size - self.shift_size = shift_size - self.win_length = win_length - self.window = getattr(torch, window)(win_length) - self.spectral_convergenge_loss = SpectralConvergengeLoss() - self.log_stft_magnitude_loss = LogSTFTMagnitudeLoss() - self.use_mel_loss = use_mel_loss - self.mel_basis = None - - def forward(self, x, y): - """Calculate forward propagation. - - Args: - x (Tensor): Predicted signal (B, T). - y (Tensor): Groundtruth signal (B, T). - - Returns: - Tensor: Spectral convergence loss value. - Tensor: Log STFT magnitude loss value. 
- - """ - if self.window.device != x.device: - self.window = self.window.to(x.device) - x_mag = stft(x, self.fft_size, self.shift_size, self.win_length, self.window) - y_mag = stft(y, self.fft_size, self.shift_size, self.win_length, self.window) - if self.use_mel_loss: - if self.mel_basis is None: - self.mel_basis = torch.from_numpy(librosa.filters.mel(22050, self.fft_size, 80)).cuda().T - x_mag = x_mag @ self.mel_basis - y_mag = y_mag @ self.mel_basis - - sc_loss = self.spectral_convergenge_loss(x_mag, y_mag) - mag_loss = self.log_stft_magnitude_loss(x_mag, y_mag) - - return sc_loss, mag_loss - - -class MultiResolutionSTFTLoss(torch.nn.Module): - """Multi resolution STFT loss module.""" - - def __init__(self, - fft_sizes=[1024, 2048, 512], - hop_sizes=[120, 240, 50], - win_lengths=[600, 1200, 240], - window="hann_window", - use_mel_loss=False): - """Initialize Multi resolution STFT loss module. - - Args: - fft_sizes (list): List of FFT sizes. - hop_sizes (list): List of hop sizes. - win_lengths (list): List of window lengths. - window (str): Window function type. - - """ - super(MultiResolutionSTFTLoss, self).__init__() - assert len(fft_sizes) == len(hop_sizes) == len(win_lengths) - self.stft_losses = torch.nn.ModuleList() - for fs, ss, wl in zip(fft_sizes, hop_sizes, win_lengths): - self.stft_losses += [STFTLoss(fs, ss, wl, window, use_mel_loss)] - - def forward(self, x, y): - """Calculate forward propagation. - - Args: - x (Tensor): Predicted signal (B, T). - y (Tensor): Groundtruth signal (B, T). - - Returns: - Tensor: Multi resolution spectral convergence loss value. - Tensor: Multi resolution log STFT magnitude loss value. - - """ - sc_loss = 0.0 - mag_loss = 0.0 - for f in self.stft_losses: - sc_l, mag_l = f(x, y) - sc_loss += sc_l - mag_loss += mag_l - sc_loss /= len(self.stft_losses) - mag_loss /= len(self.stft_losses) - - return sc_loss, mag_loss diff --git a/spaces/AchyuthGamer/OpenGPT-Chat-UI/.svelte-kit/types/src/routes/conversation/$types.d.ts b/spaces/AchyuthGamer/OpenGPT-Chat-UI/.svelte-kit/types/src/routes/conversation/$types.d.ts deleted file mode 100644 index 645f0e4f66675bee5e4b815875e5e1813d274fc7..0000000000000000000000000000000000000000 --- a/spaces/AchyuthGamer/OpenGPT-Chat-UI/.svelte-kit/types/src/routes/conversation/$types.d.ts +++ /dev/null @@ -1,8 +0,0 @@ -import type * as Kit from '@sveltejs/kit'; - -type Expand = T extends infer O ? 
{ [K in keyof O]: O[K] } : never; -type RouteParams = { } -type RouteId = '/conversation'; - -export type RequestHandler = Kit.RequestHandler; -export type RequestEvent = Kit.RequestEvent; \ No newline at end of file diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/circlemaskimage/Factory.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/circlemaskimage/Factory.js deleted file mode 100644 index 33a5423954aec44a4cbf70f229b09dfdfe66053a..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/circlemaskimage/Factory.js +++ /dev/null @@ -1,13 +0,0 @@ -import CircleMaskImage from './CircleMaskImage.js'; -import ObjectFactory from '../ObjectFactory.js'; -import SetValue from '../../../plugins/utils/object/SetValue.js'; - -ObjectFactory.register('circleMaskImage', function (x, y, key, frame, config) { - var gameObject = new CircleMaskImage(this.scene, x, y, key, frame, config); - this.scene.add.existing(gameObject); - return gameObject; -}); - -SetValue(window, 'RexPlugins.UI.CircleMaskImage', CircleMaskImage); - -export default CircleMaskImage; \ No newline at end of file diff --git a/spaces/AiMimicry/sovits-models/vdecoder/__init__.py b/spaces/AiMimicry/sovits-models/vdecoder/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/AlekseyKorshuk/gai-project/app.py b/spaces/AlekseyKorshuk/gai-project/app.py deleted file mode 100644 index 712061fd2ca9f369e57dcb42bbd273c7a02e8388..0000000000000000000000000000000000000000 --- a/spaces/AlekseyKorshuk/gai-project/app.py +++ /dev/null @@ -1,13 +0,0 @@ -import gradio as gr - -from modules.about import render_about -from modules.playground import render_playground - -with gr.Blocks() as demo: - with gr.Tabs(): - with gr.TabItem("Playground"): - render_playground(demo) - with gr.TabItem("About"): - render_about() - -demo.queue(concurrency_count=100).launch() diff --git a/spaces/AlekseyKorshuk/model-evaluation/models/base.py b/spaces/AlekseyKorshuk/model-evaluation/models/base.py deleted file mode 100644 index 00c7eff6e5fd399d11b6dd03641be54f6ff835b1..0000000000000000000000000000000000000000 --- a/spaces/AlekseyKorshuk/model-evaluation/models/base.py +++ /dev/null @@ -1,51 +0,0 @@ -import os -import requests -import gradio as gr -from conversation import Conversation - - -class BaseModel: - name: str - endpoint: str - namespace: str - generation_params: dict - - def __init__(self, name, endpoint, namespace, generation_params): - self.name = name - self.endpoint = endpoint - self.namespace = namespace - self.generation_params = generation_params - - def generate_response(self, conversation, custom_generation_params=None): - prompt = self._get_prompt(conversation) - response = self._get_response(prompt, custom_generation_params) - return response - - def _get_prompt(self, conversation: Conversation): - prompt = "\n".join( - [conversation.memory, conversation.prompt] - ).strip() - for message in conversation.messages: - prompt += f"\n{message['from'].strip()}: {message['value'].strip()}" - prompt += f"\n{conversation.bot_label}:" - return prompt - - def _get_response(self, text, custom_generation_params): - api = str(os.environ.get("API_BASE_PATH")).replace("\{\}", "{}") - api = api.format(self.endpoint, self.namespace) - parameters = self.generation_params - if custom_generation_params is not None: - parameters.update(custom_generation_params) - payload 
= {'instances': [text], "parameters": parameters} - resp = requests.post(api, json=payload, timeout=600) - if resp.status_code != 200: - raise gr.Error(f"Endpoint returned code: {resp.status_code}. " - f"Solution: " - f"1. Scale-to-Zero enabled, so please wait for some minutes and try again. " - f"2. Probably the response generated by the model is to big, try changing max_new_tokens. " - f"3. If nothing helps — report the problem.") - predictions = resp.json()["predictions"] - if isinstance(predictions[0], str): - return predictions[0].strip() - predictions = sorted(predictions[0], key=lambda d: d['score']) - return predictions[-1]["text"].strip() diff --git a/spaces/Alfasign/diffusers-gallery/README.md b/spaces/Alfasign/diffusers-gallery/README.md deleted file mode 100644 index ff1cbb6ee8e12c3a15d98730f50873db96260bad..0000000000000000000000000000000000000000 --- a/spaces/Alfasign/diffusers-gallery/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Diffusers Gallery -emoji: 🖼️ -colorFrom: red -colorTo: green -sdk: static -app_port: 8080 -fullWidth: true -pinned: false -license: mit -duplicated_from: huggingface-projects/diffusers-gallery ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/text_to_image/train_text_to_image_flax.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/text_to_image/train_text_to_image_flax.py deleted file mode 100644 index 717763d72aafec54899c8dbce777d00d5f42639f..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/text_to_image/train_text_to_image_flax.py +++ /dev/null @@ -1,573 +0,0 @@ -import argparse -import logging -import math -import os -import random -from pathlib import Path - -import jax -import jax.numpy as jnp -import numpy as np -import optax -import torch -import torch.utils.checkpoint -import transformers -from datasets import load_dataset -from flax import jax_utils -from flax.training import train_state -from flax.training.common_utils import shard -from huggingface_hub import create_repo, upload_folder -from torchvision import transforms -from tqdm.auto import tqdm -from transformers import CLIPImageProcessor, CLIPTokenizer, FlaxCLIPTextModel, set_seed - -from diffusers import ( - FlaxAutoencoderKL, - FlaxDDPMScheduler, - FlaxPNDMScheduler, - FlaxStableDiffusionPipeline, - FlaxUNet2DConditionModel, -) -from diffusers.pipelines.stable_diffusion import FlaxStableDiffusionSafetyChecker -from diffusers.utils import check_min_version - - -# Will error if the minimal version of diffusers is not installed. Remove at your own risks. -check_min_version("0.19.0") - -logger = logging.getLogger(__name__) - - -def parse_args(): - parser = argparse.ArgumentParser(description="Simple example of a training script.") - parser.add_argument( - "--pretrained_model_name_or_path", - type=str, - default=None, - required=True, - help="Path to pretrained model or model identifier from huggingface.co/models.", - ) - parser.add_argument( - "--revision", - type=str, - default=None, - required=False, - help="Revision of pretrained model identifier from huggingface.co/models.", - ) - parser.add_argument( - "--dataset_name", - type=str, - default=None, - help=( - "The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private," - " dataset). 
It can also be a path pointing to a local copy of a dataset in your filesystem," - " or to a folder containing files that 🤗 Datasets can understand." - ), - ) - parser.add_argument( - "--dataset_config_name", - type=str, - default=None, - help="The config of the Dataset, leave as None if there's only one config.", - ) - parser.add_argument( - "--train_data_dir", - type=str, - default=None, - help=( - "A folder containing the training data. Folder contents must follow the structure described in" - " https://huggingface.co/docs/datasets/image_dataset#imagefolder. In particular, a `metadata.jsonl` file" - " must exist to provide the captions for the images. Ignored if `dataset_name` is specified." - ), - ) - parser.add_argument( - "--image_column", type=str, default="image", help="The column of the dataset containing an image." - ) - parser.add_argument( - "--caption_column", - type=str, - default="text", - help="The column of the dataset containing a caption or a list of captions.", - ) - parser.add_argument( - "--max_train_samples", - type=int, - default=None, - help=( - "For debugging purposes or quicker training, truncate the number of training examples to this " - "value if set." - ), - ) - parser.add_argument( - "--output_dir", - type=str, - default="sd-model-finetuned", - help="The output directory where the model predictions and checkpoints will be written.", - ) - parser.add_argument( - "--cache_dir", - type=str, - default=None, - help="The directory where the downloaded models and datasets will be stored.", - ) - parser.add_argument("--seed", type=int, default=0, help="A seed for reproducible training.") - parser.add_argument( - "--resolution", - type=int, - default=512, - help=( - "The resolution for input images, all the images in the train/validation dataset will be resized to this" - " resolution" - ), - ) - parser.add_argument( - "--center_crop", - default=False, - action="store_true", - help=( - "Whether to center crop the input images to the resolution. If not set, the images will be randomly" - " cropped. The images will be resized to the resolution first before cropping." - ), - ) - parser.add_argument( - "--random_flip", - action="store_true", - help="whether to randomly flip images horizontally", - ) - parser.add_argument( - "--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader." - ) - parser.add_argument("--num_train_epochs", type=int, default=100) - parser.add_argument( - "--max_train_steps", - type=int, - default=None, - help="Total number of training steps to perform. If provided, overrides num_train_epochs.", - ) - parser.add_argument( - "--learning_rate", - type=float, - default=1e-4, - help="Initial learning rate (after the potential warmup period) to use.", - ) - parser.add_argument( - "--scale_lr", - action="store_true", - default=False, - help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", - ) - parser.add_argument( - "--lr_scheduler", - type=str, - default="constant", - help=( - 'The scheduler type to use. 
Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' - ' "constant", "constant_with_warmup"]' - ), - ) - parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") - parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") - parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") - parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") - parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") - parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") - parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") - parser.add_argument( - "--hub_model_id", - type=str, - default=None, - help="The name of the repository to keep in sync with the local `output_dir`.", - ) - parser.add_argument( - "--logging_dir", - type=str, - default="logs", - help=( - "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" - " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." - ), - ) - parser.add_argument( - "--report_to", - type=str, - default="tensorboard", - help=( - 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`' - ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.' - ), - ) - parser.add_argument( - "--mixed_precision", - type=str, - default="no", - choices=["no", "fp16", "bf16"], - help=( - "Whether to use mixed precision. Choose" - "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." - "and an Nvidia Ampere GPU." - ), - ) - parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") - - args = parser.parse_args() - env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) - if env_local_rank != -1 and env_local_rank != args.local_rank: - args.local_rank = env_local_rank - - # Sanity checks - if args.dataset_name is None and args.train_data_dir is None: - raise ValueError("Need either a dataset name or a training folder.") - - return args - - -dataset_name_mapping = { - "lambdalabs/pokemon-blip-captions": ("image", "text"), -} - - -def get_params_to_save(params): - return jax.device_get(jax.tree_util.tree_map(lambda x: x[0], params)) - - -def main(): - args = parse_args() - - logging.basicConfig( - format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", - datefmt="%m/%d/%Y %H:%M:%S", - level=logging.INFO, - ) - # Setup logging, we only want one process per machine to log things on the screen. - logger.setLevel(logging.INFO if jax.process_index() == 0 else logging.ERROR) - if jax.process_index() == 0: - transformers.utils.logging.set_verbosity_info() - else: - transformers.utils.logging.set_verbosity_error() - - if args.seed is not None: - set_seed(args.seed) - - # Handle the repository creation - if jax.process_index() == 0: - if args.output_dir is not None: - os.makedirs(args.output_dir, exist_ok=True) - - if args.push_to_hub: - repo_id = create_repo( - repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token - ).repo_id - - # Get the datasets: you can either provide your own training and evaluation files (see below) - # or specify a Dataset from the hub (the dataset will be downloaded automatically from the datasets Hub). 
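The `get_params_to_save` helper defined above is the inverse of `flax.jax_utils.replicate`: replication adds a leading device axis to every leaf of the parameter pytree so `pmap` can keep one copy per device, and saving strips that axis back off before writing to disk. A minimal sketch of the round trip, assuming only a working `jax`/`flax` install; the toy pytree is hypothetical, standing in for the UNet parameters:

import jax
import jax.numpy as jnp
from flax import jax_utils

# Hypothetical toy pytree standing in for the UNet parameters.
params = {"w": jnp.ones((2, 3)), "b": jnp.zeros((3,))}

# replicate() adds a leading axis of size jax.local_device_count()
# to every leaf so that pmap can run one copy per device.
replicated = jax_utils.replicate(params)
assert replicated["w"].shape == (jax.local_device_count(), 2, 3)

# get_params_to_save() inverts this: take replica 0 of every leaf
# (the pmean over the "batch" axis in train_step keeps all replicas
# identical) and move the result to host memory for checkpointing.
host_params = jax.device_get(jax.tree_util.tree_map(lambda x: x[0], replicated))
assert host_params["w"].shape == (2, 3)

Indexing replica 0 rather than averaging is safe here only because every optimizer step applies the same pmean-synchronized gradients on every device.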
- - # In distributed training, the load_dataset function guarantees that only one local process can concurrently - # download the dataset. - if args.dataset_name is not None: - # Downloading and loading a dataset from the hub. - dataset = load_dataset( - args.dataset_name, - args.dataset_config_name, - cache_dir=args.cache_dir, - ) - else: - data_files = {} - if args.train_data_dir is not None: - data_files["train"] = os.path.join(args.train_data_dir, "**") - dataset = load_dataset( - "imagefolder", - data_files=data_files, - cache_dir=args.cache_dir, - ) - # See more about loading custom images at - # https://huggingface.co/docs/datasets/v2.4.0/en/image_load#imagefolder - - # Preprocessing the datasets. - # We need to tokenize inputs and targets. - column_names = dataset["train"].column_names - - # 6. Get the column names for input/target. - dataset_columns = dataset_name_mapping.get(args.dataset_name, None) - if args.image_column is None: - image_column = dataset_columns[0] if dataset_columns is not None else column_names[0] - else: - image_column = args.image_column - if image_column not in column_names: - raise ValueError( - f"--image_column' value '{args.image_column}' needs to be one of: {', '.join(column_names)}" - ) - if args.caption_column is None: - caption_column = dataset_columns[1] if dataset_columns is not None else column_names[1] - else: - caption_column = args.caption_column - if caption_column not in column_names: - raise ValueError( - f"--caption_column' value '{args.caption_column}' needs to be one of: {', '.join(column_names)}" - ) - - # Preprocessing the datasets. - # We need to tokenize input captions and transform the images. - def tokenize_captions(examples, is_train=True): - captions = [] - for caption in examples[caption_column]: - if isinstance(caption, str): - captions.append(caption) - elif isinstance(caption, (list, np.ndarray)): - # take a random caption if there are multiple - captions.append(random.choice(caption) if is_train else caption[0]) - else: - raise ValueError( - f"Caption column `{caption_column}` should contain either strings or lists of strings." 
- ) - inputs = tokenizer(captions, max_length=tokenizer.model_max_length, padding="do_not_pad", truncation=True) - input_ids = inputs.input_ids - return input_ids - - train_transforms = transforms.Compose( - [ - transforms.Resize(args.resolution, interpolation=transforms.InterpolationMode.BILINEAR), - transforms.CenterCrop(args.resolution) if args.center_crop else transforms.RandomCrop(args.resolution), - transforms.RandomHorizontalFlip() if args.random_flip else transforms.Lambda(lambda x: x), - transforms.ToTensor(), - transforms.Normalize([0.5], [0.5]), - ] - ) - - def preprocess_train(examples): - images = [image.convert("RGB") for image in examples[image_column]] - examples["pixel_values"] = [train_transforms(image) for image in images] - examples["input_ids"] = tokenize_captions(examples) - - return examples - - if args.max_train_samples is not None: - dataset["train"] = dataset["train"].shuffle(seed=args.seed).select(range(args.max_train_samples)) - # Set the training transforms - train_dataset = dataset["train"].with_transform(preprocess_train) - - def collate_fn(examples): - pixel_values = torch.stack([example["pixel_values"] for example in examples]) - pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float() - input_ids = [example["input_ids"] for example in examples] - - padded_tokens = tokenizer.pad( - {"input_ids": input_ids}, padding="max_length", max_length=tokenizer.model_max_length, return_tensors="pt" - ) - batch = { - "pixel_values": pixel_values, - "input_ids": padded_tokens.input_ids, - } - batch = {k: v.numpy() for k, v in batch.items()} - - return batch - - total_train_batch_size = args.train_batch_size * jax.local_device_count() - train_dataloader = torch.utils.data.DataLoader( - train_dataset, shuffle=True, collate_fn=collate_fn, batch_size=total_train_batch_size, drop_last=True - ) - - weight_dtype = jnp.float32 - if args.mixed_precision == "fp16": - weight_dtype = jnp.float16 - elif args.mixed_precision == "bf16": - weight_dtype = jnp.bfloat16 - - # Load models and create wrapper for stable diffusion - tokenizer = CLIPTokenizer.from_pretrained( - args.pretrained_model_name_or_path, revision=args.revision, subfolder="tokenizer" - ) - text_encoder = FlaxCLIPTextModel.from_pretrained( - args.pretrained_model_name_or_path, revision=args.revision, subfolder="text_encoder", dtype=weight_dtype - ) - vae, vae_params = FlaxAutoencoderKL.from_pretrained( - args.pretrained_model_name_or_path, revision=args.revision, subfolder="vae", dtype=weight_dtype - ) - unet, unet_params = FlaxUNet2DConditionModel.from_pretrained( - args.pretrained_model_name_or_path, revision=args.revision, subfolder="unet", dtype=weight_dtype - ) - - # Optimization - if args.scale_lr: - args.learning_rate = args.learning_rate * total_train_batch_size - - constant_scheduler = optax.constant_schedule(args.learning_rate) - - adamw = optax.adamw( - learning_rate=constant_scheduler, - b1=args.adam_beta1, - b2=args.adam_beta2, - eps=args.adam_epsilon, - weight_decay=args.adam_weight_decay, - ) - - optimizer = optax.chain( - optax.clip_by_global_norm(args.max_grad_norm), - adamw, - ) - - state = train_state.TrainState.create(apply_fn=unet.__call__, params=unet_params, tx=optimizer) - - noise_scheduler = FlaxDDPMScheduler( - beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000 - ) - noise_scheduler_state = noise_scheduler.create_state() - - # Initialize our training - rng = jax.random.PRNGKey(args.seed) - train_rngs = jax.random.split(rng, 
jax.local_device_count()) - - def train_step(state, text_encoder_params, vae_params, batch, train_rng): - dropout_rng, sample_rng, new_train_rng = jax.random.split(train_rng, 3) - - def compute_loss(params): - # Convert images to latent space - vae_outputs = vae.apply( - {"params": vae_params}, batch["pixel_values"], deterministic=True, method=vae.encode - ) - latents = vae_outputs.latent_dist.sample(sample_rng) - # (NHWC) -> (NCHW) - latents = jnp.transpose(latents, (0, 3, 1, 2)) - latents = latents * vae.config.scaling_factor - - # Sample noise that we'll add to the latents - noise_rng, timestep_rng = jax.random.split(sample_rng) - noise = jax.random.normal(noise_rng, latents.shape) - # Sample a random timestep for each image - bsz = latents.shape[0] - timesteps = jax.random.randint( - timestep_rng, - (bsz,), - 0, - noise_scheduler.config.num_train_timesteps, - ) - - # Add noise to the latents according to the noise magnitude at each timestep - # (this is the forward diffusion process) - noisy_latents = noise_scheduler.add_noise(noise_scheduler_state, latents, noise, timesteps) - - # Get the text embedding for conditioning - encoder_hidden_states = text_encoder( - batch["input_ids"], - params=text_encoder_params, - train=False, - )[0] - - # Predict the noise residual and compute loss - model_pred = unet.apply( - {"params": params}, noisy_latents, timesteps, encoder_hidden_states, train=True - ).sample - - # Get the target for loss depending on the prediction type - if noise_scheduler.config.prediction_type == "epsilon": - target = noise - elif noise_scheduler.config.prediction_type == "v_prediction": - target = noise_scheduler.get_velocity(noise_scheduler_state, latents, noise, timesteps) - else: - raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}") - - loss = (target - model_pred) ** 2 - loss = loss.mean() - - return loss - - grad_fn = jax.value_and_grad(compute_loss) - loss, grad = grad_fn(state.params) - grad = jax.lax.pmean(grad, "batch") - - new_state = state.apply_gradients(grads=grad) - - metrics = {"loss": loss} - metrics = jax.lax.pmean(metrics, axis_name="batch") - - return new_state, metrics, new_train_rng - - # Create parallel version of the train step - p_train_step = jax.pmap(train_step, "batch", donate_argnums=(0,)) - - # Replicate the train state on each device - state = jax_utils.replicate(state) - text_encoder_params = jax_utils.replicate(text_encoder.params) - vae_params = jax_utils.replicate(vae_params) - - # Train! - num_update_steps_per_epoch = math.ceil(len(train_dataloader)) - - # Scheduler and math around the number of training steps. - if args.max_train_steps is None: - args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch - - args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) - - logger.info("***** Running training *****") - logger.info(f" Num examples = {len(train_dataset)}") - logger.info(f" Num Epochs = {args.num_train_epochs}") - logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") - logger.info(f" Total train batch size (w. parallel & distributed) = {total_train_batch_size}") - logger.info(f" Total optimization steps = {args.max_train_steps}") - - global_step = 0 - - epochs = tqdm(range(args.num_train_epochs), desc="Epoch ... 
", position=0) - for epoch in epochs: - # ======================== Training ================================ - - train_metrics = [] - - steps_per_epoch = len(train_dataset) // total_train_batch_size - train_step_progress_bar = tqdm(total=steps_per_epoch, desc="Training...", position=1, leave=False) - # train - for batch in train_dataloader: - batch = shard(batch) - state, train_metric, train_rngs = p_train_step(state, text_encoder_params, vae_params, batch, train_rngs) - train_metrics.append(train_metric) - - train_step_progress_bar.update(1) - - global_step += 1 - if global_step >= args.max_train_steps: - break - - train_metric = jax_utils.unreplicate(train_metric) - - train_step_progress_bar.close() - epochs.write(f"Epoch... ({epoch + 1}/{args.num_train_epochs} | Loss: {train_metric['loss']})") - - # Create the pipeline using using the trained modules and save it. - if jax.process_index() == 0: - scheduler = FlaxPNDMScheduler( - beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", skip_prk_steps=True - ) - safety_checker = FlaxStableDiffusionSafetyChecker.from_pretrained( - "CompVis/stable-diffusion-safety-checker", from_pt=True - ) - pipeline = FlaxStableDiffusionPipeline( - text_encoder=text_encoder, - vae=vae, - unet=unet, - tokenizer=tokenizer, - scheduler=scheduler, - safety_checker=safety_checker, - feature_extractor=CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32"), - ) - - pipeline.save_pretrained( - args.output_dir, - params={ - "text_encoder": get_params_to_save(text_encoder_params), - "vae": get_params_to_save(vae_params), - "unet": get_params_to_save(state.params), - "safety_checker": safety_checker.params, - }, - ) - - if args.push_to_hub: - upload_folder( - repo_id=repo_id, - folder_path=args.output_dir, - commit_message="End of training", - ignore_patterns=["step_*", "epoch_*"], - ) - - -if __name__ == "__main__": - main() diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_controlnet.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_controlnet.py deleted file mode 100644 index c7555e2ebad4c7f6045f3975b61f271a97ec8587..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_controlnet.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright 2023 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# NOTE: This file is deprecated and will be removed in a future version. 
-# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works -from ...utils import deprecate -from ..controlnet.multicontrolnet import MultiControlNetModel  # noqa: F401 -from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline  # noqa: F401 - - -deprecate( -    "stable diffusion controlnet", -    "0.22.0", -    "Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.", -    standard_warn=False, -    stacklevel=3, -) diff --git a/spaces/Andy1621/uniformer_image_detection/configs/_base_/models/mask_rcnn_r50_fpn.py b/spaces/Andy1621/uniformer_image_detection/configs/_base_/models/mask_rcnn_r50_fpn.py deleted file mode 100644 index 6fc7908249e013376b343c5fc136cbbe5ff29390..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/configs/_base_/models/mask_rcnn_r50_fpn.py +++ /dev/null @@ -1,120 +0,0 @@ -# model settings -model = dict( -    type='MaskRCNN', -    pretrained='torchvision://resnet50', -    backbone=dict( -        type='ResNet', -        depth=50, -        num_stages=4, -        out_indices=(0, 1, 2, 3), -        frozen_stages=1, -        norm_cfg=dict(type='BN', requires_grad=True), -        norm_eval=True, -        style='pytorch'), -    neck=dict( -        type='FPN', -        in_channels=[256, 512, 1024, 2048], -        out_channels=256, -        num_outs=5), -    rpn_head=dict( -        type='RPNHead', -        in_channels=256, -        feat_channels=256, -        anchor_generator=dict( -            type='AnchorGenerator', -            scales=[8], -            ratios=[0.5, 1.0, 2.0], -            strides=[4, 8, 16, 32, 64]), -        bbox_coder=dict( -            type='DeltaXYWHBBoxCoder', -            target_means=[.0, .0, .0, .0], -            target_stds=[1.0, 1.0, 1.0, 1.0]), -        loss_cls=dict( -            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), -        loss_bbox=dict(type='L1Loss', loss_weight=1.0)), -    roi_head=dict( -        type='StandardRoIHead', -        bbox_roi_extractor=dict( -            type='SingleRoIExtractor', -            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), -            out_channels=256, -            featmap_strides=[4, 8, 16, 32]), -        bbox_head=dict( -            type='Shared2FCBBoxHead', -            in_channels=256, -            fc_out_channels=1024, -            roi_feat_size=7, -            num_classes=80, -            bbox_coder=dict( -                type='DeltaXYWHBBoxCoder', -                target_means=[0., 0., 0., 0.], -                target_stds=[0.1, 0.1, 0.2, 0.2]), -            reg_class_agnostic=False, -            loss_cls=dict( -                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), -            loss_bbox=dict(type='L1Loss', loss_weight=1.0)), -        mask_roi_extractor=dict( -            type='SingleRoIExtractor', -            roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), -            out_channels=256, -            featmap_strides=[4, 8, 16, 32]), -        mask_head=dict( -            type='FCNMaskHead', -            num_convs=4, -            in_channels=256, -            conv_out_channels=256, -            num_classes=80, -            loss_mask=dict( -                type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))), -    # model training and testing settings -    train_cfg=dict( -        rpn=dict( -            assigner=dict( -                type='MaxIoUAssigner', -                pos_iou_thr=0.7, -                neg_iou_thr=0.3, -                min_pos_iou=0.3, -                match_low_quality=True, -                ignore_iof_thr=-1), -            sampler=dict( -                type='RandomSampler', -                num=256, -                pos_fraction=0.5, -                neg_pos_ub=-1, -                add_gt_as_proposals=False), -            allowed_border=-1, -            pos_weight=-1, -            debug=False), -        rpn_proposal=dict( -            nms_pre=2000, -            max_per_img=1000, -            nms=dict(type='nms', iou_threshold=0.7), -            min_bbox_size=0), -        rcnn=dict( -            assigner=dict( -                type='MaxIoUAssigner', -                pos_iou_thr=0.5, -                neg_iou_thr=0.5, -                min_pos_iou=0.5, -
match_low_quality=True, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=512, - pos_fraction=0.25, - neg_pos_ub=-1, - add_gt_as_proposals=True), - mask_size=28, - pos_weight=-1, - debug=False)), - test_cfg=dict( - rpn=dict( - nms_pre=1000, - max_per_img=1000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=dict( - score_thr=0.05, - nms=dict(type='nms', iou_threshold=0.5), - max_per_img=100, - mask_thr_binary=0.5))) diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/ann/ann_r50-d8_512x512_80k_ade20k.py b/spaces/Andy1621/uniformer_image_segmentation/configs/ann/ann_r50-d8_512x512_80k_ade20k.py deleted file mode 100644 index 5e04aa7c6ac050d119e07b715e2082f692e1a1de..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_segmentation/configs/ann/ann_r50-d8_512x512_80k_ade20k.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = [ - '../_base_/models/ann_r50-d8.py', '../_base_/datasets/ade20k.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' -] -model = dict( - decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/fcn/fcn_d6_r50b-d16_769x769_80k_cityscapes.py b/spaces/Andy1621/uniformer_image_segmentation/configs/fcn/fcn_d6_r50b-d16_769x769_80k_cityscapes.py deleted file mode 100644 index fba8948a031d46918e9c28cf94fff8e384228e82..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_segmentation/configs/fcn/fcn_d6_r50b-d16_769x769_80k_cityscapes.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './fcn_d6_r50-d16_769x769_80k_cityscapes.py' -model = dict(pretrained='torchvision://resnet50', backbone=dict(type='ResNet')) diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/pspnet/pspnet_r18b-d8_512x1024_80k_cityscapes.py b/spaces/Andy1621/uniformer_image_segmentation/configs/pspnet/pspnet_r18b-d8_512x1024_80k_cityscapes.py deleted file mode 100644 index abeeedf84387d7846a8a2c10480b94c9d8405559..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_segmentation/configs/pspnet/pspnet_r18b-d8_512x1024_80k_cityscapes.py +++ /dev/null @@ -1,9 +0,0 @@ -_base_ = './pspnet_r50-d8_512x1024_80k_cityscapes.py' -model = dict( - pretrained='torchvision://resnet18', - backbone=dict(type='ResNet', depth=18), - decode_head=dict( - in_channels=512, - channels=128, - ), - auxiliary_head=dict(in_channels=256, channels=64)) diff --git a/spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/guided_diffusion/model-card.md b/spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/guided_diffusion/model-card.md deleted file mode 100644 index 54fc09645dcffefdaa6a31e386b2af242c4d3719..0000000000000000000000000000000000000000 --- a/spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/guided_diffusion/model-card.md +++ /dev/null @@ -1,59 +0,0 @@ -# Overview - -These are diffusion models and noised image classifiers described in the paper [Diffusion Models Beat GANs on Image Synthesis](https://arxiv.org/abs/2105.05233). 
-Included in this release are the following models: - - * Noisy ImageNet classifiers at resolutions 64x64, 128x128, 256x256, 512x512 - * A class-unconditional ImageNet diffusion model at resolution 256x256 - * Class conditional ImageNet diffusion models at 64x64, 128x128, 256x256, 512x512 resolutions - * Class-conditional ImageNet upsampling diffusion models: 64x64->256x256, 128x128->512x512 - * Diffusion models trained on three LSUN classes at 256x256 resolution: cat, horse, bedroom - -# Datasets - -All of the models we are releasing were either trained on the [ILSVRC 2012 subset of ImageNet](http://www.image-net.org/challenges/LSVRC/2012/) or on single classes of [LSUN](https://arxiv.org/abs/1506.03365). -Here, we describe characteristics of these datasets which impact model behavior: - -**LSUN**: This dataset was collected in 2015 using a combination of human labeling (from Amazon Mechanical Turk) and automated data labeling. - * Each of the three classes we consider contains over a million images. - * The dataset creators found that the label accuracy was roughly 90% across the entire LSUN dataset when measured by trained experts. - * Images are scraped from the internet, and LSUN cat images in particular often follow a “meme” format. - * We found that there are occasionally humans in these photos, including faces, especially within the cat class. - -**ILSVRC 2012 subset of ImageNet**: This dataset was curated in 2012 and consists of roughly one million images, each belonging to one of 1000 classes. - * A large portion of the classes in this dataset are animals, plants, and other naturally-occurring objects. - * Many images contain humans, although usually these humans aren’t reflected by the class label (e.g. the class “Tench, tinca tinca” contains many photos of people holding fish). - -# Performance - -These models are intended to generate samples consistent with their training distributions. -This has been measured in terms of FID, Precision, and Recall. -These metrics all rely on the representations of a [pre-trained Inception-V3 model](https://arxiv.org/abs/1512.00567), -which was trained on ImageNet, and so is likely to focus more on the ImageNet classes (such as animals) than on other visual features (such as human faces). - -Qualitatively, the samples produced by these models often look highly realistic, especially when a diffusion model is combined with a noisy classifier. - -# Intended Use - -These models are intended to be used for research purposes only. -In particular, they can be used as a baseline for generative modeling research, or as a starting point to build off of for such research. - -These models are not intended to be commercially deployed. -Additionally, they are not intended to be used to create propaganda or offensive imagery. - -Before releasing these models, we probed their ability to ease the creation of targeted imagery, since doing so could be potentially harmful. -We did this either by fine-tuning our ImageNet models on a target LSUN class, or through classifier guidance with publicly available [CLIP models](https://github.com/openai/CLIP). - * To probe fine-tuning capabilities, we restricted our compute budget to roughly $100 and tried both standard fine-tuning, -and a diffusion-specific approach where we train a specialized classifier for the LSUN class. 
The resulting FIDs were significantly worse than publicly available GAN models, indicating that fine-tuning an ImageNet diffusion model does not significantly lower the cost of image generation. - * To probe guidance with CLIP, we tried two approaches for using pre-trained CLIP models for classifier guidance. Either we fed the noised image to CLIP directly and used its gradients, or we fed the diffusion model's denoised prediction to the CLIP model and differentiated through the whole process. In both cases, we found that it was difficult to recover information from the CLIP model, indicating that these diffusion models are unlikely to make it significantly easier to extract knowledge from CLIP compared to existing GAN models. - -# Limitations - -These models sometimes produce highly unrealistic outputs, particularly when generating images containing human faces. -This may stem from ImageNet's emphasis on non-human objects. - -While classifier guidance can improve sample quality, it reduces diversity, resulting in some modes of the data distribution being underrepresented. -This can potentially amplify existing biases in the training dataset such as gender and racial biases. - -Because ImageNet and LSUN contain images from the internet, they include photos of real people, and the model may have memorized some of the information contained in these photos. -However, these images are already publicly available, and existing generative models trained on ImageNet have not demonstrated significant leakage of this information. \ No newline at end of file diff --git a/spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/guided_diffusion/scripts/image_train.py b/spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/guided_diffusion/scripts/image_train.py deleted file mode 100644 index 38ec777d8d1ad625156feca9100beb15d72965be..0000000000000000000000000000000000000000 --- a/spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/guided_diffusion/scripts/image_train.py +++ /dev/null @@ -1,83 +0,0 @@ -""" -Train a diffusion model on images. 
-""" - -import argparse - -from guided_diffusion import dist_util, logger -from guided_diffusion.image_datasets import load_data -from guided_diffusion.resample import create_named_schedule_sampler -from guided_diffusion.script_util import ( - model_and_diffusion_defaults, - create_model_and_diffusion, - args_to_dict, - add_dict_to_argparser, -) -from guided_diffusion.train_util import TrainLoop - - -def main(): - args = create_argparser().parse_args() - - dist_util.setup_dist() - logger.configure() - - logger.log("creating model and diffusion...") - model, diffusion = create_model_and_diffusion( - **args_to_dict(args, model_and_diffusion_defaults().keys()) - ) - model.to(dist_util.dev()) - schedule_sampler = create_named_schedule_sampler(args.schedule_sampler, diffusion) - - logger.log("creating data loader...") - data = load_data( - data_dir=args.data_dir, - batch_size=args.batch_size, - image_size=args.image_size, - class_cond=args.class_cond, - ) - - logger.log("training...") - TrainLoop( - model=model, - diffusion=diffusion, - data=data, - batch_size=args.batch_size, - microbatch=args.microbatch, - lr=args.lr, - ema_rate=args.ema_rate, - log_interval=args.log_interval, - save_interval=args.save_interval, - resume_checkpoint=args.resume_checkpoint, - use_fp16=args.use_fp16, - fp16_scale_growth=args.fp16_scale_growth, - schedule_sampler=schedule_sampler, - weight_decay=args.weight_decay, - lr_anneal_steps=args.lr_anneal_steps, - ).run_loop() - - -def create_argparser(): - defaults = dict( - data_dir="", - schedule_sampler="uniform", - lr=1e-4, - weight_decay=0.0, - lr_anneal_steps=0, - batch_size=1, - microbatch=-1, # -1 disables microbatches - ema_rate="0.9999", # comma-separated list of EMA values - log_interval=10, - save_interval=10000, - resume_checkpoint="", - use_fp16=False, - fp16_scale_growth=1e-3, - ) - defaults.update(model_and_diffusion_defaults()) - parser = argparse.ArgumentParser() - add_dict_to_argparser(parser, defaults) - return parser - - -if __name__ == "__main__": - main() diff --git a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/core/seg/sampler/ohem_pixel_sampler.py b/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/core/seg/sampler/ohem_pixel_sampler.py deleted file mode 100644 index 88bb10d44026ba9f21756eaea9e550841cd59b9f..0000000000000000000000000000000000000000 --- a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/core/seg/sampler/ohem_pixel_sampler.py +++ /dev/null @@ -1,76 +0,0 @@ -import torch -import torch.nn.functional as F - -from ..builder import PIXEL_SAMPLERS -from .base_pixel_sampler import BasePixelSampler - - -@PIXEL_SAMPLERS.register_module() -class OHEMPixelSampler(BasePixelSampler): - """Online Hard Example Mining Sampler for segmentation. - - Args: - context (nn.Module): The context of sampler, subclass of - :obj:`BaseDecodeHead`. - thresh (float, optional): The threshold for hard example selection. - Below which, are prediction with low confidence. If not - specified, the hard examples will be pixels of top ``min_kept`` - loss. Default: None. - min_kept (int, optional): The minimum number of predictions to keep. - Default: 100000. - """ - - def __init__(self, context, thresh=None, min_kept=100000): - super(OHEMPixelSampler, self).__init__() - self.context = context - assert min_kept > 1 - self.thresh = thresh - self.min_kept = min_kept - - def sample(self, seg_logit, seg_label): - """Sample pixels that have high loss or with low prediction confidence. 
- - Args: - seg_logit (torch.Tensor): segmentation logits, shape (N, C, H, W) - seg_label (torch.Tensor): segmentation label, shape (N, 1, H, W) - - Returns: - torch.Tensor: segmentation weight, shape (N, H, W) - """ - with torch.no_grad(): - assert seg_logit.shape[2:] == seg_label.shape[2:] - assert seg_label.shape[1] == 1 - seg_label = seg_label.squeeze(1).long() - batch_kept = self.min_kept * seg_label.size(0) - valid_mask = seg_label != self.context.ignore_index - seg_weight = seg_logit.new_zeros(size=seg_label.size()) - valid_seg_weight = seg_weight[valid_mask] - if self.thresh is not None: - seg_prob = F.softmax(seg_logit, dim=1) - - tmp_seg_label = seg_label.clone().unsqueeze(1) - tmp_seg_label[tmp_seg_label == self.context.ignore_index] = 0 - seg_prob = seg_prob.gather(1, tmp_seg_label).squeeze(1) - sort_prob, sort_indices = seg_prob[valid_mask].sort() - - if sort_prob.numel() > 0: - min_threshold = sort_prob[min(batch_kept, - sort_prob.numel() - 1)] - else: - min_threshold = 0.0 - threshold = max(min_threshold, self.thresh) - valid_seg_weight[seg_prob[valid_mask] < threshold] = 1. - else: - losses = self.context.loss_decode( - seg_logit, - seg_label, - weight=None, - ignore_index=self.context.ignore_index, - reduction_override='none') - # faster than topk according to https://github.com/pytorch/pytorch/issues/22812 # noqa - _, sort_indices = losses[valid_mask].sort(descending=True) - valid_seg_weight[sort_indices[:batch_kept]] = 1. - - seg_weight[valid_mask] = valid_seg_weight - - return seg_weight diff --git a/spaces/Anonymous-sub/Rerender/ControlNet/ldm/modules/attention.py b/spaces/Anonymous-sub/Rerender/ControlNet/ldm/modules/attention.py deleted file mode 100644 index 509cd873768f0dd75a75ab3fcdd652822b12b59f..0000000000000000000000000000000000000000 --- a/spaces/Anonymous-sub/Rerender/ControlNet/ldm/modules/attention.py +++ /dev/null @@ -1,341 +0,0 @@ -from inspect import isfunction -import math -import torch -import torch.nn.functional as F -from torch import nn, einsum -from einops import rearrange, repeat -from typing import Optional, Any - -from ldm.modules.diffusionmodules.util import checkpoint - - -try: - import xformers - import xformers.ops - XFORMERS_IS_AVAILBLE = True -except: - XFORMERS_IS_AVAILBLE = False - -# CrossAttn precision handling -import os -_ATTN_PRECISION = os.environ.get("ATTN_PRECISION", "fp32") - -def exists(val): - return val is not None - - -def uniq(arr): - return{el: True for el in arr}.keys() - - -def default(val, d): - if exists(val): - return val - return d() if isfunction(d) else d - - -def max_neg_value(t): - return -torch.finfo(t.dtype).max - - -def init_(tensor): - dim = tensor.shape[-1] - std = 1 / math.sqrt(dim) - tensor.uniform_(-std, std) - return tensor - - -# feedforward -class GEGLU(nn.Module): - def __init__(self, dim_in, dim_out): - super().__init__() - self.proj = nn.Linear(dim_in, dim_out * 2) - - def forward(self, x): - x, gate = self.proj(x).chunk(2, dim=-1) - return x * F.gelu(gate) - - -class FeedForward(nn.Module): - def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.): - super().__init__() - inner_dim = int(dim * mult) - dim_out = default(dim_out, dim) - project_in = nn.Sequential( - nn.Linear(dim, inner_dim), - nn.GELU() - ) if not glu else GEGLU(dim, inner_dim) - - self.net = nn.Sequential( - project_in, - nn.Dropout(dropout), - nn.Linear(inner_dim, dim_out) - ) - - def forward(self, x): - return self.net(x) - - -def zero_module(module): - """ - Zero out the parameters of a module and return it. 
- """ - for p in module.parameters(): - p.detach().zero_() - return module - - -def Normalize(in_channels): - return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True) - - -class SpatialSelfAttention(nn.Module): - def __init__(self, in_channels): - super().__init__() - self.in_channels = in_channels - - self.norm = Normalize(in_channels) - self.q = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - self.k = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - self.v = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - self.proj_out = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - - def forward(self, x): - h_ = x - h_ = self.norm(h_) - q = self.q(h_) - k = self.k(h_) - v = self.v(h_) - - # compute attention - b,c,h,w = q.shape - q = rearrange(q, 'b c h w -> b (h w) c') - k = rearrange(k, 'b c h w -> b c (h w)') - w_ = torch.einsum('bij,bjk->bik', q, k) - - w_ = w_ * (int(c)**(-0.5)) - w_ = torch.nn.functional.softmax(w_, dim=2) - - # attend to values - v = rearrange(v, 'b c h w -> b c (h w)') - w_ = rearrange(w_, 'b i j -> b j i') - h_ = torch.einsum('bij,bjk->bik', v, w_) - h_ = rearrange(h_, 'b c (h w) -> b c h w', h=h) - h_ = self.proj_out(h_) - - return x+h_ - - -class CrossAttention(nn.Module): - def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.): - super().__init__() - inner_dim = dim_head * heads - context_dim = default(context_dim, query_dim) - - self.scale = dim_head ** -0.5 - self.heads = heads - - self.to_q = nn.Linear(query_dim, inner_dim, bias=False) - self.to_k = nn.Linear(context_dim, inner_dim, bias=False) - self.to_v = nn.Linear(context_dim, inner_dim, bias=False) - - self.to_out = nn.Sequential( - nn.Linear(inner_dim, query_dim), - nn.Dropout(dropout) - ) - - def forward(self, x, context=None, mask=None): - h = self.heads - - q = self.to_q(x) - context = default(context, x) - k = self.to_k(context) - v = self.to_v(context) - - q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v)) - - # force cast to fp32 to avoid overflowing - if _ATTN_PRECISION =="fp32": - with torch.autocast(enabled=False, device_type = 'cuda'): - q, k = q.float(), k.float() - sim = einsum('b i d, b j d -> b i j', q, k) * self.scale - else: - sim = einsum('b i d, b j d -> b i j', q, k) * self.scale - - del q, k - - if exists(mask): - mask = rearrange(mask, 'b ... -> b (...)') - max_neg_value = -torch.finfo(sim.dtype).max - mask = repeat(mask, 'b j -> (b h) () j', h=h) - sim.masked_fill_(~mask, max_neg_value) - - # attention, what we cannot get enough of - sim = sim.softmax(dim=-1) - - out = einsum('b i j, b j d -> b i d', sim, v) - out = rearrange(out, '(b h) n d -> b n (h d)', h=h) - return self.to_out(out) - - -class MemoryEfficientCrossAttention(nn.Module): - # https://github.com/MatthieuTPHR/diffusers/blob/d80b531ff8060ec1ea982b65a1b8df70f73aa67c/src/diffusers/models/attention.py#L223 - def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.0): - super().__init__() - print(f"Setting up {self.__class__.__name__}. 
Query dim is {query_dim}, context_dim is {context_dim} and using " - f"{heads} heads.") - inner_dim = dim_head * heads - context_dim = default(context_dim, query_dim) - - self.heads = heads - self.dim_head = dim_head - - self.to_q = nn.Linear(query_dim, inner_dim, bias=False) - self.to_k = nn.Linear(context_dim, inner_dim, bias=False) - self.to_v = nn.Linear(context_dim, inner_dim, bias=False) - - self.to_out = nn.Sequential(nn.Linear(inner_dim, query_dim), nn.Dropout(dropout)) - self.attention_op: Optional[Any] = None - - def forward(self, x, context=None, mask=None): - q = self.to_q(x) - context = default(context, x) - k = self.to_k(context) - v = self.to_v(context) - - b, _, _ = q.shape - q, k, v = map( - lambda t: t.unsqueeze(3) - .reshape(b, t.shape[1], self.heads, self.dim_head) - .permute(0, 2, 1, 3) - .reshape(b * self.heads, t.shape[1], self.dim_head) - .contiguous(), - (q, k, v), - ) - - # actually compute the attention, what we cannot get enough of - out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None, op=self.attention_op) - - if exists(mask): - raise NotImplementedError - out = ( - out.unsqueeze(0) - .reshape(b, self.heads, out.shape[1], self.dim_head) - .permute(0, 2, 1, 3) - .reshape(b, out.shape[1], self.heads * self.dim_head) - ) - return self.to_out(out) - - -class BasicTransformerBlock(nn.Module): - ATTENTION_MODES = { - "softmax": CrossAttention, # vanilla attention - "softmax-xformers": MemoryEfficientCrossAttention - } - def __init__(self, dim, n_heads, d_head, dropout=0., context_dim=None, gated_ff=True, checkpoint=True, - disable_self_attn=False): - super().__init__() - attn_mode = "softmax-xformers" if XFORMERS_IS_AVAILBLE else "softmax" - assert attn_mode in self.ATTENTION_MODES - attn_cls = self.ATTENTION_MODES[attn_mode] - self.disable_self_attn = disable_self_attn - self.attn1 = attn_cls(query_dim=dim, heads=n_heads, dim_head=d_head, dropout=dropout, - context_dim=context_dim if self.disable_self_attn else None) # is a self-attention if not self.disable_self_attn - self.ff = FeedForward(dim, dropout=dropout, glu=gated_ff) - self.attn2 = attn_cls(query_dim=dim, context_dim=context_dim, - heads=n_heads, dim_head=d_head, dropout=dropout) # is self-attn if context is none - self.norm1 = nn.LayerNorm(dim) - self.norm2 = nn.LayerNorm(dim) - self.norm3 = nn.LayerNorm(dim) - self.checkpoint = checkpoint - - def forward(self, x, context=None): - return checkpoint(self._forward, (x, context), self.parameters(), self.checkpoint) - - def _forward(self, x, context=None): - x = self.attn1(self.norm1(x), context=context if self.disable_self_attn else None) + x - x = self.attn2(self.norm2(x), context=context) + x - x = self.ff(self.norm3(x)) + x - return x - - -class SpatialTransformer(nn.Module): - """ - Transformer block for image-like data. - First, project the input (aka embedding) - and reshape to b, t, d. - Then apply standard transformer action. 
- Finally, reshape to image - NEW: use_linear for more efficiency instead of the 1x1 convs - """ - def __init__(self, in_channels, n_heads, d_head, - depth=1, dropout=0., context_dim=None, - disable_self_attn=False, use_linear=False, - use_checkpoint=True): - super().__init__() - if exists(context_dim) and not isinstance(context_dim, list): - context_dim = [context_dim] - self.in_channels = in_channels - inner_dim = n_heads * d_head - self.norm = Normalize(in_channels) - if not use_linear: - self.proj_in = nn.Conv2d(in_channels, - inner_dim, - kernel_size=1, - stride=1, - padding=0) - else: - self.proj_in = nn.Linear(in_channels, inner_dim) - - self.transformer_blocks = nn.ModuleList( - [BasicTransformerBlock(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim[d], - disable_self_attn=disable_self_attn, checkpoint=use_checkpoint) - for d in range(depth)] - ) - if not use_linear: - self.proj_out = zero_module(nn.Conv2d(inner_dim, - in_channels, - kernel_size=1, - stride=1, - padding=0)) - else: - self.proj_out = zero_module(nn.Linear(in_channels, inner_dim)) - self.use_linear = use_linear - - def forward(self, x, context=None): - # note: if no context is given, cross-attention defaults to self-attention - if not isinstance(context, list): - context = [context] - b, c, h, w = x.shape - x_in = x - x = self.norm(x) - if not self.use_linear: - x = self.proj_in(x) - x = rearrange(x, 'b c h w -> b (h w) c').contiguous() - if self.use_linear: - x = self.proj_in(x) - for i, block in enumerate(self.transformer_blocks): - x = block(x, context=context[i]) - if self.use_linear: - x = self.proj_out(x) - x = rearrange(x, 'b (h w) c -> b c h w', h=h, w=w).contiguous() - if not self.use_linear: - x = self.proj_out(x) - return x + x_in - diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/__init__.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/__init__.py deleted file mode 100644 index b22f7abb93b9d7aeee50829b35746aaa3f9f5feb..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/__init__.py +++ /dev/null @@ -1,120 +0,0 @@ -""" -pip._vendor is for vendoring dependencies of pip to prevent needing pip to -depend on something external. - -Files inside of pip._vendor should be considered immutable and should only be -updated to versions from upstream. -""" -from __future__ import absolute_import - -import glob -import os.path -import sys - -# Downstream redistributors which have debundled our dependencies should also -# patch this value to be true. This will trigger the additional patching -# to cause things like "six" to be available as pip. -DEBUNDLED = False - -# By default, look in this directory for a bunch of .whl files which we will -# add to the beginning of sys.path before attempting to import anything. This -# is done to support downstream re-distributors like Debian and Fedora who -# wish to create their own Wheels for our dependencies to aid in debundling. -WHEEL_DIR = os.path.abspath(os.path.dirname(__file__)) - - -# Define a small helper function to alias our vendored modules to the real ones -# if the vendored ones do not exist. This idea of this was taken from -# https://github.com/kennethreitz/requests/pull/2567. 
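The comment above names the trick the `vendored` helper below implements. A minimal, self-contained sketch of the same `sys.modules` aliasing idea, using only the standard library; the `mypkg._vendor.requests` name is hypothetical, purely for illustration:

import importlib
import sys

def alias_vendored(vendored_name: str, real_name: str) -> None:
    # Try the real (de-vendored) module first; if it is missing, do
    # nothing so the ImportError surfaces at the actual point of use.
    try:
        module = importlib.import_module(real_name)
    except ImportError:
        return
    # Register the real module under the vendored dotted name, then
    # attach it to the parent package, mirroring what vendored() does.
    sys.modules[vendored_name] = module
    parent, _, child = vendored_name.rpartition(".")
    if parent in sys.modules:
        setattr(sys.modules[parent], child, module)

# Hypothetical usage: after this call, `import mypkg._vendor.requests`
# resolves to the system-wide requests package.
# alias_vendored("mypkg._vendor.requests", "requests")

Silently swallowing the ImportError is deliberate, as the original comment below explains: failing later at the point of use produces a clearer error than failing during the aliasing pass.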
-def vendored(modulename): - vendored_name = "{0}.{1}".format(__name__, modulename) - - try: - __import__(modulename, globals(), locals(), level=0) - except ImportError: - # We can just silently allow import failures to pass here. If we - # got to this point it means that ``import pip._vendor.whatever`` - # failed and so did ``import whatever``. Since we're importing this - # upfront in an attempt to alias imports, not erroring here will - # just mean we get a regular import error whenever pip *actually* - # tries to import one of these modules to use it, which actually - # gives us a better error message than we would have otherwise - # gotten. - pass - else: - sys.modules[vendored_name] = sys.modules[modulename] - base, head = vendored_name.rsplit(".", 1) - setattr(sys.modules[base], head, sys.modules[modulename]) - - -# If we're operating in a debundled setup, then we want to go ahead and trigger -# the aliasing of our vendored libraries as well as looking for wheels to add -# to our sys.path. This will cause all of this code to be a no-op typically -# however downstream redistributors can enable it in a consistent way across -# all platforms. -if DEBUNDLED: - # Actually look inside of WHEEL_DIR to find .whl files and add them to the - # front of our sys.path. - sys.path[:] = glob.glob(os.path.join(WHEEL_DIR, "*.whl")) + sys.path - - # Actually alias all of our vendored dependencies. - vendored("cachecontrol") - vendored("certifi") - vendored("colorama") - vendored("distlib") - vendored("distro") - vendored("six") - vendored("six.moves") - vendored("six.moves.urllib") - vendored("six.moves.urllib.parse") - vendored("packaging") - vendored("packaging.version") - vendored("packaging.specifiers") - vendored("pep517") - vendored("pkg_resources") - vendored("platformdirs") - vendored("progress") - vendored("requests") - vendored("requests.exceptions") - vendored("requests.packages") - vendored("requests.packages.urllib3") - vendored("requests.packages.urllib3._collections") - vendored("requests.packages.urllib3.connection") - vendored("requests.packages.urllib3.connectionpool") - vendored("requests.packages.urllib3.contrib") - vendored("requests.packages.urllib3.contrib.ntlmpool") - vendored("requests.packages.urllib3.contrib.pyopenssl") - vendored("requests.packages.urllib3.exceptions") - vendored("requests.packages.urllib3.fields") - vendored("requests.packages.urllib3.filepost") - vendored("requests.packages.urllib3.packages") - vendored("requests.packages.urllib3.packages.ordered_dict") - vendored("requests.packages.urllib3.packages.six") - vendored("requests.packages.urllib3.packages.ssl_match_hostname") - vendored("requests.packages.urllib3.packages.ssl_match_hostname." 
- "_implementation") - vendored("requests.packages.urllib3.poolmanager") - vendored("requests.packages.urllib3.request") - vendored("requests.packages.urllib3.response") - vendored("requests.packages.urllib3.util") - vendored("requests.packages.urllib3.util.connection") - vendored("requests.packages.urllib3.util.request") - vendored("requests.packages.urllib3.util.response") - vendored("requests.packages.urllib3.util.retry") - vendored("requests.packages.urllib3.util.ssl_") - vendored("requests.packages.urllib3.util.timeout") - vendored("requests.packages.urllib3.util.url") - vendored("resolvelib") - vendored("rich") - vendored("rich.console") - vendored("rich.highlighter") - vendored("rich.logging") - vendored("rich.markup") - vendored("rich.progress") - vendored("rich.segment") - vendored("rich.style") - vendored("rich.text") - vendored("rich.traceback") - vendored("tenacity") - vendored("tomli") - vendored("urllib3") diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/box.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/box.py deleted file mode 100644 index 97d2a94445770e195b9fc73e904b920d5ff04104..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/box.py +++ /dev/null @@ -1,517 +0,0 @@ -import sys -from typing import TYPE_CHECKING, Iterable, List - -if sys.version_info >= (3, 8): - from typing import Literal -else: - from pip._vendor.typing_extensions import Literal # pragma: no cover - - -from ._loop import loop_last - -if TYPE_CHECKING: - from pip._vendor.rich.console import ConsoleOptions - - -class Box: - """Defines characters to render boxes. - - ┌─┬┐ top - │ ││ head - ├─┼┤ head_row - │ ││ mid - ├─┼┤ row - ├─┼┤ foot_row - │ ││ foot - └─┴┘ bottom - - Args: - box (str): Characters making up box. - ascii (bool, optional): True if this box uses ascii characters only. Default is False. - """ - - def __init__(self, box: str, *, ascii: bool = False) -> None: - self._box = box - self.ascii = ascii - line1, line2, line3, line4, line5, line6, line7, line8 = box.splitlines() - # top - self.top_left, self.top, self.top_divider, self.top_right = iter(line1) - # head - self.head_left, _, self.head_vertical, self.head_right = iter(line2) - # head_row - ( - self.head_row_left, - self.head_row_horizontal, - self.head_row_cross, - self.head_row_right, - ) = iter(line3) - - # mid - self.mid_left, _, self.mid_vertical, self.mid_right = iter(line4) - # row - self.row_left, self.row_horizontal, self.row_cross, self.row_right = iter(line5) - # foot_row - ( - self.foot_row_left, - self.foot_row_horizontal, - self.foot_row_cross, - self.foot_row_right, - ) = iter(line6) - # foot - self.foot_left, _, self.foot_vertical, self.foot_right = iter(line7) - # bottom - self.bottom_left, self.bottom, self.bottom_divider, self.bottom_right = iter( - line8 - ) - - def __repr__(self) -> str: - return "Box(...)" - - def __str__(self) -> str: - return self._box - - def substitute(self, options: "ConsoleOptions", safe: bool = True) -> "Box": - """Substitute this box for another if it won't render due to platform issues. - - Args: - options (ConsoleOptions): Console options used in rendering. - safe (bool, optional): Substitute this for another Box if there are known problems - displaying on the platform (currently only relevant on Windows). Default is True. - - Returns: - Box: A different Box or the same Box. 
- """ - box = self - if options.legacy_windows and safe: - box = LEGACY_WINDOWS_SUBSTITUTIONS.get(box, box) - if options.ascii_only and not box.ascii: - box = ASCII - return box - - def get_plain_headed_box(self) -> "Box": - """If this box uses special characters for the borders of the header, then - return the equivalent box that does not. - - Returns: - Box: The most similar Box that doesn't use header-specific box characters. - If the current Box already satisfies this criterion, then it's returned. - """ - return PLAIN_HEADED_SUBSTITUTIONS.get(self, self) - - def get_top(self, widths: Iterable[int]) -> str: - """Get the top of a simple box. - - Args: - widths (List[int]): Widths of columns. - - Returns: - str: A string of box characters. - """ - - parts: List[str] = [] - append = parts.append - append(self.top_left) - for last, width in loop_last(widths): - append(self.top * width) - if not last: - append(self.top_divider) - append(self.top_right) - return "".join(parts) - - def get_row( - self, - widths: Iterable[int], - level: Literal["head", "row", "foot", "mid"] = "row", - edge: bool = True, - ) -> str: - """Get the top of a simple box. - - Args: - width (List[int]): Widths of columns. - - Returns: - str: A string of box characters. - """ - if level == "head": - left = self.head_row_left - horizontal = self.head_row_horizontal - cross = self.head_row_cross - right = self.head_row_right - elif level == "row": - left = self.row_left - horizontal = self.row_horizontal - cross = self.row_cross - right = self.row_right - elif level == "mid": - left = self.mid_left - horizontal = " " - cross = self.mid_vertical - right = self.mid_right - elif level == "foot": - left = self.foot_row_left - horizontal = self.foot_row_horizontal - cross = self.foot_row_cross - right = self.foot_row_right - else: - raise ValueError("level must be 'head', 'row' or 'foot'") - - parts: List[str] = [] - append = parts.append - if edge: - append(left) - for last, width in loop_last(widths): - append(horizontal * width) - if not last: - append(cross) - if edge: - append(right) - return "".join(parts) - - def get_bottom(self, widths: Iterable[int]) -> str: - """Get the bottom of a simple box. - - Args: - widths (List[int]): Widths of columns. - - Returns: - str: A string of box characters. 
- """ - - parts: List[str] = [] - append = parts.append - append(self.bottom_left) - for last, width in loop_last(widths): - append(self.bottom * width) - if not last: - append(self.bottom_divider) - append(self.bottom_right) - return "".join(parts) - - -ASCII: Box = Box( - """\ -+--+ -| || -|-+| -| || -|-+| -|-+| -| || -+--+ -""", - ascii=True, -) - -ASCII2: Box = Box( - """\ -+-++ -| || -+-++ -| || -+-++ -+-++ -| || -+-++ -""", - ascii=True, -) - -ASCII_DOUBLE_HEAD: Box = Box( - """\ -+-++ -| || -+=++ -| || -+-++ -+-++ -| || -+-++ -""", - ascii=True, -) - -SQUARE: Box = Box( - """\ -┌─┬┐ -│ ││ -├─┼┤ -│ ││ -├─┼┤ -├─┼┤ -│ ││ -└─┴┘ -""" -) - -SQUARE_DOUBLE_HEAD: Box = Box( - """\ -┌─┬┐ -│ ││ -╞═╪╡ -│ ││ -├─┼┤ -├─┼┤ -│ ││ -└─┴┘ -""" -) - -MINIMAL: Box = Box( - """\ - ╷ - │ -╶─┼╴ - │ -╶─┼╴ -╶─┼╴ - │ - ╵ -""" -) - - -MINIMAL_HEAVY_HEAD: Box = Box( - """\ - ╷ - │ -╺━┿╸ - │ -╶─┼╴ -╶─┼╴ - │ - ╵ -""" -) - -MINIMAL_DOUBLE_HEAD: Box = Box( - """\ - ╷ - │ - ═╪ - │ - ─┼ - ─┼ - │ - ╵ -""" -) - - -SIMPLE: Box = Box( - """\ - - - ── - - - ── - - -""" -) - -SIMPLE_HEAD: Box = Box( - """\ - - - ── - - - - - -""" -) - - -SIMPLE_HEAVY: Box = Box( - """\ - - - ━━ - - - ━━ - - -""" -) - - -HORIZONTALS: Box = Box( - """\ - ── - - ── - - ── - ── - - ── -""" -) - -ROUNDED: Box = Box( - """\ -╭─┬╮ -│ ││ -├─┼┤ -│ ││ -├─┼┤ -├─┼┤ -│ ││ -╰─┴╯ -""" -) - -HEAVY: Box = Box( - """\ -┏━┳┓ -┃ ┃┃ -┣━╋┫ -┃ ┃┃ -┣━╋┫ -┣━╋┫ -┃ ┃┃ -┗━┻┛ -""" -) - -HEAVY_EDGE: Box = Box( - """\ -┏━┯┓ -┃ │┃ -┠─┼┨ -┃ │┃ -┠─┼┨ -┠─┼┨ -┃ │┃ -┗━┷┛ -""" -) - -HEAVY_HEAD: Box = Box( - """\ -┏━┳┓ -┃ ┃┃ -┡━╇┩ -│ ││ -├─┼┤ -├─┼┤ -│ ││ -└─┴┘ -""" -) - -DOUBLE: Box = Box( - """\ -╔═╦╗ -║ ║║ -╠═╬╣ -║ ║║ -╠═╬╣ -╠═╬╣ -║ ║║ -╚═╩╝ -""" -) - -DOUBLE_EDGE: Box = Box( - """\ -╔═╤╗ -║ │║ -╟─┼╢ -║ │║ -╟─┼╢ -╟─┼╢ -║ │║ -╚═╧╝ -""" -) - -MARKDOWN: Box = Box( - """\ - -| || -|-|| -| || -|-|| -|-|| -| || - -""", - ascii=True, -) - -# Map Boxes that don't render with raster fonts on to equivalent that do -LEGACY_WINDOWS_SUBSTITUTIONS = { - ROUNDED: SQUARE, - MINIMAL_HEAVY_HEAD: MINIMAL, - SIMPLE_HEAVY: SIMPLE, - HEAVY: SQUARE, - HEAVY_EDGE: SQUARE, - HEAVY_HEAD: SQUARE, -} - -# Map headed boxes to their headerless equivalents -PLAIN_HEADED_SUBSTITUTIONS = { - HEAVY_HEAD: SQUARE, - SQUARE_DOUBLE_HEAD: SQUARE, - MINIMAL_DOUBLE_HEAD: MINIMAL, - MINIMAL_HEAVY_HEAD: MINIMAL, - ASCII_DOUBLE_HEAD: ASCII2, -} - - -if __name__ == "__main__": # pragma: no cover - - from pip._vendor.rich.columns import Columns - from pip._vendor.rich.panel import Panel - - from . 
import box as box - from .console import Console - from .table import Table - from .text import Text - - console = Console(record=True) - - BOXES = [ - "ASCII", - "ASCII2", - "ASCII_DOUBLE_HEAD", - "SQUARE", - "SQUARE_DOUBLE_HEAD", - "MINIMAL", - "MINIMAL_HEAVY_HEAD", - "MINIMAL_DOUBLE_HEAD", - "SIMPLE", - "SIMPLE_HEAD", - "SIMPLE_HEAVY", - "HORIZONTALS", - "ROUNDED", - "HEAVY", - "HEAVY_EDGE", - "HEAVY_HEAD", - "DOUBLE", - "DOUBLE_EDGE", - "MARKDOWN", - ] - - console.print(Panel("[bold green]Box Constants", style="green"), justify="center") - console.print() - - columns = Columns(expand=True, padding=2) - for box_name in sorted(BOXES): - table = Table( - show_footer=True, style="dim", border_style="not dim", expand=True - ) - table.add_column("Header 1", "Footer 1") - table.add_column("Header 2", "Footer 2") - table.add_row("Cell", "Cell") - table.add_row("Cell", "Cell") - table.box = getattr(box, box_name) - table.title = Text(f"box.{box_name}", style="magenta") - columns.add_renderable(table) - console.print(columns) - - # console.save_svg("box.svg") diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/command/setopt.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/command/setopt.py deleted file mode 100644 index 6358c0451b2d0036e3821d897fb6f7ab436ee4a9..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/command/setopt.py +++ /dev/null @@ -1,149 +0,0 @@ -from distutils.util import convert_path -from distutils import log -from distutils.errors import DistutilsOptionError -import distutils -import os -import configparser - -from setuptools import Command - -__all__ = ['config_file', 'edit_config', 'option_base', 'setopt'] - - -def config_file(kind="local"): - """Get the filename of the distutils, local, global, or per-user config - - `kind` must be one of "local", "global", or "user" - """ - if kind == 'local': - return 'setup.cfg' - if kind == 'global': - return os.path.join( - os.path.dirname(distutils.__file__), 'distutils.cfg' - ) - if kind == 'user': - dot = os.name == 'posix' and '.' or '' - return os.path.expanduser(convert_path("~/%spydistutils.cfg" % dot)) - raise ValueError( - "config_file() type must be 'local', 'global', or 'user'", kind - ) - - -def edit_config(filename, settings, dry_run=False): - """Edit a configuration file to include `settings` - - `settings` is a dictionary of dictionaries or ``None`` values, keyed by - command/section name. A ``None`` value means to delete the entire section, - while a dictionary lists settings to be changed or deleted in that section. - A setting of ``None`` means to delete that setting. 
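-
-    For example, the following call (paths and values are illustrative) sets
-    one option and deletes a whole section::
-
-        edit_config('setup.cfg', {
-            'metadata': {'description': 'demo'},  # set [metadata] description
-            'bdist_wheel': None,  # delete the entire [bdist_wheel] section
-        }, dry_run=True)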
- """ - log.debug("Reading configuration from %s", filename) - opts = configparser.RawConfigParser() - opts.optionxform = lambda x: x - opts.read([filename]) - for section, options in settings.items(): - if options is None: - log.info("Deleting section [%s] from %s", section, filename) - opts.remove_section(section) - else: - if not opts.has_section(section): - log.debug("Adding new section [%s] to %s", section, filename) - opts.add_section(section) - for option, value in options.items(): - if value is None: - log.debug( - "Deleting %s.%s from %s", - section, option, filename - ) - opts.remove_option(section, option) - if not opts.options(section): - log.info("Deleting empty [%s] section from %s", - section, filename) - opts.remove_section(section) - else: - log.debug( - "Setting %s.%s to %r in %s", - section, option, value, filename - ) - opts.set(section, option, value) - - log.info("Writing %s", filename) - if not dry_run: - with open(filename, 'w') as f: - opts.write(f) - - -class option_base(Command): - """Abstract base class for commands that mess with config files""" - - user_options = [ - ('global-config', 'g', - "save options to the site-wide distutils.cfg file"), - ('user-config', 'u', - "save options to the current user's pydistutils.cfg file"), - ('filename=', 'f', - "configuration file to use (default=setup.cfg)"), - ] - - boolean_options = [ - 'global-config', 'user-config', - ] - - def initialize_options(self): - self.global_config = None - self.user_config = None - self.filename = None - - def finalize_options(self): - filenames = [] - if self.global_config: - filenames.append(config_file('global')) - if self.user_config: - filenames.append(config_file('user')) - if self.filename is not None: - filenames.append(self.filename) - if not filenames: - filenames.append(config_file('local')) - if len(filenames) > 1: - raise DistutilsOptionError( - "Must specify only one configuration file option", - filenames - ) - self.filename, = filenames - - -class setopt(option_base): - """Save command-line options to a file""" - - description = "set an option in setup.cfg or another config file" - - user_options = [ - ('command=', 'c', 'command to set an option for'), - ('option=', 'o', 'option to set'), - ('set-value=', 's', 'value of the option'), - ('remove', 'r', 'remove (unset) the value'), - ] + option_base.user_options - - boolean_options = option_base.boolean_options + ['remove'] - - def initialize_options(self): - option_base.initialize_options(self) - self.command = None - self.option = None - self.set_value = None - self.remove = None - - def finalize_options(self): - option_base.finalize_options(self) - if self.command is None or self.option is None: - raise DistutilsOptionError("Must specify --command *and* --option") - if self.set_value is None and not self.remove: - raise DistutilsOptionError("Must specify --set-value or --remove") - - def run(self): - edit_config( - self.filename, { - self.command: {self.option.replace('-', '_'): self.set_value} - }, - self.dry_run - ) diff --git a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/docs/notes/changelog.md b/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/docs/notes/changelog.md deleted file mode 100644 index 000e9f8898dba53f54121a5325ba5165e45ddea2..0000000000000000000000000000000000000000 --- a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/docs/notes/changelog.md +++ /dev/null @@ -1,48 +0,0 @@ -# Change Log and Backward Compatibility - -### Releases -See release 
logs at
-[https://github.com/facebookresearch/detectron2/releases](https://github.com/facebookresearch/detectron2/releases)
-for new updates.
-
-### Backward Compatibility
-
-Due to the research nature of what the library does, there might be backward incompatible changes.
-But we try to reduce users' disruption in the following ways:
-* APIs listed in [API documentation](https://detectron2.readthedocs.io/modules/index.html), including
-  function/class names, their arguments, and documented class attributes, are considered *stable* unless
-  otherwise noted in the documentation.
-  They are less likely to be broken, but if needed, will trigger a deprecation warning for a reasonable period
-  before getting broken, and will be documented in release logs.
-* Other functions/classes/attributes are considered internal, and are more likely to change.
-  However, we're aware that some of them may already be used by other projects, and in particular we may
-  use them for convenience among projects under `detectron2/projects`.
-  For such APIs, we may treat them as stable APIs and also apply the above strategies.
-  They may be promoted to stable when we're ready.
-* Projects under "detectron2/projects" or imported with "detectron2.projects" are research projects
-  and are all considered experimental.
-* Classes/functions that contain the word "default" or are explicitly documented to produce
-  "default behavior" may change their behaviors when new features are added.
-
-Despite the possible breakage, if a third-party project would like to keep up with the latest updates
-in detectron2, using it as a library will still be less disruptive than forking, because
-the frequency and scope of API changes will be much smaller than code changes.
-
-To see such changes, search for "incompatible changes" in [release logs](https://github.com/facebookresearch/detectron2/releases).
-
-### Config Version Change Log
-
-Detectron2's config version has not been changed since open source.
-There is no need for an open source user to worry about this.
-
-* v1: Rename `RPN_HEAD.NAME` to `RPN.HEAD_NAME`.
-* v2: A batch of renames of many configurations before release.
-
-### Silent Regressions in Historical Versions
-
-We list a few silent regressions, since they may silently produce incorrect results and are hard to debug.
-
-* 04/01/2020 - 05/11/2020: Bad accuracy if `TRAIN_ON_PRED_BOXES` is set to True.
-* 03/30/2020 - 04/01/2020: ResNets are not correctly built.
-* 12/19/2019 - 12/26/2019: Using aspect ratio grouping causes a drop in accuracy.
-* - 11/9/2019: Test time augmentation does not predict the last category. diff --git a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet2_docs/MODEL_ZOO.md b/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet2_docs/MODEL_ZOO.md deleted file mode 100644 index 7a2a92b60d0ebf8f6444f24c3bd74b753c80c57f..0000000000000000000000000000000000000000 --- a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet2_docs/MODEL_ZOO.md +++ /dev/null @@ -1,73 +0,0 @@ -# MODEL_ZOO
-
-### Common settings and notes
-
-- Multiscale training is used by default in all models. The results are all reported using single-scale testing.
-- We report runtime on our local workstation with a Titan Xp GPU and a Titan RTX GPU.
-- All models are trained on 8-GPU servers by default. The 1280 models are trained on 24G GPUs.
Reducing the batchsize with the linear learning rate rule should be fine. -- All models can be downloaded directly from [Google drive](https://drive.google.com/drive/folders/1eae1cTX8tvIaCeof36sBgxrXEXALYlf-?usp=sharing). - - -## COCO - -### CenterNet - -| Model | val mAP | FPS (Titan Xp/ Titan RTX) | links | -|-------------------------------------------|---------|---------|-----------| -| CenterNet-S4_DLA_8x | 42.5 | 50 / 71 |[config](../configs/CenterNet-S4_DLA_8x.yaml)/[model](https://drive.google.com/file/d/1lNBhVHnZAEBRD66MFaHjm5Ij6Z4KYrJq/view?usp=sharing)| -| CenterNet-FPN_R50_1x | 40.2 | 20 / 24 |[config](../configs/CenterNet-FPN_R50_1x.yaml)/[model](https://drive.google.com/file/d/1rVG1YTthMXvutC6jr9KoE2DthT5-jhGj/view?usp=sharing)| - -#### Note - -- `CenterNet-S4_DLA_8x` is a re-implemented version of the original CenterNet (stride 4), with several changes, including - - Using top-left-right-bottom box encoding and GIoU Loss; adding regression loss to the center 3x3 region. - - Adding more positive pixels for the heatmap loss whose regression loss is small and is within the center3x3 region. - - Using more heavy crop augmentation (EfficientDet-style crop ratio 0.1-2), and removing color augmentations. - - Using standard NMS instead of max pooling. - - Using RetinaNet-style optimizer (SGD), learning rate rule (0.01 for each batch size 16), and schedule (8x12 epochs). -- `CenterNet-FPN_R50_1x` is a (new) FPN version of CenterNet. It includes the changes above, and assigns objects to FPN levels based on a fixed size range. The model is trained with standard short edge 640-800 multi-scale training with 12 epochs (1x). - - -### CenterNet2 - -| Model | val mAP | FPS (Titan Xp/ Titan RTX) | links | -|-------------------------------------------|---------|---------|-----------| -| CenterNet2-F_R50_1x | 41.7 | 22 / 27 |[config](../configs/CenterNet2-F_R50_1x.yaml)/[model](X)| -| CenterNet2_R50_1x | 42.9 | 18 / 24 |[config](../configs/CenterNet2_R50_1x.yaml)/[model](https://drive.google.com/file/d/1Osu1J_sskt_1FaGdfJKa4vd2N71TWS9W/view?usp=sharing)| -| CenterNet2_X101-DCN_2x | 49.9 | 6 / 8 |[config](../configs/CenterNet2_X101-DCN_2x.yaml)/[model](https://drive.google.com/file/d/1IHgpUHVJWpvMuFUUetgKWsw27pRNN2oK/view?usp=sharing)| -| CenterNet2_DLA-BiFPN-P3_4x | 43.8 | 40 / 50|[config](../configs/CenterNet2_DLA-BiFPN-P3_4x.yaml)/[model](https://drive.google.com/file/d/12GUNlDW9RmOs40UEMSiiUsk5QK_lpGsE/view?usp=sharing)| -| CenterNet2_DLA-BiFPN-P3_24x | 45.6 | 40 / 50 |[config](../configs/CenterNet2_DLA-BiFPN-P3_24x.yaml)/[model](https://drive.google.com/file/d/15ZES1ySxubDPzKsHPA7pYg8o_Vwmf-Mb/view?usp=sharing)| -| CenterNet2_R2-101-DCN_896_4x | 51.2 | 9 / 13 |[config](../configs/CenterNet2_R2-101-DCN_896_4x.yaml)/[model](https://drive.google.com/file/d/1S7_GE8ZDQBWuLEfKHkxzeF3KBsxsbABg/view?usp=sharing)| -| CenterNet2_R2-101-DCN-BiFPN_1280_4x | 52.9 | 6 / 8 |[config](../configs/CenterNet2_R2-101-DCN-BiFPN_1280_4x.yaml)/[model](https://drive.google.com/file/d/14EBHNMagBCNTQjOXcHoZwLYIi2lFIm7F/view?usp=sharing)| -| CenterNet2_R2-101-DCN-BiFPN_4x+4x_1560_ST | 56.1 | 3 / 5 |[config](../configs/CenterNet2_R2-101-DCN-BiFPN_4x+4x_1560_ST.yaml)/[model](https://drive.google.com/file/d/11ww9VlOi_nhpdsU_vBAecSxBU0dR_JzW/view?usp=sharing)| -| CenterNet2_DLA-BiFPN-P5_640_24x_ST | 49.2 | 33 / 38 |[config](../configs/CenterNet2_DLA-BiFPN-P5_640_24x_ST.yaml)/[model](https://drive.google.com/file/d/1qsHp2HrM1u8WrtBzF5S0oCoLMz-B40wk/view?usp=sharing)| - -#### Note - -- `CenterNet2-F_R50_1x` uses Faster RCNN 
as the second stage. All other CenterNet2 models use Cascade RCNN as the second stage. -- `CenterNet2_DLA-BiFPN-P3_4x` follows the same training setting as [realtime-FCOS](https://github.com/aim-uofa/AdelaiDet/blob/master/configs/FCOS-Detection/README.md). -- `CenterNet2_DLA-BiFPN-P3_24x` is trained by repeating the `4x` schedule (starting from learning rate 0.01) 6 times. -- R2 means [Res2Net](https://github.com/Res2Net/Res2Net-detectron2) backbone. To train Res2Net models, you need to download the ImageNet pre-trained weight [here](https://github.com/Res2Net/Res2Net-detectron2) and place it in `output/r2_101.pkl`. -- The last 4 models in the table are trained with the EfficientDet-style resize-and-crop augmentation, instead of the default random resizing short edge in detectron2. We found this trains faster (per-iteration) and gives better performance under a long schedule. -- `_ST` means using [self-training](https://arxiv.org/abs/2006.06882) using pseudo-labels produced by [Scaled-YOLOv4](https://github.com/WongKinYiu/ScaledYOLOv4) on COCO unlabeled images, with a hard score threshold 0.5. Our processed pseudo-labels can be downloaded [here](https://drive.google.com/file/d/1LMBjtHhLp6dYf6MjwEQmzCLWQLkmWPpw/view?usp=sharing). -- `CenterNet2_R2-101-DCN-BiFPN_4x+4x_1560_ST` finetunes from `CenterNet2_R2-101-DCN-BiFPN_1280_4x` for an additional `4x` schedule with the self-training data. It is trained under `1280x1280` but tested under `1560x1560`. - -## LVIS v1 - -| Model | val mAP box | links | -|-------------------------------------------|--------------|-----------| -| LVIS_CenterNet2_R50_1x | 26.5 |[config](../configs/LVIS_CenterNet2_R50_1x.yaml)/[model](https://drive.google.com/file/d/1gT9e-tNw8uzEBaCadQuoOOP2TEYa4kKP/view?usp=sharing)| -| LVIS_CenterNet2_R50_Fed_1x | 28.3 |[config](../configs/LVIS_CenterNet2_R50_Fed_1x.yaml)/[model](https://drive.google.com/file/d/1a9UjheMCKax0qAKEwPVpq2ZHN6vpqJv8/view?usp=sharing)| - -- The models are trained with repeat-factor sampling. -- `LVIS_CenterNet2_R50_Fed_1x` is CenterNet2 with our federated loss. Check our Appendix D of our [paper](https://arxiv.org/abs/2103.07461) or our [technical report at LVIS challenge](https://www.lvisdataset.org/assets/challenge_reports/2020/CenterNet2.pdf) for references. - -## Objects365 - -| Model | val mAP| links | -|-------------------------------------------|---------|-----------| -| O365_CenterNet2_R50_1x | 22.6 |[config](../configs/O365_CenterNet2_R50_1x.yaml)/[model](https://drive.google.com/file/d/18fG6xGchAlpNp5sx8RAtwadGkS-gdIBU/view?usp=sharing)| - -#### Note -- Objects365 dataset can be downloaded [here](https://www.objects365.org/overview.html). -- The model is trained with class-aware sampling. 
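As a concrete illustration of how these configs are consumed, here is a minimal, hypothetical inference sketch with detectron2. It assumes the CenterNet2 project code is importable (its add_centernet_config helper registers the extra config keys), and the config and checkpoint paths are placeholders for files from the tables above.

```python
from detectron2.config import get_cfg
from detectron2.engine import DefaultPredictor
from centernet.config import add_centernet_config  # from the CenterNet2 project

cfg = get_cfg()
add_centernet_config(cfg)  # register CenterNet2-specific keys before merging
cfg.merge_from_file("configs/CenterNet2_R50_1x.yaml")  # placeholder path
cfg.MODEL.WEIGHTS = "models/CenterNet2_R50_1x.pth"  # downloaded checkpoint
predictor = DefaultPredictor(cfg)  # predictor(bgr_image) returns detections
```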
diff --git a/spaces/Ayushnangia/Whispercpp_yt/app.py b/spaces/Ayushnangia/Whispercpp_yt/app.py
deleted file mode 100644
index 1c4c91e85f314908f9a44df3614516deb2e9501f..0000000000000000000000000000000000000000
--- a/spaces/Ayushnangia/Whispercpp_yt/app.py
+++ /dev/null
@@ -1,81 +0,0 @@
-import gradio as gr
-import yt_dlp as ydlp
-from transformers import pipeline
-
-from whispercpp import Whisper
-summarizer = pipeline("summarization", model="knkarthick/MEETING_SUMMARY")
-
-def download_audio(youtube_url, output_folder='.'):
-    # Download the best audio stream and convert it to WAV via ffmpeg.
-    ydl_opts = {
-        'format': 'bestaudio/best',
-        'postprocessors': [{
-            'key': 'FFmpegExtractAudio',
-            'preferredcodec': 'wav',
-            'preferredquality': '192',
-        }],
-        'outtmpl': f'{output_folder}/audio',
-    }
-
-    with ydlp.YoutubeDL(ydl_opts) as ydl:
-        ydl.download([youtube_url])
-
-
-w = Whisper('tiny')
-
-
-def process_general_transcription(transcription):
-    # Turn bracketed markers (e.g. [MUSIC]) into section separators.
-    formatted_transcription = []
-
-    for line in transcription:
-        if line.startswith('[') and line.endswith(']'):
-            formatted_transcription.append(f'\n--- {line[1:-1].upper()} ---\n')
-        else:
-            formatted_transcription.append(line)
-
-    transcript_str = "\n".join(formatted_transcription)
-
-    return transcript_str
-
-def chunk_to_tokens(text, n):
-    # Split the text into whitespace-token chunks; a higher summarization
-    # percentage n yields smaller chunks and therefore a shorter summary.
-    tokens = text.split()
-    max_chunk_size = min(len(tokens), 512)
-
-    token_size = max(1, int(max_chunk_size * (1 - n / 100)))
-
-    chunks = [" ".join(tokens[i:i + token_size]) for i in range(0, len(tokens), token_size)]
-
-    return chunks
-
-def summarizing(text, n):
-    # Summarize each chunk independently and join the partial summaries.
-    valid_tok = chunk_to_tokens(text, n)
-    res = ""
-    for i in valid_tok:
-        res += summarizer(i)[0]['summary_text'] + '\n'
-    return res
-
-def transcribe_sum_youtube(youtube_url, n):
-    download_audio(youtube_url)
-    result = w.transcribe("audio.wav")
-    text = w.extract_text(result)
-    res = process_general_transcription(text)
-    return summarizing(res, n)
-
-
-with gr.Blocks() as demo:
-    gr.Markdown(
-        """
-        # CPP Whisperer - YouTube Videos Summarizer
-
-        """)
-    with gr.Row():
-        with gr.Column():
-            inp = gr.Textbox(label="YouTube URL", placeholder="Insert YT Url here")
-            inp2 = gr.Slider(label="Summarization Percentage", minimum=0, maximum=100, step=1)
-            result_button_transcribe = gr.Button('Transcribe and Summarize')
-
-        with gr.Column():
-            out = gr.Textbox(label="Transcribed and Summarized Text")
-
-
-    result_button_transcribe.click(transcribe_sum_youtube, inputs=[inp, inp2], outputs=out)
-
-
-demo.launch()
diff --git a/spaces/AzinZ/vitscn/attentions.py b/spaces/AzinZ/vitscn/attentions.py
deleted file mode 100644
index 4e0b0c1fd48c962e21e1fbe60b23fc574927435c..0000000000000000000000000000000000000000
--- a/spaces/AzinZ/vitscn/attentions.py
+++ /dev/null
@@ -1,303 +0,0 @@
-import copy
-import math
-import numpy as np
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-import commons
-import modules
-from modules import LayerNorm
-
-
-class Encoder(nn.Module):
-    def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, **kwargs):
-        super().__init__()
-        self.hidden_channels = hidden_channels
-        self.filter_channels = filter_channels
-        self.n_heads = n_heads
-        self.n_layers = n_layers
-        self.kernel_size = kernel_size
-        self.p_dropout = p_dropout
-        self.window_size = window_size
-
-        self.drop = nn.Dropout(p_dropout)
-        self.attn_layers = nn.ModuleList()
-        self.norm_layers_1 = nn.ModuleList()
-        self.ffn_layers = nn.ModuleList()
-        self.norm_layers_2 = nn.ModuleList()
-        for i in range(self.n_layers):
-            self.attn_layers.append(MultiHeadAttention(hidden_channels,
hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout)) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask): - attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.attn_layers[i](x, x, attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class Decoder(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - - self.drop = nn.Dropout(p_dropout) - self.self_attn_layers = nn.ModuleList() - self.norm_layers_0 = nn.ModuleList() - self.encdec_attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init)) - self.norm_layers_0.append(LayerNorm(hidden_channels)) - self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True)) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask, h, h_mask): - """ - x: decoder input - h: encoder output - """ - self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype) - encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.self_attn_layers[i](x, x, self_attn_mask) - y = self.drop(y) - x = self.norm_layers_0[i](x + y) - - y = self.encdec_attn_layers[i](x, h, encdec_attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class MultiHeadAttention(nn.Module): - def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False): - super().__init__() - assert channels % n_heads == 0 - - self.channels = channels - self.out_channels = out_channels - self.n_heads = n_heads - self.p_dropout = p_dropout - self.window_size = window_size - self.heads_share = heads_share - self.block_length = block_length - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - self.attn = None - - self.k_channels = channels // n_heads - self.conv_q = nn.Conv1d(channels, channels, 1) - self.conv_k = nn.Conv1d(channels, channels, 1) - self.conv_v = nn.Conv1d(channels, channels, 1) - self.conv_o = nn.Conv1d(channels, out_channels, 1) - self.drop = nn.Dropout(p_dropout) - - if window_size is not 
None: - n_heads_rel = 1 if heads_share else n_heads - rel_stddev = self.k_channels**-0.5 - self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - - nn.init.xavier_uniform_(self.conv_q.weight) - nn.init.xavier_uniform_(self.conv_k.weight) - nn.init.xavier_uniform_(self.conv_v.weight) - if proximal_init: - with torch.no_grad(): - self.conv_k.weight.copy_(self.conv_q.weight) - self.conv_k.bias.copy_(self.conv_q.bias) - - def forward(self, x, c, attn_mask=None): - q = self.conv_q(x) - k = self.conv_k(c) - v = self.conv_v(c) - - x, self.attn = self.attention(q, k, v, mask=attn_mask) - - x = self.conv_o(x) - return x - - def attention(self, query, key, value, mask=None): - # reshape [b, d, t] -> [b, n_h, t, d_k] - b, d, t_s, t_t = (*key.size(), query.size(2)) - query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3) - key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - - scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1)) - if self.window_size is not None: - assert t_s == t_t, "Relative attention is only available for self-attention." - key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) - rel_logits = self._matmul_with_relative_keys(query /math.sqrt(self.k_channels), key_relative_embeddings) - scores_local = self._relative_position_to_absolute_position(rel_logits) - scores = scores + scores_local - if self.proximal_bias: - assert t_s == t_t, "Proximal bias is only available for self-attention." - scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype) - if mask is not None: - scores = scores.masked_fill(mask == 0, -1e4) - if self.block_length is not None: - assert t_s == t_t, "Local attention is only available for self-attention." - block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length) - scores = scores.masked_fill(block_mask == 0, -1e4) - p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] - p_attn = self.drop(p_attn) - output = torch.matmul(p_attn, value) - if self.window_size is not None: - relative_weights = self._absolute_position_to_relative_position(p_attn) - value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s) - output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings) - output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t] - return output, p_attn - - def _matmul_with_relative_values(self, x, y): - """ - x: [b, h, l, m] - y: [h or 1, m, d] - ret: [b, h, l, d] - """ - ret = torch.matmul(x, y.unsqueeze(0)) - return ret - - def _matmul_with_relative_keys(self, x, y): - """ - x: [b, h, l, d] - y: [h or 1, m, d] - ret: [b, h, l, m] - """ - ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) - return ret - - def _get_relative_embeddings(self, relative_embeddings, length): - max_relative_position = 2 * self.window_size + 1 - # Pad first before slice to avoid using cond ops. 
-        pad_length = max(length - (self.window_size + 1), 0)
-        slice_start_position = max((self.window_size + 1) - length, 0)
-        slice_end_position = slice_start_position + 2 * length - 1
-        if pad_length > 0:
-            padded_relative_embeddings = F.pad(
-                relative_embeddings,
-                commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]))
-        else:
-            padded_relative_embeddings = relative_embeddings
-        used_relative_embeddings = padded_relative_embeddings[:,slice_start_position:slice_end_position]
-        return used_relative_embeddings
-
-    def _relative_position_to_absolute_position(self, x):
-        """
-        x: [b, h, l, 2*l-1]
-        ret: [b, h, l, l]
-        """
-        batch, heads, length, _ = x.size()
-        # Concat columns of padding to shift from relative to absolute indexing.
-        x = F.pad(x, commons.convert_pad_shape([[0,0],[0,0],[0,0],[0,1]]))
-
-        # Concat extra elements so as to add up to shape (len+1, 2*len-1).
-        x_flat = x.view([batch, heads, length * 2 * length])
-        x_flat = F.pad(x_flat, commons.convert_pad_shape([[0,0],[0,0],[0,length-1]]))
-
-        # Reshape and slice out the padded elements.
-        x_final = x_flat.view([batch, heads, length+1, 2*length-1])[:, :, :length, length-1:]
-        return x_final
-
-    def _absolute_position_to_relative_position(self, x):
-        """
-        x: [b, h, l, l]
-        ret: [b, h, l, 2*l-1]
-        """
-        batch, heads, length, _ = x.size()
-        # Pad along the column dimension.
-        x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length-1]]))
-        x_flat = x.view([batch, heads, length**2 + length*(length -1)])
-        # Add zeros at the beginning that will skew the elements after the reshape.
-        x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
-        x_final = x_flat.view([batch, heads, length, 2*length])[:,:,:,1:]
-        return x_final
-
-    def _attention_bias_proximal(self, length):
-        """Bias for self-attention to encourage attention to close positions.
-        Args:
-            length: an integer scalar.
- Returns: - a Tensor with shape [1, 1, length, length] - """ - r = torch.arange(length, dtype=torch.float32) - diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) - return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) - - -class FFN(nn.Module): - def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.activation = activation - self.causal = causal - - if causal: - self.padding = self._causal_padding - else: - self.padding = self._same_padding - - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size) - self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size) - self.drop = nn.Dropout(p_dropout) - - def forward(self, x, x_mask): - x = self.conv_1(self.padding(x * x_mask)) - if self.activation == "gelu": - x = x * torch.sigmoid(1.702 * x) - else: - x = torch.relu(x) - x = self.drop(x) - x = self.conv_2(self.padding(x * x_mask)) - return x * x_mask - - def _causal_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = self.kernel_size - 1 - pad_r = 0 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x - - def _same_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = (self.kernel_size - 1) // 2 - pad_r = self.kernel_size // 2 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x diff --git a/spaces/Benson/text-generation/Examples/9apps 2018.md b/spaces/Benson/text-generation/Examples/9apps 2018.md deleted file mode 100644 index 96feb453529be8dcc2155e4a8a39f2d86fae6b3e..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/9apps 2018.md +++ /dev/null @@ -1,97 +0,0 @@ -
-

9apps 2018: A Free Alternative to the Google Play Store

-

If you are looking for a way to download and use thousands of apps for free on your Android device, you may want to check out 9apps. 9apps is a third-party app store that offers a variety of apps, games, wallpapers, ringtones, and more. In this article, we explain what 9apps is, why you might use it, how to download and install it, and what its pros and cons are. We also compare it with the Google Play Store, the default app store for Android devices.

-

What is 9apps and why should you use it?

-

9apps is a free app store launched in 2013 by Alibaba Group, the Chinese e-commerce giant. It is one of the most popular app stores in India, Indonesia, and other emerging markets. According to its official website, it has more than 250 million users and more than 35 million daily downloads.

-

-

One of the main reasons to use 9apps is that it offers many apps that are not available on the Google Play Store. For example, you can find apps that are banned or restricted in your country, such as WhatsApp, TikTok, and PUBG Mobile, as well as apps that are exclusive to 9apps, such as Vidmate and UC Browser.

-

Features of 9apps

-

Some of the features that set 9apps apart from other app stores are:

• A user-friendly interface that is easy to navigate and search.
• A small size of about 4 MB, which saves storage space and data usage.
• Fast download speeds that let you download apps in a matter of seconds.
• A smart recommendation system that suggests apps based on your preferences and interests.
• A wide range of categories and subcategories covering many genres and topics.
• A regular update system that keeps apps up to date and bug-free.
-

How to download and install 9apps on your Android device

-

To download and install 9apps on your Android device, follow these steps (a scripted alternative using adb is sketched after this list):

1. Go to the official 9apps website or another trusted source and tap the download button.
2. Once the APK file has downloaded, open it and tap Install. You may need to enable the "Unknown sources" option in your settings to allow the installation.
3. After the installation completes, open the app and enjoy free access to thousands of apps.
-
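If you prefer installing from a computer, the same sideloading step can be scripted. The sketch below is illustrative rather than part of 9apps: it assumes the Android SDK's adb tool is installed, USB debugging is enabled on the phone, and the APK file name is hypothetical.

```python
import subprocess

def sideload_apk(apk_path: str) -> None:
    """Install an APK on the connected Android device via adb."""
    # The -r flag reinstalls over an existing copy while keeping its data.
    subprocess.run(["adb", "install", "-r", apk_path], check=True)

sideload_apk("9apps.apk")  # hypothetical file name
```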

Pros and cons of 9apps

-

Like any other app store, 9apps has its own advantages and disadvantages. Here are some of them:

-

Advantages of 9apps

• It offers a large number of apps that are not available on the Google Play Store.
• It saves storage space and data usage thanks to its small size and fast download speeds.
• It provides personalized recommendations based on your preferences and interests.
• It updates apps regularly to ensure their quality and performance.
• It protects your device from viruses and malware with its security system.

Disadvantages of 9apps

• It may not be compatible with some devices or Android versions.
• It may contain ads or pop-ups that can be annoying or intrusive.
• It may not have the latest versions of some apps or games.
• It may not have the same quality control or verification as the Google Play Store.
• It may pose risks to your privacy or security if you download apps from untrusted sources.
-

Comparison between 9apps and the Google Play Store

-

Both 9apps and the Google Play Store are popular app stores for Android devices, but they have some similarities and differences. Here are a few of them:

-

Similarities between 9apps and the Google Play Store

• Both have a user-friendly interface that is easy to navigate and search.
• Both have a recommendation system that suggests apps based on your preferences and interests.
• Both have a security system that scans apps for viruses and malware before installing them.

Differences between 9apps and the Google Play Store

| 9apps | Google Play Store |
|---|---|
| Has a small size of about 4 MB. | Has a large size of about 20 MB. |
| Has fast download speeds that let you download apps in seconds. | Has slower download speeds; downloading apps can take minutes. |
| Offers many apps that are not available on the Google Play Store, such as banned or restricted apps and 9apps exclusives. | Does not offer apps that are banned or restricted in your country, such as WhatsApp, TikTok, or PUBG Mobile. |
| Updates apps regularly to ensure their quality and performance. | May not update apps frequently or on time. |
| May not be compatible with some devices or Android versions. | Is compatible with most devices and Android versions. |
| May contain ads or pop-ups that can be annoying or intrusive. | Does not contain ads or pop-ups. |
| May not have the latest versions of some apps or games. | Has the latest versions of most apps and games. |
| May not have the same quality control or verification as the Google Play Store. | Has a strict quality-control and verification process for apps. |
| May pose risks to your privacy or security if you download apps from untrusted sources. | Has a high level of privacy and security for users and apps. |

Conclusion

- -

Frequently asked questions

-

Here are some frequently asked questions about 9apps:

-

1. Is 9apps safe to use?

9apps is generally safe to use, since it has a security system that scans apps for viruses and malware before installing them. However, it can pose risks to your privacy or security if you download apps from untrusted sources or grant them unnecessary permissions. You should therefore always check the source, rating, reviews, and permissions of an app before downloading it (a small checksum check is sketched after these FAQs). You should also use an antivirus app on your device to protect it from potential threats.

2. Is 9apps legal to use?

9apps is legal to use as long as you do not violate any laws or regulations in your country. However, some of the apps available on 9apps may be banned or restricted where you live, such as WhatsApp, TikTok, or PUBG Mobile. You should check the legality of an app before downloading it, and use it at your own risk.

3. How can I update 9apps?

9apps has a regular update system that keeps the app store and its apps up to date and bug-free. You can check for updates by opening the app and tapping the menu icon in the top-left corner, then tapping Settings and scrolling down to the update option. If an update is available, tap it and follow the instructions to install it. Alternatively, you can download the latest version of 9apps from its official website or another trusted source.

4. How can I uninstall 9apps?

If you want to uninstall 9apps from your device, follow these steps:

1. Go to your device settings and tap Apps or Applications.
2. Find and tap 9apps, then tap Uninstall.
3. Confirm your action and wait for the process to complete.

5. What are some alternatives to 9apps?

Some popular alternatives include:

• Aptoide: a decentralized app store that lets users create and manage their own app stores. It has more than 1 billion downloads and more than 7 million apps.
• APKPure: a simple, lightweight app store that offers clean APK files for apps and games. It has more than 300 million downloads and more than 3 million apps.
• Amazon Appstore: an app store owned by Amazon, the online retail giant. It has more than 600,000 apps and games, including some exclusive titles.

6. How can I contact 9apps customer support?

If you have questions, feedback, or complaints about 9apps, you can contact customer support by emailing help@9apps.mobi. You can also visit the official 9apps website or its Facebook page for more information and support.
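One concrete way to vet a downloaded APK, as hinted at in the safety FAQ above, is to compare its checksum against one published by a source you trust. This is a minimal, generic sketch; the file name is illustrative and nothing here is specific to 9apps.

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Return the SHA-256 hex digest of a file, read in chunks."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

print(sha256_of("9apps.apk"))  # compare against a trusted published checksum
```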
-
-
\ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/Blockman Go Apkmody.md b/spaces/Benson/text-generation/Examples/Blockman Go Apkmody.md deleted file mode 100644 index 9ddfda9faf83595573b0e276862105041039d166..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Blockman Go Apkmody.md +++ /dev/null @@ -1,100 +0,0 @@ - -

Blockman Go Apkmody: La guía definitiva para los amantes de Sandbox

-

Si eres un fan de los juegos de sandbox, es posible que hayas oído hablar de blockman go, una aplicación gratuita que te permite jugar varios minijuegos de estilo bloque, chatear con amigos y hacer otros nuevos. Blockman go es un juego divertido y emocionante que ofrece muchas características, como:

-

blockman go apkmody


Download ✔✔✔ https://bltlly.com/2v6Kw8



-
    -
  • Varios juegos: Puedes elegir entre diferentes géneros de minijuegos, como acción, aventura, juegos de rol, estrategia, simulación y más. Puedes unirte al juego con un simple toque o crear tu propia habitación e invitar a tus amigos.
  • -
  • Avatares personalizables: Puedes vestir a tu avatar con diferentes trajes, accesorios, peinados y pieles. También puede utilizar el sistema de apósito para obtener recomendaciones para la mejor ropa para usted.
  • -
  • Sistema de chat: Puedes comunicarte con otros jugadores usando funciones de chat en el juego, mensajes privados y grupos. También puede compartir sus momentos divertidos y capturas de pantalla con ellos.
  • -
  • Gold Rewards: Puedes ganar oro jugando minijuegos y usarlo para comprar decoración y artículos. También puedes obtener oro gratis viendo anuncios o completando tareas.
  • -
  • Sistema VIP: Puedes convertirte en un jugador VIP y disfrutar de muchos privilegios, como un descuento del 20% en decoración, regalos diarios, más oro, etc.
  • -
-

Pero ¿qué pasa si quieres disfrutar de más beneficios y características sin gastar dinero? Ahí es donde entra apkmody. Apkmody es un sitio web que proporciona archivos APK modificados para juegos y aplicaciones Android. Estos archivos APK modded le permiten acceder a características premium y recursos ilimitados de forma gratuita. Puede descargar estos archivos desde el sitio web e instalarlos en su dispositivo fácilmente.

- -

¿Cómo puedes descargar y usar blockman go apkmody? Estos son los pasos que debes seguir:

-

Cómo descargar e instalar Blockman Go Apkmody en su dispositivo?

-
    -
  1. Vaya a [apkmody.io]( 2 ) en su navegador y busque "blockman go".
  2. -
  3. Seleccione la última versión de blockman go mod APK de la lista de resultados.
  4. -
  5. Haga clic en el botón "Descargar" y espere a que el archivo se descargue.
  6. -
  7. Después de que la descarga se haya completado, busque el archivo en su dispositivo y toque en él para instalarlo.
  8. -
  9. Si ves un mensaje de advertencia que dice "Instalar bloqueado", ve a la configuración de tu dispositivo y habilita "Fuentes desconocidas" en las opciones de seguridad.
  10. -
  11. Una vez que se hace la instalación, abrir la aplicación y disfrutar de Blockman ir apkmody.
  12. -
-

¿Cómo usar Blockman Go Apkmody para disfrutar de dinero ilimitado y GCubes?

-

Usar block

Usar blockman go apkmody es muy fácil y simple. Solo tienes que seguir estos pasos:

-
    -
  • Inicie la aplicación e inicie sesión con su cuenta o cree una nueva.
  • -
  • Ir a la tienda y seleccionar el artículo que desea comprar con GCubes.
  • -
  • Haga clic en el botón "Comprar" y confirme su compra.
  • -
  • Verá que su saldo de GCubes no disminuirá, sino que aumentará por la cantidad que gastó.
  • -
  • Disfruta de tu dinero ilimitado y GCubes y compra lo que quieras.
  • -
-

Nota: También puede obtener GCubes gratis completando tareas, viendo anuncios o invitando a amigos. Sin embargo, con blockman go apkmody, ya no necesitas hacer eso.

-

¿Cómo jugar varios minijuegos con Blockman Go Apkmody y chatear con amigos?

-

Una de las mejores características de blockman go es que ofrece una variedad de minijuegos para que puedas jugar y divertirte. Con blockman go apkmody, puedes acceder a todos los minijuegos sin limitaciones. Estos son algunos de los minijuegos populares que puedes jugar:

-

- - -Minijuego -Descripción - - -Guerras de camas - - - -Guerra del cielo -Un juego de supervivencia donde tienes que luchar contra otros jugadores en islas flotantes. Puedes saquear cofres, crear armas y usar habilidades para ganar. - - -Construir y disparar -Un juego creativo donde puedes construir tu propio mapa e invitar a otros a unirse. También puedes disparar a enemigos con diferentes armas y granadas. - - -Guerras de huevos -Un juego similar a Bed Wars, pero con huevos en lugar de camas. Tienes que proteger tu huevo y romper el huevo del enemigo. También puede actualizar su equipo y formar equipo con otros. - - -Murder MysteryUn juego de detectives donde tienes que averiguar quién es el asesino entre los jugadores. Puedes ser un detective, un asesino o un inocente. Tienes que usar pistas, armas y lógica para resolver el misterio. - - -

Para jugar a estos minijuegos, solo tienes que tocar el icono "Juegos" en la pantalla principal y seleccionar el que desea unirse. También puedes crear tu propia habitación e invitar a tus amigos a jugar contigo. Puedes chatear con otros jugadores usando el sistema de chat del juego o el chat de voz. También puedes enviar emojis, pegatinas y regalos para expresarte.

-

¿Cómo personalizar tu avatar y mostrar tu estilo con Blockman Go Apkmody?

-

Otra gran característica de blockman go es que te permite personalizar tu avatar con diferentes trajes, accesorios, peinados y pieles. Con Blockman ir apkmody, puede obtener todos los artículos de forma gratuita y cambiar su aspecto con la frecuencia que desee. Estas son algunas de las formas en que puedes personalizar tu avatar:

-
    -
  • Vaya al icono "Avatar" en la pantalla principal y seleccione el elemento que desea cambiar.
  • -
  • Puedes elegir entre diferentes categorías, como ropa, sombreros, gafas, máscaras, alas, colas, mascotas y más.
  • -
  • También puede utilizar la función "Vestirse" para obtener recomendaciones para el mejor atuendo para usted basado en sus preferencias.
  • - -
  • También puedes usar la función "Photo Studio" para tomar fotos de tu avatar y compartirlas con otros.
  • -
-

Con blockman ir apkmody, puede mostrar su estilo y personalidad con su avatar. También puedes ver los avatares de otros jugadores y complementarlos o hacer amistad con ellos.

-

¿Cómo evitar estafas y hackers con Blockman Go Apkmody?

-

Mientras blockman ir apkmody es un archivo APK modded seguro y confiable que no contiene ningún virus o malware, todavía tiene que tener cuidado al jugar juegos en línea. Hay algunos estafadores y hackers que podrían tratar de engañarte o hacerte daño de alguna manera. Aquí hay algunos consejos sobre cómo evitarlos:

-
    -
  • No comparta su información personal, como su contraseña, dirección de correo electrónico, número de teléfono o datos bancarios con nadie en línea.
  • -
  • No haga clic en ningún enlace sospechoso o descargar archivos desconocidos de cualquier persona en línea.
  • -
  • No confíes en nadie que afirme ser un miembro del staff o un moderador de blockman go. Es posible que te pidan los detalles de tu cuenta o te ofrezcan artículos o GCubes gratis. No son reales y están tratando de estafarte.
  • -
  • No utilice ningún otro archivo APK modded o hacks para Blockman go. Pueden contener virus o malware que pueden dañar tu dispositivo o robar tus datos. También pueden hacer que te prohíban jugar.
  • -
  • No unirse a ningún sitio web no oficial o grupos que afirman estar relacionados con blockman go. Pueden ser sitios de phishing o comunidades falsas que pueden engañarte o enviarte spam.
  • -
-

Con estos consejos, puede evitar estafas y hackers y disfrutar de Blockman ir apkmody de forma segura.

-

Conclusión

- -

Si estás interesado en blockman go apkmody, no dudes en descargarlo ahora y pruébalo. No te arrepentirás. Usted tendrá una explosión jugando blockman ir con dinero ilimitado y GCubes. También podrás impresionar a tus amigos y hacer otros nuevos con tu impresionante avatar y habilidades. Blockman go apkmody es la mejor manera de disfrutar de blockman ir al máximo.

-

Entonces, ¿qué estás esperando? Descargar blockman ir apkmody ahora y comenzar su aventura en el mundo de los bloques!

-

Preguntas frecuentes

-

Aquí están algunas de las preguntas más frecuentes sobre blockman go apkmody:

-

¿Cuáles son algunos de los minijuegos populares en blockman go?

-

Algunos de los minijuegos populares en blockman go son Bed Wars, Sky Wars, Construir y disparar, Egg Wars y Murder Mystery. Puedes encontrar más minijuegos en el juego tocando el icono "Juegos" en la pantalla principal.

-

¿Qué son los GCubes y cómo puedo conseguirlos gratis?

-

GCubes son la moneda premium en blockman go que puedes usar para comprar artículos especiales, desbloquear funciones VIP, unirse a eventos exclusivos y más. Usted puede conseguir de forma gratuita mediante el uso de blockman ir apkmody, que le da GCubes ilimitada. También puedes conseguirlos completando tareas, viendo anuncios o invitando a amigos.

-

¿Cómo puedo unirme a una fiesta o crear mi propia en blockman go?

-

Puedes unirte a una fiesta o crear la tuya tocando el icono "Fiesta" en la pantalla principal. Puedes invitar a tus amigos u otros jugadores a unirse a tu grupo enviándoles un código o un enlace. También puede unirse a otras partes introduciendo su código o enlace. Puede chatear con los miembros de su grupo y jugar minijuegos juntos.

-

¿Cómo puedo reportar un error o un problema en blockman go?

-

Puede informar de un error o un problema en blockman ir tocando en el "Feedback" icono en la pantalla principal. Puede llenar un formulario con los detalles de su problema y capturas de pantalla y enviarlo al desarrollador. También puede ponerse en contacto con el equipo de soporte por correo electrónico en service@blockmango.net.

- -

Puede ponerse en contacto con el desarrollador o el equipo de soporte de blockman ir por correo electrónico a service@blockmango.net. También puedes seguirlos en sus cuentas de redes sociales, como Facebook, Twitter, Instagram, YouTube, Discord y Reddit.

-
-
\ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/Descargar Etiqueta Despus De La Escuela Apk Mod.md b/spaces/Benson/text-generation/Examples/Descargar Etiqueta Despus De La Escuela Apk Mod.md deleted file mode 100644 index 1278f118bb603c5a46dc39df9608eacd3bc19005..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Descargar Etiqueta Despus De La Escuela Apk Mod.md +++ /dev/null @@ -1,75 +0,0 @@ -
-

Descargar etiqueta después de la escuela APK Mod: Un juego emocionante para Android y iOS

-

¿Estás buscando un juego que te mantenga al borde de tu asiento? ¿Quieres revivir tus días escolares con un toque de horror y misterio? ¿Quieres controlar el destino de un tímido y tímido chico llamado Shota-Kun? Si usted respondió sí a cualquiera de estas preguntas, entonces usted debe descargar Etiqueta Después de la Escuela APK Mod, un juego que le llevará en una aventura emocionante a través de una escuela embrujada.

-

¿Qué es la etiqueta después de la escuela?

-

Tag After School es un juego desarrollado por Genius Studio Japan Inc, una compañía que se especializa en crear juegos de estilo anime para dispositivos móviles. El juego se basa en el clásico juego infantil de la etiqueta, pero con un toque. Puedes competir con amigos o extraños, y obtener acceso a varios personajes, potenciadores y niveles a medida que intentas escapar de tus oponentes. Pero ten cuidado, porque la escuela no es tan inocente como parece. Hay secretos, misterios y peligros acechando en cada esquina.

-

descargar etiqueta después de la escuela apk mod


Download Ziphttps://bltlly.com/2v6JhB



-

Un juego basado en el clásico juego de etiquetas

-

El escenario del juego es en una escuela secundaria donde los estudiantes participan en el juego y tienen que esconderse de sus oponentes. Puedes elegir entre diferentes modos, como solo, equipo o supervivencia. También puedes personalizar la apariencia, las habilidades y el equipo de tu personaje. El juego es fácil de jugar, pero difícil de dominar. Tienes que usar tu velocidad, agilidad y estrategia para evitar ser etiquetado.

-

Un juego de terror y misterio

-

El juego no es solo diversión y juegos. También tiene un lado oscuro. La escuela está embrujada por fantasmas, monstruos y trampas que intentarán evitar que escapes. Tienes que tener cuidado a donde vas, porque nunca sabes lo que podría pasar después. El juego tiene una atmósfera espeluznante, con efectos de sonido espeluznantes y gráficos que te harán sentir como si estuvieras en una película de terror.

-

Un juego con una narrativa basada en la historia

- -

¿Cómo se juega Tag After School?

-

El juego es simple de jugar, pero difícil de ganar. Aquí hay algunos consejos sobre cómo jugar Tag After School:

-

Elige tu personaje y modo

-

Puedes elegir entre diferentes personajes, cada uno con sus propias fortalezas y debilidades. También puede personalizar su apariencia, habilidades y equipo. Puede elegir entre diferentes modos, como solo, equipo o supervivencia. En el modo en solitario, tienes que etiquetar tantos oponentes como sea posible antes de que acabe el tiempo. En el modo de equipo, tienes que trabajar con tus compañeros de equipo para etiquetar al otro equipo. En el modo de supervivencia, tienes que evitar ser etiquetado por cualquier persona durante el mayor tiempo posible.

-

Ejecutar, ocultar y etiquetar a sus oponentes

-

Tienes que usar el joystick en el lado izquierdo de la pantalla para moverte alrededor del mapa

Tienes que usar el joystick en el lado izquierdo de la pantalla para moverte alrededor del mapa. También puede usar los botones en el lado derecho de la pantalla para saltar, deslizarse o etiquetar. Tienes que ser rápido e inteligente, porque tus oponentes tratarán de atraparte o esconderse de ti. También puedes usar el mapa en la esquina superior derecha de la pantalla para ver dónde están tus oponentes.

-

Usa potenciadores y obstáculos para tu ventaja

-

Puedes encontrar diferentes potenciadores y obstáculos en el mapa que pueden ayudarte o dificultarte. Por ejemplo, puedes encontrar un aumento de velocidad que te hará correr más rápido, un escudo que te protegerá de ser etiquetado o una trampa que ralentizará a tus oponentes. También puedes usar objetos como casilleros, escritorios o puertas para esconderte o bloquear a tus enemigos. Sin embargo, hay que tener cuidado, porque algunos power-ups y obstáculos pueden ser contraproducentes o afectar a todos en el mapa.

-

Toma decisiones que determinan el destino de Shota-Kun

- -

¿Por qué descargar etiqueta después de la escuela APK Mod?

-

Tag After School es un juego divertido y emocionante que te mantendrá entretenido durante horas. Pero si quieres disfrutar aún más, usted debe descargar Tag After School APK Mod, una versión modificada del juego que le dará características y beneficios ilimitados. Aquí hay algunas razones por las que debe descargar la etiqueta después de la escuela APK Mod:

-

-

Disfruta de funciones y beneficios ilimitados

-

Con etiqueta después de la escuela APK Mod, puede acceder a todas las características y beneficios del juego sin restricciones o limitaciones. Puedes desbloquear todos los personajes, potenciadores, niveles y escenarios sin gastar dinero ni tiempo. También puedes obtener monedas y gemas ilimitadas que puedes usar para comprar o actualizar lo que quieras. También puedes eliminar anuncios y disfrutar de una experiencia de juego fluida e ininterrumpida.

-

Experimenta gráficos inmersivos y efectos de sonido

-

Etiqueta Después de la Escuela APK Mod también mejora los gráficos y efectos de sonido del juego, por lo que es más realista y envolvente. Puedes disfrutar de imágenes de alta calidad que te harán sentir como si estuvieras en una escuela real. También puedes escuchar sonidos realistas que te harán saltar o gritar. También puede ajustar la configuración según sus preferencias y especificaciones del dispositivo.

-

Ponte a prueba y crea tus propios argumentos

-

Etiqueta después de la escuela APK Mod también le permite personalizar el juego de acuerdo a su gusto. Puedes crear tus propias historias eligiendo diferentes opciones y resultados. También puedes cambiar el nivel de dificultad del juego ajustando la velocidad, inteligencia y agresividad de tus oponentes. También puedes jugar con tus amigos online o offline, y ver quién es el mejor en la etiqueta.

-

How to download and install Tag After School APK Mod?

-

If you are interested in downloading and installing Tag After School APK Mod, here are the simple steps you need to follow:

-

For Android devices

-
  1. Go to your device settings and enable unknown sources.
  2. Go to your file manager and locate the Tag After School APK Mod file.
  3. Tap on it and install it.
  4. Launch the game and enjoy.

If you prefer installing from a computer, a command-line alternative is sketched below.
-
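
As a minimal sketch, assuming USB debugging is enabled on the phone and adb (from the Android platform-tools) is installed on the computer, and with the APK filename as a placeholder for whatever your download is called:

    # Confirm the phone is connected and authorized for debugging
    adb devices

    # Push the APK to the phone and install it
    adb install tag-after-school-mod.apk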

For iOS devices

-
  1. Go to [this link] and download the Tag After School IPA file.
  2. Go to your device settings and trust the Tag After School IPA file.
  3. Go to your file manager and locate the Tag After School IPA file.
  4. Tap on it and install it.
  5. Launch the game and enjoy.
-

For PC devices

-
  1. Go to [this link] and download the Tag After School EXE file.
  2. Go to your PC settings and allow the Tag After School EXE file.
  3. Go to your downloads folder and locate the Tag After School EXE file.
  4. Double-click on it and install it.
  5. Launch the game and enjoy.
-

Conclusion

-

Tag After School is a game that will make you feel nostalgic, scared, and excited all at once. It is a game that combines the fun of tag with the thrill of horror and mystery. It is a game that lets you create your own story and destiny. It is a game you should download and play right now.

-

If you want the best gaming experience, you should download Tag After School APK Mod, a modified version of the game that gives you unlimited features and benefits. You can unlock every character, power-up, level, and scenario without any hassle. You can also enjoy high-quality graphics and sound effects that immerse you in the game, and challenge yourself by creating your own stories and customizing the game to your preferences.

-

Tag After School is a game you should not miss. It will keep you entertained for hours, make you feel a whole range of emotions, and leave you wanting to play more. So what are you waiting for? Download Tag After School APK Mod now and have fun!

-

Frequently asked questions

-

Here are some frequently asked questions about Tag After School APK Mod:

-

Is Tag After School APK Mod safe to download and install?

-

Yes, Tag After School APK Mod is safe to download and install. It does not contain any viruses, malware, or spyware that could harm your device or data, and it is compatible with most devices and operating systems.
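
If you want to double-check a download yourself before installing, here is a minimal sketch, with the filename as a placeholder and assuming a reference hash is published alongside the download; sha256sum is a standard Unix tool and apksigner ships with the Android SDK build-tools:

    # Compare this hash against the one published with the download, if any
    sha256sum tag-after-school-mod.apk

    # Inspect the APK's signing certificate
    apksigner verify --print-certs tag-after-school-mod.apk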

-

Is Tag After School APK Mod free to download and play?

-

Yes, Tag After School APK Mod is free to download and play. You do not need to pay any money or fees to access the game or its features, and you can play offline or online at no charge.

-

How can I update Tag After School APK Mod?

-

You can update Tag After School APK Mod by visiting [this link] and downloading the latest version of the game. You can then install it over the existing version without losing any data or progress.
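
If you update from a computer instead, adb can reinstall over the existing package while keeping its data, which matches the no-data-loss behavior described above. A minimal sketch, with the filename as a placeholder:

    # -r replaces the installed package but keeps its app data and progress
    adb install -r tag-after-school-mod-latest.apk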

-

How can I contact the developers of Tag After School APK Mod?

-

You can contact the developers of Tag After School APK Mod through their official website or social media pages. You can also email them or leave a comment on their blog or forum. They are always happy to hear from their fans and customers.

-

How can I share my feedback or suggestions about Tag After School APK Mod?

-
-
\ No newline at end of file diff --git a/spaces/Bianca0930/Bianca/README.md b/spaces/Bianca0930/Bianca/README.md deleted file mode 100644 index 4dfe4aff591ac83d79ccd2084bcccc6d12610b46..0000000000000000000000000000000000000000 --- a/spaces/Bianca0930/Bianca/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Bianca -emoji: 🌍 -colorFrom: gray -colorTo: blue -sdk: gradio -sdk_version: 3.18.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Boadiwaa/Recipes/openai/api_resources/embedding.py b/spaces/Boadiwaa/Recipes/openai/api_resources/embedding.py deleted file mode 100644 index fd9c7ac27d2f150228aa5de03ee02cf3413a918a..0000000000000000000000000000000000000000 --- a/spaces/Boadiwaa/Recipes/openai/api_resources/embedding.py +++ /dev/null @@ -1,58 +0,0 @@ -import base64 -import time - -import numpy as np - -from openai import util -from openai.api_resources.abstract import DeletableAPIResource, ListableAPIResource -from openai.api_resources.abstract.engine_api_resource import EngineAPIResource -from openai.error import InvalidRequestError, TryAgain - - -class Embedding(EngineAPIResource, ListableAPIResource, DeletableAPIResource): - engine_required = False - OBJECT_NAME = "embeddings" - - @classmethod - def create(cls, *args, **kwargs): - """ - Creates a new embedding for the provided input and parameters. - - See https://beta.openai.com/docs/api-reference/embeddings for a list - of valid parameters. - """ - start = time.time() - timeout = kwargs.pop("timeout", None) - if kwargs.get("model", None) is None and kwargs.get("engine", None) is None: - raise InvalidRequestError( - "Must provide an 'engine' or 'model' parameter to create an Embedding.", - param="engine", - ) - - user_provided_encoding_format = kwargs.get("encoding_format", None) - - # If encoding format was not explicitly specified, we opaquely use base64 for performance - if not user_provided_encoding_format: - kwargs["encoding_format"] = "base64" - - while True: - try: - response = super().create(*args, **kwargs) - - # If a user specifies base64, we'll just return the encoded string. - # This is only for the default case. 
- if not user_provided_encoding_format: - for data in response.data: - - # If an engine isn't using this optimization, don't do anything - if type(data["embedding"]) == str: - data["embedding"] = np.frombuffer( - base64.b64decode(data["embedding"]), dtype="float32" - ).tolist() - - return response - except TryAgain as e: - if timeout is not None and time.time() > start + timeout: - raise - - util.log_info("Waiting for model to warm up", error=e) diff --git a/spaces/CNXT/PiX2TXT/README.md b/spaces/CNXT/PiX2TXT/README.md deleted file mode 100644 index 9a53792eccdbd14508f2f5deb189efbd1c5f7484..0000000000000000000000000000000000000000 --- a/spaces/CNXT/PiX2TXT/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: '#://CNXT ❌ #://PiX2TXT' -emoji: 👁 -colorFrom: blue -colorTo: red -sdk: gradio -sdk_version: 3.27.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/CVH-vn1210/make_hair/app.py b/spaces/CVH-vn1210/make_hair/app.py deleted file mode 100644 index 9bfc258e0ebcb1ee7af7fd345a3717eb9a820488..0000000000000000000000000000000000000000 --- a/spaces/CVH-vn1210/make_hair/app.py +++ /dev/null @@ -1,158 +0,0 @@ -import argparse -import os -import random - -import numpy as np -import torch -import torch.backends.cudnn as cudnn -import gradio as gr - -import huggingface_hub as hf - -from minigpt4.common.config import Config -from minigpt4.common.dist_utils import get_rank -from minigpt4.common.registry import registry -from minigpt4.conversation.conversation import Chat, CONV_VISION - -# imports modules for registration -from minigpt4.datasets.builders import * -from minigpt4.models import * -from minigpt4.processors import * -from minigpt4.runners import * -from minigpt4.tasks import * - -hf.login(token=os.environ['model_token']) - -def parse_args(): - parser = argparse.ArgumentParser(description="Demo") - parser.add_argument("--cfg-path", type=str, default='eval_configs/minigpt4.yaml', help="path to configuration file.") - parser.add_argument( - "--options", - nargs="+", - help="override some settings in the used config, the key-value pair " - "in xxx=yyy format will be merged into config file (deprecate), " - "change to --cfg-options instead.", - ) - args = parser.parse_args() - return args - - -def setup_seeds(config): - seed = config.run_cfg.seed + get_rank() - - random.seed(seed) - np.random.seed(seed) - torch.manual_seed(seed) - - cudnn.benchmark = False - cudnn.deterministic = True - -# ======================================== -# Model Initialization -# ======================================== - -SHARED_UI_WARNING = f'''### [NOTE] It is possible that you are waiting in a lengthy queue. - -You can duplicate and use it with a paid private GPU. - -Duplicate Space - -Alternatively, you can also use the demo on our [project page](https://minigpt-4.github.io). 
-''' - -print('Initializing Chat') -cfg = Config(parse_args()) - -model_config = cfg.model_cfg -model_cls = registry.get_model_class(model_config.arch) -model = model_cls.from_config(model_config).to('cuda:0') - -vis_processor_cfg = cfg.datasets_cfg.cc_align.vis_processor.train -vis_processor = registry.get_processor_class(vis_processor_cfg.name).from_config(vis_processor_cfg) -chat = Chat(model, vis_processor) -print('Initialization Finished') - -# ======================================== -# Gradio Setting -# ======================================== - -def gradio_reset(chat_state, img_list): - if chat_state is not None: - chat_state.messages = [] - if img_list is not None: - img_list = [] - return None, gr.update(value=None, interactive=True), gr.update(placeholder='Please upload your image first', interactive=False), gr.update(value="Upload & Start Chat", interactive=True), chat_state, img_list - -def upload_img(gr_img, text_input, chat_state): - if gr_img is None: - return None, None, gr.update(interactive=True), chat_state, None - chat_state = CONV_VISION.copy() - img_list = [] - llm_message = chat.upload_img(gr_img, chat_state, img_list) - return gr.update(interactive=False), gr.update(interactive=True, placeholder='Type and press Enter'), gr.update(value="Start Chatting", interactive=False), chat_state, img_list - -def gradio_ask(user_message, chatbot, chat_state): - if len(user_message) == 0: - return gr.update(interactive=True, placeholder='Input should not be empty!'), chatbot, chat_state - chat.ask(user_message, chat_state) - chatbot = chatbot + [[user_message, None]] - return '', chatbot, chat_state - - -def gradio_answer(chatbot, chat_state, img_list, num_beams, temperature): - llm_message = chat.answer(conv=chat_state, img_list=img_list, max_new_tokens=300, num_beams=1, temperature=temperature, max_length=2000)[0] - chatbot[-1][1] = llm_message - return chatbot, chat_state, img_list - -title = """

Demo of MiniGPT-4

""" -description = """

This is the demo of MiniGPT-4. Upload your images and start chatting!

""" -article = """
-""" - -#TODO show examples below - -with gr.Blocks() as demo: - gr.Markdown(title) - gr.Markdown(SHARED_UI_WARNING) - gr.Markdown(description) - gr.Markdown(article) - - with gr.Row(): - with gr.Column(scale=0.5): - image = gr.Image(type="pil") - upload_button = gr.Button(value="Upload & Start Chat", interactive=True, variant="primary") - clear = gr.Button("Restart") - - num_beams = gr.Slider( - minimum=1, - maximum=5, - value=1, - step=1, - interactive=True, - label="beam search numbers)", - ) - - temperature = gr.Slider( - minimum=0.1, - maximum=2.0, - value=1.0, - step=0.1, - interactive=True, - label="Temperature", - ) - - - with gr.Column(): - chat_state = gr.State() - img_list = gr.State() - chatbot = gr.Chatbot(label='MiniGPT-4') - text_input = gr.Textbox(label='User', placeholder='Please upload your image first', interactive=False) - - upload_button.click(upload_img, [image, text_input, chat_state], [image, text_input, upload_button, chat_state, img_list]) - - text_input.submit(gradio_ask, [text_input, chatbot, chat_state], [text_input, chatbot, chat_state]).then( - gradio_answer, [chatbot, chat_state, img_list, num_beams, temperature], [chatbot, chat_state, img_list] - ) - clear.click(gradio_reset, [chat_state, img_list], [chatbot, image, text_input, upload_button, chat_state, img_list], queue=False) - -demo.launch(enable_queue=True) \ No newline at end of file diff --git a/spaces/CVPR/Dual-Key_Backdoor_Attacks/bottom-up-attention-vqa/tools/detection_features_converter.py b/spaces/CVPR/Dual-Key_Backdoor_Attacks/bottom-up-attention-vqa/tools/detection_features_converter.py deleted file mode 100644 index 67923704c1183e2917a1f18cc675e6f1d1b05bf1..0000000000000000000000000000000000000000 --- a/spaces/CVPR/Dual-Key_Backdoor_Attacks/bottom-up-attention-vqa/tools/detection_features_converter.py +++ /dev/null @@ -1,161 +0,0 @@ -""" -Reads in a tsv file with pre-trained bottom up attention features and -stores it in HDF5 format. Also store {image_id: feature_idx} - as a pickle file. 
- -Hierarchy of HDF5 file: - -{ 'image_features': num_images x num_boxes x 2048 array of features - 'image_bb': num_images x num_boxes x 4 array of bounding boxes } -""" -from __future__ import print_function - -import os -import sys -import argparse -sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) - -import base64 -import csv -import h5py -# import cPickle -import _pickle as cPickle -import numpy as np -import utils -import tqdm - -csv.field_size_limit(sys.maxsize) -FIELDNAMES = ['image_id', 'image_w', 'image_h', 'num_boxes', 'boxes', 'features'] - - -def detection_features_converter(dataroot, ver, detector, feature_length, num_fixed_boxes): - infile = os.path.join(dataroot, ver, "trainval_%s_%i.tsv"%(detector, num_fixed_boxes)) - - train_data_file = os.path.join(dataroot, ver, 'train_%s_%i.hdf5'%(detector, num_fixed_boxes)) - val_data_file = os.path.join(dataroot, ver, 'val_%s_%i.hdf5'%(detector, num_fixed_boxes)) - train_indices_file = os.path.join(dataroot, ver, 'train_%s_%i_imgid2idx.pkl'%(detector, num_fixed_boxes)) - val_indices_file = os.path.join(dataroot, ver, 'val_%s_%i_imgid2idx.pkl'%(detector, num_fixed_boxes)) - train_ids_file = os.path.join(dataroot, 'train_ids.pkl') - val_ids_file = os.path.join(dataroot, 'val_ids.pkl') - - h_train = h5py.File(train_data_file, "w") - h_val = h5py.File(val_data_file, "w") - - if os.path.exists(train_ids_file) and os.path.exists(val_ids_file): - train_imgids = cPickle.load(open(train_ids_file, 'rb')) - val_imgids = cPickle.load(open(val_ids_file, 'rb')) - else: - train_imgids = utils.load_imageid(os.path.join(dataroot, 'clean', 'train2014')) - val_imgids = utils.load_imageid(os.path.join(dataroot, 'clean', 'val2014')) - cPickle.dump(train_imgids, open(train_ids_file, 'wb')) - cPickle.dump(val_imgids, open(val_ids_file, 'wb')) - - train_indices = {} - val_indices = {} - - train_img_features = h_train.create_dataset( - 'image_features', (len(train_imgids), num_fixed_boxes, feature_length), 'f') - train_img_bb = h_train.create_dataset( - 'image_bb', (len(train_imgids), num_fixed_boxes, 4), 'f') - train_spatial_img_features = h_train.create_dataset( - 'spatial_features', (len(train_imgids), num_fixed_boxes, 6), 'f') - - val_img_bb = h_val.create_dataset( - 'image_bb', (len(val_imgids), num_fixed_boxes, 4), 'f') - val_img_features = h_val.create_dataset( - 'image_features', (len(val_imgids), num_fixed_boxes, feature_length), 'f') - val_spatial_img_features = h_val.create_dataset( - 'spatial_features', (len(val_imgids), num_fixed_boxes, 6), 'f') - - train_counter = 0 - val_counter = 0 - - print("reading tsv...") - # with open(infile, "r+b") as tsv_in_file: - with open(infile, "r") as tsv_in_file: - reader = csv.DictReader(tsv_in_file, delimiter='\t', fieldnames=FIELDNAMES) - for item in tqdm.tqdm(reader): - item['num_boxes'] = int(item['num_boxes']) - image_id = int(item['image_id']) - image_w = float(item['image_w']) - image_h = float(item['image_h']) - # bboxes = np.frombuffer( - # base64.decodestring(item['boxes']), - # dtype=np.float32).reshape((item['num_boxes'], -1)) - bboxes = np.frombuffer( - base64.b64decode(item['boxes']), - dtype=np.float32).reshape((item['num_boxes'], -1)) - box_width = bboxes[:, 2] - bboxes[:, 0] - box_height = bboxes[:, 3] - bboxes[:, 1] - scaled_width = box_width / image_w - scaled_height = box_height / image_h - scaled_x = bboxes[:, 0] / image_w - scaled_y = bboxes[:, 1] / image_h - - box_width = box_width[..., np.newaxis] - box_height = box_height[..., np.newaxis] - scaled_width = 
scaled_width[..., np.newaxis] - scaled_height = scaled_height[..., np.newaxis] - scaled_x = scaled_x[..., np.newaxis] - scaled_y = scaled_y[..., np.newaxis] - - spatial_features = np.concatenate( - (scaled_x, - scaled_y, - scaled_x + scaled_width, - scaled_y + scaled_height, - scaled_width, - scaled_height), - axis=1) - - if image_id in train_imgids: - train_imgids.remove(image_id) - train_indices[image_id] = train_counter - train_img_bb[train_counter, :, :] = bboxes - # train_img_features[train_counter, :, :] = np.frombuffer( - # base64.decodestring(item['features']), - # dtype=np.float32).reshape((item['num_boxes'], -1)) - train_img_features[train_counter, :, :] = np.frombuffer( - base64.b64decode(item['features']), - dtype=np.float32).reshape((item['num_boxes'], -1)) - train_spatial_img_features[train_counter, :, :] = spatial_features - train_counter += 1 - elif image_id in val_imgids: - val_imgids.remove(image_id) - val_indices[image_id] = val_counter - val_img_bb[val_counter, :, :] = bboxes - # val_img_features[val_counter, :, :] = np.frombuffer( - # base64.decodestring(item['features']), - # dtype=np.float32).reshape((item['num_boxes'], -1)) - val_img_features[val_counter, :, :] = np.frombuffer( - base64.b64decode(item['features']), - dtype=np.float32).reshape((item['num_boxes'], -1)) - val_spatial_img_features[val_counter, :, :] = spatial_features - val_counter += 1 - else: - assert False, 'Unknown image id: %d' % image_id - - if len(train_imgids) != 0: - print('Warning: train_image_ids is not empty') - - if len(val_imgids) != 0: - print('Warning: val_image_ids is not empty') - - cPickle.dump(train_indices, open(train_indices_file, 'wb')) - cPickle.dump(val_indices, open(val_indices_file, 'wb')) - # pickle.dump(train_indices, open(train_indices_file, 'w')) - # pickle.dump(val_indices, open(val_indices_file, 'w')) - h_train.close() - h_val.close() - print("done!") - - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('--dataroot', type=str, default='../data/') - parser.add_argument('--ver', type=str, default='clean', help='version of the VQAv2 dataset to process. "clean" for the original data. default: clean') - parser.add_argument('--detector', type=str, default='R-50') - parser.add_argument('--feat', type=int, default=1024, help='feature size') - parser.add_argument('--nb', type=int, default=36) - args = parser.parse_args() - detection_features_converter(args.dataroot, args.ver, args.detector, args.feat, args.nb) diff --git a/spaces/CVPR/LIVE/thrust/thrust/system/tbb/detail/count.h b/spaces/CVPR/LIVE/thrust/thrust/system/tbb/detail/count.h deleted file mode 100644 index fde1728b77261d75c561b9042ec365281d78cee9..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/thrust/thrust/system/tbb/detail/count.h +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#pragma once - -#include - -// this system inherits count -#include - diff --git a/spaces/CVPR/regionclip-demo/install.sh b/spaces/CVPR/regionclip-demo/install.sh deleted file mode 100644 index 43abad3a5b87ff72b771bbbd07c5c68c3c427c7d..0000000000000000000000000000000000000000 --- a/spaces/CVPR/regionclip-demo/install.sh +++ /dev/null @@ -1,14 +0,0 @@ -pip install dataclasses ftfy regex tqdm timm diffdist spacy -pip install git+https://github.com/lvis-dataset/lvis-api.git -python -m spacy download en_core_web_sm - -CURRENT_DIR=${PWD##*/} -cd ../ -python -m pip install -e $CURRENT_DIR -cd $CURRENT_DIR - -# for tsv loading -pip install Pillow==7.1.2 - -ln -s /mnt/data_storage/coco datasets/coco -ln -s /mnt/data_storage/lvis datasets/lvis \ No newline at end of file diff --git a/spaces/CofAI/CurrencyConverter/README.md b/spaces/CofAI/CurrencyConverter/README.md deleted file mode 100644 index 43919917a03f71fea933b1adb3b32480018ce4b3..0000000000000000000000000000000000000000 --- a/spaces/CofAI/CurrencyConverter/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: CurrencyConverter -emoji: 💱☕💱 -colorFrom: gray -colorTo: red -sdk: static -pinned: false ---- - -This is Super Currency Converter 💱 by CofAI ☕! - -Это супер конвертер валют 💱 от CofAI ☕! \ No newline at end of file diff --git a/spaces/DEEMOSTECH/ChatAvatar/static/js/main.6015292d.js b/spaces/DEEMOSTECH/ChatAvatar/static/js/main.6015292d.js deleted file mode 100644 index 3233933d1b2f1e5f237934b88b15c83cbdb60095..0000000000000000000000000000000000000000 --- a/spaces/DEEMOSTECH/ChatAvatar/static/js/main.6015292d.js +++ /dev/null @@ -1,3 +0,0 @@ -/*! For license information please see main.6015292d.js.LICENSE.txt */ -!function(){var e={498:function(e){e.exports=function(){"use strict";var e=function(t,n){return e=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(e,t){e.__proto__=t}||function(e,t){for(var n in t)Object.prototype.hasOwnProperty.call(t,n)&&(e[n]=t[n])},e(t,n)};function t(t,n){if("function"!==typeof n&&null!==n)throw new TypeError("Class extends value "+String(n)+" is not a constructor or null");function r(){this.constructor=t}e(t,n),t.prototype=null===n?Object.create(n):(r.prototype=n.prototype,new r)}var n=function(){return n=Object.assign||function(e){for(var t,n=1,r=arguments.length;n0&&i[i.length-1])&&(6===A[0]||2===A[0])){a=0;continue}if(3===A[0]&&(!i||A[1]>i[0]&&A[1]=55296&&i<=56319&&n>10),a%1024+56320)),(i+1===n||r.length>16384)&&(A+=String.fromCharCode.apply(String,r),r.length=0)}return A},c="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/",d="undefined"===typeof Uint8Array?[]:new Uint8Array(256),h=0;h>4,u[s++]=(15&r)<<4|i>>2,u[s++]=(3&i)<<6|63&A;return l},v=function(e){for(var t=e.length,n=[],r=0;r>w,x=(1<>w)+32,S=65536>>B,E=(1<=0){if(e<55296||e>56319&&e<=65535)return t=((t=this.index[e>>w])<<_)+(e&x),this.data[t];if(e<=65535)return t=((t=this.index[b+(e-55296>>w)])<<_)+(e&x),this.data[t];if(e>B),t=this.index[t],t+=e>>w&E,t=((t=this.index[t])<<_)+(e&x),this.data[t];if(e<=1114111)return this.data[this.highValueIndex]}return this.errorValue},e}(),k="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/",Q="undefined"===typeof Uint8Array?[]:new Uint8Array(256),L=0;LD?(i.push(!0),a-=D):i.push(!1),-1!==["normal","auto","loose"].indexOf(t)&&-1!==[8208,8211,12316,12448].indexOf(e))return r.push(A),n.push(Y);if(a===P||a===K){if(0===A)return r.push(A),n.push(ue);var o=n[A-1];return-1===Qe.indexOf(o)?(r.push(r[A-1]),n.push(o)):(r.push(A),n.push(ue))}return 
r.push(A),a===ce?n.push("strict"===t?te:me):a===_e||a===le?n.push(ue):a===be?e>=131072&&e<=196605||e>=196608&&e<=262141?n.push(me):n.push(ue):void n.push(a)})),[r,n,i]},Re=function(e,t,n,r){var i=r[n];if(Array.isArray(e)?-1!==e.indexOf(i):e===i)for(var A=n;A<=r.length;){if((s=r[++A])===t)return!0;if(s!==G)break}if(i===G)for(A=n;A>0;){var a=r[--A];if(Array.isArray(e)?-1!==e.indexOf(a):e===a)for(var o=n;o<=r.length;){var s;if((s=r[++o])===t)return!0;if(s!==G)break}if(a!==G)break}return!1},He=function(e,t){for(var n=e;n>=0;){var r=t[n];if(r!==G)return r;n--}return 0},Pe=function(e,t,n,r,i){if(0===n[r])return Se;var A=r-1;if(Array.isArray(i)&&!0===i[A])return Se;var a=A-1,o=A+1,s=t[A],l=a>=0?t[a]:0,u=t[o];if(s===R&&u===H)return Se;if(-1!==Fe.indexOf(s))return Ce;if(-1!==Fe.indexOf(u))return Se;if(-1!==Te.indexOf(u))return Se;if(He(A,t)===V)return Ee;if(Ue.get(e[A])===K)return Se;if((s===de||s===he)&&Ue.get(e[o])===K)return Se;if(s===O||u===O)return Se;if(s===z)return Se;if(-1===[G,j,q].indexOf(s)&&u===z)return Se;if(-1!==[J,Z,$,ie,se].indexOf(u))return Se;if(He(A,t)===ne)return Se;if(Re(re,ne,A,t))return Se;if(Re([J,Z],te,A,t))return Se;if(Re(W,W,A,t))return Se;if(s===G)return Ee;if(s===re||u===re)return Se;if(u===Y||s===Y)return Ee;if(-1!==[j,q,te].indexOf(u)||s===X)return Se;if(l===ge&&-1!==De.indexOf(s))return Se;if(s===se&&u===ge)return Se;if(u===ee)return Se;if(-1!==Me.indexOf(u)&&s===Ae||-1!==Me.indexOf(s)&&u===Ae)return Se;if(s===oe&&-1!==[me,de,he].indexOf(u)||-1!==[me,de,he].indexOf(s)&&u===ae)return Se;if(-1!==Me.indexOf(s)&&-1!==ke.indexOf(u)||-1!==ke.indexOf(s)&&-1!==Me.indexOf(u))return Se;if(-1!==[oe,ae].indexOf(s)&&(u===Ae||-1!==[ne,q].indexOf(u)&&t[o+1]===Ae)||-1!==[ne,q].indexOf(s)&&u===Ae||s===Ae&&-1!==[Ae,se,ie].indexOf(u))return Se;if(-1!==[Ae,se,ie,J,Z].indexOf(u))for(var c=A;c>=0;){if((d=t[c])===Ae)return Se;if(-1===[se,ie].indexOf(d))break;c--}if(-1!==[oe,ae].indexOf(u))for(c=-1!==[J,Z].indexOf(s)?a:A;c>=0;){var d;if((d=t[c])===Ae)return Se;if(-1===[se,ie].indexOf(d))break;c--}if(ve===s&&-1!==[ve,ye,fe,pe].indexOf(u)||-1!==[ye,fe].indexOf(s)&&-1!==[ye,we].indexOf(u)||-1!==[we,pe].indexOf(s)&&u===we)return Se;if(-1!==Le.indexOf(s)&&-1!==[ee,ae].indexOf(u)||-1!==Le.indexOf(u)&&s===oe)return Se;if(-1!==Me.indexOf(s)&&-1!==Me.indexOf(u))return Se;if(s===ie&&-1!==Me.indexOf(u))return Se;if(-1!==Me.concat(Ae).indexOf(s)&&u===ne&&-1===xe.indexOf(e[o])||-1!==Me.concat(Ae).indexOf(u)&&s===Z)return Se;if(s===Be&&u===Be){for(var h=n[A],f=1;h>0&&t[--h]===Be;)f++;if(f%2!==0)return Se}return s===de&&u===he?Se:Ee},Ne=function(e,t){t||(t={lineBreak:"normal",wordBreak:"normal"});var n=Ie(e,t.lineBreak),r=n[0],i=n[1],A=n[2];"break-all"!==t.wordBreak&&"break-word"!==t.wordBreak||(i=i.map((function(e){return-1!==[Ae,ue,_e].indexOf(e)?me:e})));var a="keep-all"===t.wordBreak?A.map((function(t,n){return t&&e[n]>=19968&&e[n]<=40959})):void 0;return[r,i,a]},Oe=function(){function e(e,t,n,r){this.codePoints=e,this.required=t===Ce,this.start=n,this.end=r}return e.prototype.slice=function(){return u.apply(void 0,this.codePoints.slice(this.start,this.end))},e}(),Ve=function(e,t){var n=l(e),r=Ne(n,t),i=r[0],A=r[1],a=r[2],o=n.length,s=0,u=0;return{next:function(){if(u>=o)return{done:!0,value:null};for(var e=Se;u=Dt&&e<=57},jt=function(e){return e>=55296&&e<=57343},Xt=function(e){return Wt(e)||e>=Ot&&e<=zt||e>=It&&e<=Ht},qt=function(e){return e>=It&&e<=Nt},Yt=function(e){return e>=Ot&&e<=Kt},Jt=function(e){return qt(e)||Yt(e)},Zt=function(e){return e>=wt},$t=function(e){return 
e===je||e===Ye||e===Je},en=function(e){return Jt(e)||Zt(e)||e===at},tn=function(e){return en(e)||Wt(e)||e===ot},nn=function(e){return e>=Ut&&e<=Mt||e===Ft||e>=Tt&&e<=kt||e===Qt},rn=function(e,t){return e===qe&&t!==je},An=function(e,t,n){return e===ot?en(t)||rn(t,n):!!en(e)||!(e!==qe||!rn(e,t))},an=function(e,t,n){return e===bt||e===ot?!!Wt(t)||t===Et&&Wt(n):Wt(e===Et?t:e)},on=function(e){var t=0,n=1;e[t]!==bt&&e[t]!==ot||(e[t]===ot&&(n=-1),t++);for(var r=[];Wt(e[t]);)r.push(e[t++]);var i=r.length?parseInt(u.apply(void 0,r),10):0;e[t]===Et&&t++;for(var A=[];Wt(e[t]);)A.push(e[t++]);var a=A.length,o=a?parseInt(u.apply(void 0,A),10):0;e[t]!==Vt&&e[t]!==Rt||t++;var s=1;e[t]!==bt&&e[t]!==ot||(e[t]===ot&&(s=-1),t++);for(var l=[];Wt(e[t]);)l.push(e[t++]);var c=l.length?parseInt(u.apply(void 0,l),10):0;return n*(i+o*Math.pow(10,-a))*Math.pow(10,s*c)},sn={type:2},ln={type:3},un={type:4},cn={type:13},dn={type:8},hn={type:21},fn={type:9},pn={type:10},gn={type:11},mn={type:12},vn={type:14},yn={type:23},wn={type:1},Bn={type:25},_n={type:24},bn={type:26},xn={type:27},Cn={type:28},Sn={type:29},En={type:31},Un={type:32},Mn=function(){function e(){this._value=[]}return e.prototype.write=function(e){this._value=this._value.concat(l(e))},e.prototype.read=function(){for(var e=[],t=this.consumeToken();t!==Un;)e.push(t),t=this.consumeToken();return e},e.prototype.consumeToken=function(){var e=this.consumeCodePoint();switch(e){case Ze:return this.consumeStringToken(Ze);case et:var t=this.peekCodePoint(0),n=this.peekCodePoint(1),r=this.peekCodePoint(2);if(tn(t)||rn(n,r)){var i=An(t,n,r)?Ge:ze;return{type:5,value:this.consumeName(),flags:i}}break;case tt:if(this.peekCodePoint(0)===$e)return this.consumeCodePoint(),cn;break;case rt:return this.consumeStringToken(rt);case it:return sn;case At:return ln;case _t:if(this.peekCodePoint(0)===$e)return this.consumeCodePoint(),vn;break;case bt:if(an(e,this.peekCodePoint(0),this.peekCodePoint(1)))return this.reconsumeCodePoint(e),this.consumeNumericToken();break;case xt:return un;case ot:var A=e,a=this.peekCodePoint(0),o=this.peekCodePoint(1);if(an(A,a,o))return this.reconsumeCodePoint(e),this.consumeNumericToken();if(An(A,a,o))return this.reconsumeCodePoint(e),this.consumeIdentLikeToken();if(a===ot&&o===ut)return this.consumeCodePoint(),this.consumeCodePoint(),_n;break;case Et:if(an(e,this.peekCodePoint(0),this.peekCodePoint(1)))return this.reconsumeCodePoint(e),this.consumeNumericToken();break;case Xe:if(this.peekCodePoint(0)===_t)for(this.consumeCodePoint();;){var s=this.consumeCodePoint();if(s===_t&&(s=this.consumeCodePoint())===Xe)return this.consumeToken();if(s===Lt)return this.consumeToken()}break;case Ct:return bn;case St:return xn;case lt:if(this.peekCodePoint(0)===st&&this.peekCodePoint(1)===ot&&this.peekCodePoint(2)===ot)return this.consumeCodePoint(),this.consumeCodePoint(),Bn;break;case ct:var l=this.peekCodePoint(0),c=this.peekCodePoint(1),d=this.peekCodePoint(2);if(An(l,c,d))return{type:7,value:this.consumeName()};break;case dt:return Cn;case qe:if(rn(e,this.peekCodePoint(0)))return this.reconsumeCodePoint(e),this.consumeIdentLikeToken();break;case ht:return Sn;case ft:if(this.peekCodePoint(0)===$e)return this.consumeCodePoint(),dn;break;case pt:return gn;case mt:return mn;case Pt:case Gt:var h=this.peekCodePoint(0),f=this.peekCodePoint(1);return h!==bt||!Xt(f)&&f!==gt||(this.consumeCodePoint(),this.consumeUnicodeRangeToken()),this.reconsumeCodePoint(e),this.consumeIdentLikeToken();case vt:if(this.peekCodePoint(0)===$e)return 
this.consumeCodePoint(),fn;if(this.peekCodePoint(0)===vt)return this.consumeCodePoint(),hn;break;case yt:if(this.peekCodePoint(0)===$e)return this.consumeCodePoint(),pn;break;case Lt:return Un}return $t(e)?(this.consumeWhiteSpace(),En):Wt(e)?(this.reconsumeCodePoint(e),this.consumeNumericToken()):en(e)?(this.reconsumeCodePoint(e),this.consumeIdentLikeToken()):{type:6,value:u(e)}},e.prototype.consumeCodePoint=function(){var e=this._value.shift();return"undefined"===typeof e?-1:e},e.prototype.reconsumeCodePoint=function(e){this._value.unshift(e)},e.prototype.peekCodePoint=function(e){return e>=this._value.length?-1:this._value[e]},e.prototype.consumeUnicodeRangeToken=function(){for(var e=[],t=this.consumeCodePoint();Xt(t)&&e.length<6;)e.push(t),t=this.consumeCodePoint();for(var n=!1;t===gt&&e.length<6;)e.push(t),t=this.consumeCodePoint(),n=!0;if(n)return{type:30,start:parseInt(u.apply(void 0,e.map((function(e){return e===gt?Dt:e}))),16),end:parseInt(u.apply(void 0,e.map((function(e){return e===gt?zt:e}))),16)};var r=parseInt(u.apply(void 0,e),16);if(this.peekCodePoint(0)===ot&&Xt(this.peekCodePoint(1))){this.consumeCodePoint(),t=this.consumeCodePoint();for(var i=[];Xt(t)&&i.length<6;)i.push(t),t=this.consumeCodePoint();return{type:30,start:r,end:parseInt(u.apply(void 0,i),16)}}return{type:30,start:r,end:r}},e.prototype.consumeIdentLikeToken=function(){var e=this.consumeName();return"url"===e.toLowerCase()&&this.peekCodePoint(0)===it?(this.consumeCodePoint(),this.consumeUrlToken()):this.peekCodePoint(0)===it?(this.consumeCodePoint(),{type:19,value:e}):{type:20,value:e}},e.prototype.consumeUrlToken=function(){var e=[];if(this.consumeWhiteSpace(),this.peekCodePoint(0)===Lt)return{type:22,value:""};var t=this.peekCodePoint(0);if(t===rt||t===Ze){var n=this.consumeStringToken(this.consumeCodePoint());return 0===n.type&&(this.consumeWhiteSpace(),this.peekCodePoint(0)===Lt||this.peekCodePoint(0)===At)?(this.consumeCodePoint(),{type:22,value:n.value}):(this.consumeBadUrlRemnants(),yn)}for(;;){var r=this.consumeCodePoint();if(r===Lt||r===At)return{type:22,value:u.apply(void 0,e)};if($t(r))return this.consumeWhiteSpace(),this.peekCodePoint(0)===Lt||this.peekCodePoint(0)===At?(this.consumeCodePoint(),{type:22,value:u.apply(void 0,e)}):(this.consumeBadUrlRemnants(),yn);if(r===Ze||r===rt||r===it||nn(r))return this.consumeBadUrlRemnants(),yn;if(r===qe){if(!rn(r,this.peekCodePoint(0)))return this.consumeBadUrlRemnants(),yn;e.push(this.consumeEscapedCodePoint())}else e.push(r)}},e.prototype.consumeWhiteSpace=function(){for(;$t(this.peekCodePoint(0));)this.consumeCodePoint()},e.prototype.consumeBadUrlRemnants=function(){for(;;){var e=this.consumeCodePoint();if(e===At||e===Lt)return;rn(e,this.peekCodePoint(0))&&this.consumeEscapedCodePoint()}},e.prototype.consumeStringSlice=function(e){for(var t=5e4,n="";e>0;){var r=Math.min(t,e);n+=u.apply(void 0,this._value.splice(0,r)),e-=r}return this._value.shift(),n},e.prototype.consumeStringToken=function(e){for(var t="",n=0;;){var r=this._value[n];if(r===Lt||void 0===r||r===e)return{type:0,value:t+=this.consumeStringSlice(n)};if(r===je)return this._value.splice(0,n),wn;if(r===qe){var i=this._value[n+1];i!==Lt&&void 0!==i&&(i===je?(t+=this.consumeStringSlice(n),n=-1,this._value.shift()):rn(r,i)&&(t+=this.consumeStringSlice(n),t+=u(this.consumeEscapedCodePoint()),n=-1))}n++}},e.prototype.consumeNumber=function(){var 
e=[],t=Ke,n=this.peekCodePoint(0);for(n!==bt&&n!==ot||e.push(this.consumeCodePoint());Wt(this.peekCodePoint(0));)e.push(this.consumeCodePoint());n=this.peekCodePoint(0);var r=this.peekCodePoint(1);if(n===Et&&Wt(r))for(e.push(this.consumeCodePoint(),this.consumeCodePoint()),t=We;Wt(this.peekCodePoint(0));)e.push(this.consumeCodePoint());n=this.peekCodePoint(0),r=this.peekCodePoint(1);var i=this.peekCodePoint(2);if((n===Vt||n===Rt)&&((r===bt||r===ot)&&Wt(i)||Wt(r)))for(e.push(this.consumeCodePoint(),this.consumeCodePoint()),t=We;Wt(this.peekCodePoint(0));)e.push(this.consumeCodePoint());return[on(e),t]},e.prototype.consumeNumericToken=function(){var e=this.consumeNumber(),t=e[0],n=e[1],r=this.peekCodePoint(0),i=this.peekCodePoint(1),A=this.peekCodePoint(2);return An(r,i,A)?{type:15,number:t,flags:n,unit:this.consumeName()}:r===nt?(this.consumeCodePoint(),{type:16,number:t,flags:n}):{type:17,number:t,flags:n}},e.prototype.consumeEscapedCodePoint=function(){var e=this.consumeCodePoint();if(Xt(e)){for(var t=u(e);Xt(this.peekCodePoint(0))&&t.length<6;)t+=u(this.consumeCodePoint());$t(this.peekCodePoint(0))&&this.consumeCodePoint();var n=parseInt(t,16);return 0===n||jt(n)||n>1114111?Bt:n}return e===Lt?Bt:e},e.prototype.consumeName=function(){for(var e="";;){var t=this.consumeCodePoint();if(tn(t))e+=u(t);else{if(!rn(t,this.peekCodePoint(0)))return this.reconsumeCodePoint(t),e;e+=u(this.consumeEscapedCodePoint())}}},e}(),Fn=function(){function e(e){this._tokens=e}return e.create=function(t){var n=new Mn;return n.write(t),new e(n.read())},e.parseValue=function(t){return e.create(t).parseComponentValue()},e.parseValues=function(t){return e.create(t).parseComponentValues()},e.prototype.parseComponentValue=function(){for(var e=this.consumeToken();31===e.type;)e=this.consumeToken();if(32===e.type)throw new SyntaxError("Error parsing CSS component value, unexpected EOF");this.reconsumeToken(e);var t=this.consumeComponentValue();do{e=this.consumeToken()}while(31===e.type);if(32===e.type)return t;throw new SyntaxError("Error parsing CSS component value, multiple values found when expecting only one")},e.prototype.parseComponentValues=function(){for(var e=[];;){var t=this.consumeComponentValue();if(32===t.type)return e;e.push(t),e.push()}},e.prototype.consumeComponentValue=function(){var e=this.consumeToken();switch(e.type){case 11:case 28:case 2:return this.consumeSimpleBlock(e.type);case 19:return this.consumeFunction(e)}return e},e.prototype.consumeSimpleBlock=function(e){for(var t={type:e,values:[]},n=this.consumeToken();;){if(32===n.type||Pn(n,e))return t;this.reconsumeToken(n),t.values.push(this.consumeComponentValue()),n=this.consumeToken()}},e.prototype.consumeFunction=function(e){for(var t={name:e.value,values:[],type:18};;){var n=this.consumeToken();if(32===n.type||3===n.type)return t;this.reconsumeToken(n),t.values.push(this.consumeComponentValue())}},e.prototype.consumeToken=function(){var e=this._tokens.shift();return"undefined"===typeof e?Un:e},e.prototype.reconsumeToken=function(e){this._tokens.unshift(e)},e}(),Tn=function(e){return 15===e.type},kn=function(e){return 17===e.type},Qn=function(e){return 20===e.type},Ln=function(e){return 0===e.type},Dn=function(e,t){return Qn(e)&&e.value===t},In=function(e){return 31!==e.type},Rn=function(e){return 31!==e.type&&4!==e.type},Hn=function(e){var t=[],n=[];return e.forEach((function(e){if(4===e.type){if(0===n.length)throw new Error("Error parsing function args, zero tokens for arg");return 
t.push(n),void(n=[])}31!==e.type&&n.push(e)})),n.length&&t.push(n),t},Pn=function(e,t){return 11===t&&12===e.type||28===t&&29===e.type||2===t&&3===e.type},Nn=function(e){return 17===e.type||15===e.type},On=function(e){return 16===e.type||Nn(e)},Vn=function(e){return e.length>1?[e[0],e[1]]:[e[0]]},zn={type:17,number:0,flags:Ke},Gn={type:16,number:50,flags:Ke},Kn={type:16,number:100,flags:Ke},Wn=function(e,t,n){var r=e[0],i=e[1];return[jn(r,t),jn("undefined"!==typeof i?i:r,n)]},jn=function(e,t){if(16===e.type)return e.number/100*t;if(Tn(e))switch(e.unit){case"rem":case"em":return 16*e.number;default:return e.number}return e.number},Xn="deg",qn="grad",Yn="rad",Jn="turn",Zn={name:"angle",parse:function(e,t){if(15===t.type)switch(t.unit){case Xn:return Math.PI*t.number/180;case qn:return Math.PI/200*t.number;case Yn:return t.number;case Jn:return 2*Math.PI*t.number}throw new Error("Unsupported angle type")}},$n=function(e){return 15===e.type&&(e.unit===Xn||e.unit===qn||e.unit===Yn||e.unit===Jn)},er=function(e){switch(e.filter(Qn).map((function(e){return e.value})).join(" ")){case"to bottom right":case"to right bottom":case"left top":case"top left":return[zn,zn];case"to top":case"bottom":return tr(0);case"to bottom left":case"to left bottom":case"right top":case"top right":return[zn,Kn];case"to right":case"left":return tr(90);case"to top left":case"to left top":case"right bottom":case"bottom right":return[Kn,Kn];case"to bottom":case"top":return tr(180);case"to top right":case"to right top":case"left bottom":case"bottom left":return[Kn,zn];case"to left":case"right":return tr(270)}return 0},tr=function(e){return Math.PI*e/180},nr={name:"color",parse:function(e,t){if(18===t.type){var n=ur[t.name];if("undefined"===typeof n)throw new Error('Attempting to parse an unsupported color function "'+t.name+'"');return n(e,t.values)}if(5===t.type){if(3===t.value.length){var r=t.value.substring(0,1),i=t.value.substring(1,2),A=t.value.substring(2,3);return Ar(parseInt(r+r,16),parseInt(i+i,16),parseInt(A+A,16),1)}if(4===t.value.length){r=t.value.substring(0,1),i=t.value.substring(1,2),A=t.value.substring(2,3);var a=t.value.substring(3,4);return Ar(parseInt(r+r,16),parseInt(i+i,16),parseInt(A+A,16),parseInt(a+a,16)/255)}if(6===t.value.length)return r=t.value.substring(0,2),i=t.value.substring(2,4),A=t.value.substring(4,6),Ar(parseInt(r,16),parseInt(i,16),parseInt(A,16),1);if(8===t.value.length)return r=t.value.substring(0,2),i=t.value.substring(2,4),A=t.value.substring(4,6),a=t.value.substring(6,8),Ar(parseInt(r,16),parseInt(i,16),parseInt(A,16),parseInt(a,16)/255)}if(20===t.type){var o=dr[t.value.toUpperCase()];if("undefined"!==typeof o)return o}return dr.TRANSPARENT}},rr=function(e){return 0===(255&e)},ir=function(e){var t=255&e,n=255&e>>8,r=255&e>>16,i=255&e>>24;return t<255?"rgba("+i+","+r+","+n+","+t/255+")":"rgb("+i+","+r+","+n+")"},Ar=function(e,t,n,r){return(e<<24|t<<16|n<<8|Math.round(255*r)<<0)>>>0},ar=function(e,t){if(17===e.type)return e.number;if(16===e.type){var n=3===t?1:255;return 3===t?e.number/100*n:Math.round(e.number/100*n)}return 0},or=function(e,t){var n=t.filter(Rn);if(3===n.length){var r=n.map(ar),i=r[0],A=r[1],a=r[2];return Ar(i,A,a,1)}if(4===n.length){var o=n.map(ar),s=(i=o[0],A=o[1],a=o[2],o[3]);return Ar(i,A,a,s)}return 0};function sr(e,t,n){return n<0&&(n+=1),n>=1&&(n-=1),n<1/6?(t-e)*n*6+e:n<.5?t:n<2/3?6*(t-e)*(2/3-n)+e:e}var lr=function(e,t){var 
n=t.filter(Rn),r=n[0],i=n[1],A=n[2],a=n[3],o=(17===r.type?tr(r.number):Zn.parse(e,r))/(2*Math.PI),s=On(i)?i.number/100:0,l=On(A)?A.number/100:0,u="undefined"!==typeof a&&On(a)?jn(a,1):1;if(0===s)return Ar(255*l,255*l,255*l,1);var c=l<=.5?l*(s+1):l+s-l*s,d=2*l-c,h=sr(d,c,o+1/3),f=sr(d,c,o),p=sr(d,c,o-1/3);return Ar(255*h,255*f,255*p,u)},ur={hsl:lr,hsla:lr,rgb:or,rgba:or},cr=function(e,t){return nr.parse(e,Fn.create(t).parseComponentValue())},dr={ALICEBLUE:4042850303,ANTIQUEWHITE:4209760255,AQUA:16777215,AQUAMARINE:2147472639,AZURE:4043309055,BEIGE:4126530815,BISQUE:4293182719,BLACK:255,BLANCHEDALMOND:4293643775,BLUE:65535,BLUEVIOLET:2318131967,BROWN:2771004159,BURLYWOOD:3736635391,CADETBLUE:1604231423,CHARTREUSE:2147418367,CHOCOLATE:3530104575,CORAL:4286533887,CORNFLOWERBLUE:1687547391,CORNSILK:4294499583,CRIMSON:3692313855,CYAN:16777215,DARKBLUE:35839,DARKCYAN:9145343,DARKGOLDENROD:3095837695,DARKGRAY:2846468607,DARKGREEN:6553855,DARKGREY:2846468607,DARKKHAKI:3182914559,DARKMAGENTA:2332068863,DARKOLIVEGREEN:1433087999,DARKORANGE:4287365375,DARKORCHID:2570243327,DARKRED:2332033279,DARKSALMON:3918953215,DARKSEAGREEN:2411499519,DARKSLATEBLUE:1211993087,DARKSLATEGRAY:793726975,DARKSLATEGREY:793726975,DARKTURQUOISE:13554175,DARKVIOLET:2483082239,DEEPPINK:4279538687,DEEPSKYBLUE:12582911,DIMGRAY:1768516095,DIMGREY:1768516095,DODGERBLUE:512819199,FIREBRICK:2988581631,FLORALWHITE:4294635775,FORESTGREEN:579543807,FUCHSIA:4278255615,GAINSBORO:3705462015,GHOSTWHITE:4177068031,GOLD:4292280575,GOLDENROD:3668254975,GRAY:2155905279,GREEN:8388863,GREENYELLOW:2919182335,GREY:2155905279,HONEYDEW:4043305215,HOTPINK:4285117695,INDIANRED:3445382399,INDIGO:1258324735,IVORY:4294963455,KHAKI:4041641215,LAVENDER:3873897215,LAVENDERBLUSH:4293981695,LAWNGREEN:2096890111,LEMONCHIFFON:4294626815,LIGHTBLUE:2916673279,LIGHTCORAL:4034953471,LIGHTCYAN:3774873599,LIGHTGOLDENRODYELLOW:4210742015,LIGHTGRAY:3553874943,LIGHTGREEN:2431553791,LIGHTGREY:3553874943,LIGHTPINK:4290167295,LIGHTSALMON:4288707327,LIGHTSEAGREEN:548580095,LIGHTSKYBLUE:2278488831,LIGHTSLATEGRAY:2005441023,LIGHTSLATEGREY:2005441023,LIGHTSTEELBLUE:2965692159,LIGHTYELLOW:4294959359,LIME:16711935,LIMEGREEN:852308735,LINEN:4210091775,MAGENTA:4278255615,MAROON:2147483903,MEDIUMAQUAMARINE:1724754687,MEDIUMBLUE:52735,MEDIUMORCHID:3126187007,MEDIUMPURPLE:2473647103,MEDIUMSEAGREEN:1018393087,MEDIUMSLATEBLUE:2070474495,MEDIUMSPRINGGREEN:16423679,MEDIUMTURQUOISE:1221709055,MEDIUMVIOLETRED:3340076543,MIDNIGHTBLUE:421097727,MINTCREAM:4127193855,MISTYROSE:4293190143,MOCCASIN:4293178879,NAVAJOWHITE:4292783615,NAVY:33023,OLDLACE:4260751103,OLIVE:2155872511,OLIVEDRAB:1804477439,ORANGE:4289003775,ORANGERED:4282712319,ORCHID:3664828159,PALEGOLDENROD:4008225535,PALEGREEN:2566625535,PALETURQUOISE:2951671551,PALEVIOLETRED:3681588223,PAPAYAWHIP:4293907967,PEACHPUFF:4292524543,PERU:3448061951,PINK:4290825215,PLUM:3718307327,POWDERBLUE:2967529215,PURPLE:2147516671,REBECCAPURPLE:1714657791,RED:4278190335,ROSYBROWN:3163525119,ROYALBLUE:1097458175,SADDLEBROWN:2336560127,SALMON:4202722047,SANDYBROWN:4104413439,SEAGREEN:780883967,SEASHELL:4294307583,SIENNA:2689740287,SILVER:3233857791,SKYBLUE:2278484991,SLATEBLUE:1784335871,SLATEGRAY:1887473919,SLATEGREY:1887473919,SNOW:4294638335,SPRINGGREEN:16744447,STEELBLUE:1182971135,TAN:3535047935,TEAL:8421631,THISTLE:3636451583,TOMATO:4284696575,TRANSPARENT:0,TURQUOISE:1088475391,VIOLET:4001558271,WHEAT:4125012991,WHITE:4294967295,WHITESMOKE:4126537215,YELLOW:4294902015,YELLOWGREEN:2597139199},hr={name:"background-clip",initialValue:"border-box",p
refix:!1,type:1,parse:function(e,t){return t.map((function(e){if(Qn(e))switch(e.value){case"padding-box":return 1;case"content-box":return 2}return 0}))}},fr={name:"background-color",initialValue:"transparent",prefix:!1,type:3,format:"color"},pr=function(e,t){var n=nr.parse(e,t[0]),r=t[1];return r&&On(r)?{color:n,stop:r}:{color:n,stop:null}},gr=function(e,t){var n=e[0],r=e[e.length-1];null===n.stop&&(n.stop=zn),null===r.stop&&(r.stop=Kn);for(var i=[],A=0,a=0;aA?i.push(s):i.push(A),A=s}else i.push(null)}var l=null;for(a=0;ae.optimumDistance)?{optimumCorner:t,optimumDistance:o}:e}),{optimumDistance:i?1/0:-1/0,optimumCorner:null}).optimumCorner},Br=function(e,t,n,r,i){var A=0,a=0;switch(e.size){case 0:0===e.shape?A=a=Math.min(Math.abs(t),Math.abs(t-r),Math.abs(n),Math.abs(n-i)):1===e.shape&&(A=Math.min(Math.abs(t),Math.abs(t-r)),a=Math.min(Math.abs(n),Math.abs(n-i)));break;case 2:if(0===e.shape)A=a=Math.min(yr(t,n),yr(t,n-i),yr(t-r,n),yr(t-r,n-i));else if(1===e.shape){var o=Math.min(Math.abs(n),Math.abs(n-i))/Math.min(Math.abs(t),Math.abs(t-r)),s=wr(r,i,t,n,!0),l=s[0],u=s[1];a=o*(A=yr(l-t,(u-n)/o))}break;case 1:0===e.shape?A=a=Math.max(Math.abs(t),Math.abs(t-r),Math.abs(n),Math.abs(n-i)):1===e.shape&&(A=Math.max(Math.abs(t),Math.abs(t-r)),a=Math.max(Math.abs(n),Math.abs(n-i)));break;case 3:if(0===e.shape)A=a=Math.max(yr(t,n),yr(t,n-i),yr(t-r,n),yr(t-r,n-i));else if(1===e.shape){o=Math.max(Math.abs(n),Math.abs(n-i))/Math.max(Math.abs(t),Math.abs(t-r));var c=wr(r,i,t,n,!1);l=c[0],u=c[1],a=o*(A=yr(l-t,(u-n)/o))}}return Array.isArray(e.size)&&(A=jn(e.size[0],r),a=2===e.size.length?jn(e.size[1],i):A),[A,a]},_r=function(e,t){var n=tr(180),r=[];return Hn(t).forEach((function(t,i){if(0===i){var A=t[0];if(20===A.type&&-1!==["top","left","right","bottom"].indexOf(A.value))return void(n=er(t));if($n(A))return void(n=(Zn.parse(e,A)+tr(270))%tr(360))}var a=pr(e,t);r.push(a)})),{angle:n,stops:r,type:1}},br="closest-side",xr="farthest-side",Cr="closest-corner",Sr="farthest-corner",Er="circle",Ur="ellipse",Mr="cover",Fr="contain",Tr=function(e,t){var n=0,r=3,i=[],A=[];return Hn(t).forEach((function(t,a){var o=!0;if(0===a?o=t.reduce((function(e,t){if(Qn(t))switch(t.value){case"center":return A.push(Gn),!1;case"top":case"left":return A.push(zn),!1;case"right":case"bottom":return A.push(Kn),!1}else if(On(t)||Nn(t))return A.push(t),!1;return e}),o):1===a&&(o=t.reduce((function(e,t){if(Qn(t))switch(t.value){case Er:return n=0,!1;case Ur:return n=1,!1;case Fr:case br:return r=0,!1;case xr:return r=1,!1;case Cr:return r=2,!1;case Mr:case Sr:return r=3,!1}else if(Nn(t)||On(t))return Array.isArray(r)||(r=[]),r.push(t),!1;return e}),o)),o){var s=pr(e,t);i.push(s)}})),{size:r,shape:n,stops:i,position:A,type:2}},kr=function(e){return 1===e.type},Qr=function(e){return 2===e.type},Lr={name:"image",parse:function(e,t){if(22===t.type){var n={url:t.value,type:0};return e.cache.addImage(t.value),n}if(18===t.type){var r=Rr[t.name];if("undefined"===typeof r)throw new Error('Attempting to parse an unsupported image function "'+t.name+'"');return r(e,t.values)}throw new Error("Unsupported image type "+t.type)}};function Dr(e){return!(20===e.type&&"none"===e.value)&&(18!==e.type||!!Rr[e.name])}var Ir,Rr={"linear-gradient":function(e,t){var n=tr(180),r=[];return Hn(t).forEach((function(t,i){if(0===i){var A=t[0];if(20===A.type&&"to"===A.value)return void(n=er(t));if($n(A))return void(n=Zn.parse(e,A))}var 
a=pr(e,t);r.push(a)})),{angle:n,stops:r,type:1}},"-moz-linear-gradient":_r,"-ms-linear-gradient":_r,"-o-linear-gradient":_r,"-webkit-linear-gradient":_r,"radial-gradient":function(e,t){var n=0,r=3,i=[],A=[];return Hn(t).forEach((function(t,a){var o=!0;if(0===a){var s=!1;o=t.reduce((function(e,t){if(s)if(Qn(t))switch(t.value){case"center":return A.push(Gn),e;case"top":case"left":return A.push(zn),e;case"right":case"bottom":return A.push(Kn),e}else(On(t)||Nn(t))&&A.push(t);else if(Qn(t))switch(t.value){case Er:return n=0,!1;case Ur:return n=1,!1;case"at":return s=!0,!1;case br:return r=0,!1;case Mr:case xr:return r=1,!1;case Fr:case Cr:return r=2,!1;case Sr:return r=3,!1}else if(Nn(t)||On(t))return Array.isArray(r)||(r=[]),r.push(t),!1;return e}),o)}if(o){var l=pr(e,t);i.push(l)}})),{size:r,shape:n,stops:i,position:A,type:2}},"-moz-radial-gradient":Tr,"-ms-radial-gradient":Tr,"-o-radial-gradient":Tr,"-webkit-radial-gradient":Tr,"-webkit-gradient":function(e,t){var n=tr(180),r=[],i=1,A=0,a=3,o=[];return Hn(t).forEach((function(t,n){var A=t[0];if(0===n){if(Qn(A)&&"linear"===A.value)return void(i=1);if(Qn(A)&&"radial"===A.value)return void(i=2)}if(18===A.type)if("from"===A.name){var a=nr.parse(e,A.values[0]);r.push({stop:zn,color:a})}else if("to"===A.name)a=nr.parse(e,A.values[0]),r.push({stop:Kn,color:a});else if("color-stop"===A.name){var o=A.values.filter(Rn);if(2===o.length){a=nr.parse(e,o[1]);var s=o[0];kn(s)&&r.push({stop:{type:16,number:100*s.number,flags:s.flags},color:a})}}})),1===i?{angle:(n+tr(180))%tr(360),stops:r,type:i}:{size:a,shape:A,stops:r,position:o,type:i}}},Hr={name:"background-image",initialValue:"none",type:1,prefix:!1,parse:function(e,t){if(0===t.length)return[];var n=t[0];return 20===n.type&&"none"===n.value?[]:t.filter((function(e){return Rn(e)&&Dr(e)})).map((function(t){return Lr.parse(e,t)}))}},Pr={name:"background-origin",initialValue:"border-box",prefix:!1,type:1,parse:function(e,t){return t.map((function(e){if(Qn(e))switch(e.value){case"padding-box":return 1;case"content-box":return 2}return 0}))}},Nr={name:"background-position",initialValue:"0% 0%",type:1,prefix:!1,parse:function(e,t){return Hn(t).map((function(e){return e.filter(On)})).map(Vn)}},Or={name:"background-repeat",initialValue:"repeat",prefix:!1,type:1,parse:function(e,t){return Hn(t).map((function(e){return e.filter(Qn).map((function(e){return e.value})).join(" ")})).map(Vr)}},Vr=function(e){switch(e){case"no-repeat":return 1;case"repeat-x":case"repeat no-repeat":return 2;case"repeat-y":case"no-repeat repeat":return 3;default:return 0}};!function(e){e.AUTO="auto",e.CONTAIN="contain",e.COVER="cover"}(Ir||(Ir={}));var zr,Gr={name:"background-size",initialValue:"0",prefix:!1,type:1,parse:function(e,t){return Hn(t).map((function(e){return e.filter(Kr)}))}},Kr=function(e){return Qn(e)||On(e)},Wr=function(e){return{name:"border-"+e+"-color",initialValue:"transparent",prefix:!1,type:3,format:"color"}},jr=Wr("top"),Xr=Wr("right"),qr=Wr("bottom"),Yr=Wr("left"),Jr=function(e){return{name:"border-radius-"+e,initialValue:"0 0",prefix:!1,type:1,parse:function(e,t){return Vn(t.filter(On))}}},Zr=Jr("top-left"),$r=Jr("top-right"),ei=Jr("bottom-right"),ti=Jr("bottom-left"),ni=function(e){return{name:"border-"+e+"-style",initialValue:"solid",prefix:!1,type:2,parse:function(e,t){switch(t){case"none":return 0;case"dashed":return 2;case"dotted":return 3;case"double":return 4}return 
1}}},ri=ni("top"),ii=ni("right"),Ai=ni("bottom"),ai=ni("left"),oi=function(e){return{name:"border-"+e+"-width",initialValue:"0",type:0,prefix:!1,parse:function(e,t){return Tn(t)?t.number:0}}},si=oi("top"),li=oi("right"),ui=oi("bottom"),ci=oi("left"),di={name:"color",initialValue:"transparent",prefix:!1,type:3,format:"color"},hi={name:"direction",initialValue:"ltr",prefix:!1,type:2,parse:function(e,t){return"rtl"===t?1:0}},fi={name:"display",initialValue:"inline-block",prefix:!1,type:1,parse:function(e,t){return t.filter(Qn).reduce((function(e,t){return e|pi(t.value)}),0)}},pi=function(e){switch(e){case"block":case"-webkit-box":return 2;case"inline":return 4;case"run-in":return 8;case"flow":return 16;case"flow-root":return 32;case"table":return 64;case"flex":case"-webkit-flex":return 128;case"grid":case"-ms-grid":return 256;case"ruby":return 512;case"subgrid":return 1024;case"list-item":return 2048;case"table-row-group":return 4096;case"table-header-group":return 8192;case"table-footer-group":return 16384;case"table-row":return 32768;case"table-cell":return 65536;case"table-column-group":return 131072;case"table-column":return 262144;case"table-caption":return 524288;case"ruby-base":return 1048576;case"ruby-text":return 2097152;case"ruby-base-container":return 4194304;case"ruby-text-container":return 8388608;case"contents":return 16777216;case"inline-block":return 33554432;case"inline-list-item":return 67108864;case"inline-table":return 134217728;case"inline-flex":return 268435456;case"inline-grid":return 536870912}return 0},gi={name:"float",initialValue:"none",prefix:!1,type:2,parse:function(e,t){switch(t){case"left":return 1;case"right":return 2;case"inline-start":return 3;case"inline-end":return 4}return 0}},mi={name:"letter-spacing",initialValue:"0",prefix:!1,type:0,parse:function(e,t){return 20===t.type&&"normal"===t.value?0:17===t.type||15===t.type?t.number:0}};!function(e){e.NORMAL="normal",e.STRICT="strict"}(zr||(zr={}));var vi,yi={name:"line-break",initialValue:"normal",prefix:!1,type:2,parse:function(e,t){return"strict"===t?zr.STRICT:zr.NORMAL}},wi={name:"line-height",initialValue:"normal",prefix:!1,type:4},Bi=function(e,t){return Qn(e)&&"normal"===e.value?1.2*t:17===e.type?t*e.number:On(e)?jn(e,t):t},_i={name:"list-style-image",initialValue:"none",type:0,prefix:!1,parse:function(e,t){return 20===t.type&&"none"===t.value?null:Lr.parse(e,t)}},bi={name:"list-style-position",initialValue:"outside",prefix:!1,type:2,parse:function(e,t){return"inside"===t?0:1}},xi={name:"list-style-type",initialValue:"none",prefix:!1,type:2,parse:function(e,t){switch(t){case"disc":return 0;case"circle":return 1;case"square":return 2;case"decimal":return 3;case"cjk-decimal":return 4;case"decimal-leading-zero":return 5;case"lower-roman":return 6;case"upper-roman":return 7;case"lower-greek":return 8;case"lower-alpha":return 9;case"upper-alpha":return 10;case"arabic-indic":return 11;case"armenian":return 12;case"bengali":return 13;case"cambodian":return 14;case"cjk-earthly-branch":return 15;case"cjk-heavenly-stem":return 16;case"cjk-ideographic":return 17;case"devanagari":return 18;case"ethiopic-numeric":return 19;case"georgian":return 20;case"gujarati":return 21;case"gurmukhi":case"hebrew":return 22;case"hiragana":return 23;case"hiragana-iroha":return 24;case"japanese-formal":return 25;case"japanese-informal":return 26;case"kannada":return 27;case"katakana":return 28;case"katakana-iroha":return 29;case"khmer":return 30;case"korean-hangul-formal":return 31;case"korean-hanja-formal":return 
32;case"korean-hanja-informal":return 33;case"lao":return 34;case"lower-armenian":return 35;case"malayalam":return 36;case"mongolian":return 37;case"myanmar":return 38;case"oriya":return 39;case"persian":return 40;case"simp-chinese-formal":return 41;case"simp-chinese-informal":return 42;case"tamil":return 43;case"telugu":return 44;case"thai":return 45;case"tibetan":return 46;case"trad-chinese-formal":return 47;case"trad-chinese-informal":return 48;case"upper-armenian":return 49;case"disclosure-open":return 50;case"disclosure-closed":return 51;default:return-1}}},Ci=function(e){return{name:"margin-"+e,initialValue:"0",prefix:!1,type:4}},Si=Ci("top"),Ei=Ci("right"),Ui=Ci("bottom"),Mi=Ci("left"),Fi={name:"overflow",initialValue:"visible",prefix:!1,type:1,parse:function(e,t){return t.filter(Qn).map((function(e){switch(e.value){case"hidden":return 1;case"scroll":return 2;case"clip":return 3;case"auto":return 4;default:return 0}}))}},Ti={name:"overflow-wrap",initialValue:"normal",prefix:!1,type:2,parse:function(e,t){return"break-word"===t?"break-word":"normal"}},ki=function(e){return{name:"padding-"+e,initialValue:"0",prefix:!1,type:3,format:"length-percentage"}},Qi=ki("top"),Li=ki("right"),Di=ki("bottom"),Ii=ki("left"),Ri={name:"text-align",initialValue:"left",prefix:!1,type:2,parse:function(e,t){switch(t){case"right":return 2;case"center":case"justify":return 1;default:return 0}}},Hi={name:"position",initialValue:"static",prefix:!1,type:2,parse:function(e,t){switch(t){case"relative":return 1;case"absolute":return 2;case"fixed":return 3;case"sticky":return 4}return 0}},Pi={name:"text-shadow",initialValue:"none",type:1,prefix:!1,parse:function(e,t){return 1===t.length&&Dn(t[0],"none")?[]:Hn(t).map((function(t){for(var n={color:dr.TRANSPARENT,offsetX:zn,offsetY:zn,blur:zn},r=0,i=0;i1?1:0],this.overflowWrap=vA(e,Ti,t.overflowWrap),this.paddingTop=vA(e,Qi,t.paddingTop),this.paddingRight=vA(e,Li,t.paddingRight),this.paddingBottom=vA(e,Di,t.paddingBottom),this.paddingLeft=vA(e,Ii,t.paddingLeft),this.paintOrder=vA(e,dA,t.paintOrder),this.position=vA(e,Hi,t.position),this.textAlign=vA(e,Ri,t.textAlign),this.textDecorationColor=vA(e,Ji,null!==(n=t.textDecorationColor)&&void 0!==n?n:t.color),this.textDecorationLine=vA(e,Zi,null!==(r=t.textDecorationLine)&&void 0!==r?r:t.textDecoration),this.textShadow=vA(e,Pi,t.textShadow),this.textTransform=vA(e,Ni,t.textTransform),this.transform=vA(e,Oi,t.transform),this.transformOrigin=vA(e,Ki,t.transformOrigin),this.visibility=vA(e,Wi,t.visibility),this.webkitTextStrokeColor=vA(e,hA,t.webkitTextStrokeColor),this.webkitTextStrokeWidth=vA(e,fA,t.webkitTextStrokeWidth),this.wordBreak=vA(e,ji,t.wordBreak),this.zIndex=vA(e,Xi,t.zIndex)}return e.prototype.isVisible=function(){return this.display>0&&this.opacity>0&&0===this.visibility},e.prototype.isTransparent=function(){return rr(this.backgroundColor)},e.prototype.isTransformed=function(){return null!==this.transform},e.prototype.isPositioned=function(){return 0!==this.position},e.prototype.isPositionedWithZIndex=function(){return this.isPositioned()&&!this.zIndex.auto},e.prototype.isFloating=function(){return 0!==this.float},e.prototype.isInlineLevel=function(){return iA(this.display,4)||iA(this.display,33554432)||iA(this.display,268435456)||iA(this.display,536870912)||iA(this.display,67108864)||iA(this.display,134217728)},e}(),gA=function(){function e(e,t){this.content=vA(e,AA,t.content),this.quotes=vA(e,lA,t.quotes)}return e}(),mA=function(){function 
e(e,t){this.counterIncrement=vA(e,aA,t.counterIncrement),this.counterReset=vA(e,oA,t.counterReset)}return e}(),vA=function(e,t,n){var r=new Mn,i=null!==n&&"undefined"!==typeof n?n.toString():t.initialValue;r.write(i);var A=new Fn(r.read());switch(t.type){case 2:var a=A.parseComponentValue();return t.parse(e,Qn(a)?a.value:t.initialValue);case 0:return t.parse(e,A.parseComponentValue());case 1:return t.parse(e,A.parseComponentValues());case 4:return A.parseComponentValue();case 3:switch(t.format){case"angle":return Zn.parse(e,A.parseComponentValue());case"color":return nr.parse(e,A.parseComponentValue());case"image":return Lr.parse(e,A.parseComponentValue());case"length":var o=A.parseComponentValue();return Nn(o)?o:zn;case"length-percentage":var s=A.parseComponentValue();return On(s)?s:zn;case"time":return qi.parse(e,A.parseComponentValue())}}},yA="data-html2canvas-debug",wA=function(e){switch(e.getAttribute(yA)){case"all":return 1;case"clone":return 2;case"parse":return 3;case"render":return 4;default:return 0}},BA=function(e,t){var n=wA(e);return 1===n||t===n},_A=function(){function e(e,t){this.context=e,this.textNodes=[],this.elements=[],this.flags=0,BA(t,3),this.styles=new pA(e,window.getComputedStyle(t,null)),lo(t)&&(this.styles.animationDuration.some((function(e){return e>0}))&&(t.style.animationDuration="0s"),null!==this.styles.transform&&(t.style.transform="none")),this.bounds=o(this.context,t),BA(t,4)&&(this.flags|=16)}return e}(),bA="AAAAAAAAAAAAEA4AGBkAAFAaAAACAAAAAAAIABAAGAAwADgACAAQAAgAEAAIABAACAAQAAgAEAAIABAACAAQAAgAEAAIABAAQABIAEQATAAIABAACAAQAAgAEAAIABAAVABcAAgAEAAIABAACAAQAGAAaABwAHgAgACIAI4AlgAIABAAmwCjAKgAsAC2AL4AvQDFAMoA0gBPAVYBWgEIAAgACACMANoAYgFkAWwBdAF8AX0BhQGNAZUBlgGeAaMBlQGWAasBswF8AbsBwwF0AcsBYwHTAQgA2wG/AOMBdAF8AekB8QF0AfkB+wHiAHQBfAEIAAMC5gQIAAsCEgIIAAgAFgIeAggAIgIpAggAMQI5AkACygEIAAgASAJQAlgCYAIIAAgACAAKBQoFCgUTBRMFGQUrBSsFCAAIAAgACAAIAAgACAAIAAgACABdAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACABoAmgCrwGvAQgAbgJ2AggAHgEIAAgACADnAXsCCAAIAAgAgwIIAAgACAAIAAgACACKAggAkQKZAggAPADJAAgAoQKkAqwCsgK6AsICCADJAggA0AIIAAgACAAIANYC3gIIAAgACAAIAAgACABAAOYCCAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAkASoB+QIEAAgACAA8AEMCCABCBQgACABJBVAFCAAIAAgACAAIAAgACAAIAAgACABTBVoFCAAIAFoFCABfBWUFCAAIAAgACAAIAAgAbQUIAAgACAAIAAgACABzBXsFfQWFBYoFigWKBZEFigWKBYoFmAWfBaYFrgWxBbkFCAAIAAgACAAIAAgACAAIAAgACAAIAMEFCAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAMgFCADQBQgACAAIAAgACAAIAAgACAAIAAgACAAIAO4CCAAIAAgAiQAIAAgACABAAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAD0AggACAD8AggACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIANYFCAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgAC
AAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAMDvwAIAAgAJAIIAAgACAAIAAgACAAIAAgACwMTAwgACAB9BOsEGwMjAwgAKwMyAwsFYgE3A/MEPwMIAEUDTQNRAwgAWQOsAGEDCAAIAAgACAAIAAgACABpAzQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFIQUoBSwFCAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACABtAwgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACABMAEwACAAIAAgACAAIABgACAAIAAgACAC/AAgACAAyAQgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACACAAIAAwAAgACAAIAAgACAAIAAgACAAIAAAARABIAAgACAAIABQASAAIAAgAIABwAEA
AjgCIABsAqAC2AL0AigDQAtwC+IJIQqVAZUBWQqVAZUBlQGVAZUBlQGrC5UBlQGVAZUBlQGVAZUBlQGVAXsKlQGVAbAK6wsrDGUMpQzlDJUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAfAKAAuZA64AtwCJALoC6ADwAAgAuACgA/oEpgO6AqsD+AAIAAgAswMIAAgACAAIAIkAuwP5AfsBwwPLAwgACAAIAAgACADRA9kDCAAIAOED6QMIAAgACAAIAAgACADuA/YDCAAIAP4DyQAIAAgABgQIAAgAXQAOBAgACAAIAAgACAAIABMECAAIAAgACAAIAAgACAD8AAQBCAAIAAgAGgQiBCoECAExBAgAEAEIAAgACAAIAAgACAAIAAgACAAIAAgACAA4BAgACABABEYECAAIAAgATAQYAQgAVAQIAAgACAAIAAgACAAIAAgACAAIAFoECAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgAOQEIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAB+BAcACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAEABhgSMBAgACAAIAAgAlAQIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAwAEAAQABAADAAMAAwADAAQABAAEAAQABAAEAAQABHATAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgAdQMIAAgACAAIAAgACAAIAMkACAAIAAgAfQMIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACACFA4kDCAAIAAgACAAIAOcBCAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAIcDCAAIAAgACAAIAAgACAAIAAgACAAIAJEDCAAIAAgACADFAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACABgBAgAZgQIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgAbAQCBXIECAAIAHkECAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACABAAJwEQACjBKoEsgQIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAC6BMIECAAIAAgACAAIAAgACABmBAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgAxwQIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAGYECAAIAAgAzgQIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgAigWKBYoFigWKBYoFigWKBd0FXwUIAOIF6gXxBYoF3gT5BQAGCAaKBYoFigWKBYoFigWKBYoFigWKBYoFigXWBIoFigWKBYoFigWKBYoFigWKBYsFEAaKBYoFigWKBYoFigWKBRQGCACKBYoFigWKBQgACAAIANEECAAIABgGigUgBggAJgYIAC4GMwaKBYoF0wQ3Bj4GigWKBYoFigWKBYoFigWKBYoFigWKBYoFigUIAAgACAAIAAgACAAIAAgAigWKBYoFigWKBYoFigWKBYoFigWKBYoFigWKBYoFigWKBYoFigWKBYoFigWKBYoFigWKBYoFigWKBYoFigWLBf///////wQABAAEAAQABAAEAAQABAAEAAQAAwAEAAQAAgAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAAAAAAAAAAAAAAAAAAAAAAAAAOAAAAAAAAAAQADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUABQAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAAAAUAAAAFAAUAAAAFAAUAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAEAAQABAAEAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUABQAFAAUABQAFAAUABQAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUABQAFAAUAAQAAAAUABQAFAAUABQAFAAAAAAAFAAUAAAAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAUABQAFAAUABQAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAFAAAAAAAFAAUAAQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABwAFAAUABQAFAAAABwAHAAcAAAAHAAcABwAFAAEAAAAAAAAAAAAAAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHAAcABwAFAAUABQAFAAcABwAFAAUAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHAAAAAQABAAAAAAAAAAAAAAAFAAUABQAFAAAABwAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAHAAcABwAHAAcAAAAHAAcAAAAAAAUABQAHAAUAAQAHAAEABwAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUABQAFAAUABwABAAUABQAFAAUAAAAAAAAAAAAAAAEAAQABAAEAAQABAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABwAFAAUAAAAAAAAAAAAAAAAABQAFAAUABQAFAAUAAQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQABQANAAQABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQABAAEAAQABAAEAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAEAAQABAAEAAQABAAEAAQABAAEAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAQABAAEAAQABAAEAAQABAAAAAAAAAAAAAAAAAAAAAAABQAHAAUABQAFAAAAAAAAAAcABQAFAAUABQAFAAQABAAEAAQABAAEAAQABAAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUAAAAFAAUABQAFAAUAAAAFAAUABQAAAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAFAAUABQAAAAAAAAAAAAUABQAFAAcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAHAAUAAAAHAAcABwAFAAUABQAFAAUABQAFAAUABwAHAAcABwAFAAcABwAAAAUABQAFAAUABQAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABwAHAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAUABwAHAAUABQAFAAUAAAAAAAcABwAAAAAABwAHAAUAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAAABQAFAAcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAAABwAHAAcABQAFAAAAAAAAAAAABQAFAAAAAAAFAAUABQAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAABwAFAAUABQAFAAUAAAAFAAUABwAAAAcABwAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUAAAAFAAUABwAFAAUABQAFAAAAAAAHAAcAAAAAAAcABwAFAAAAAAAAAAAAAAAAAAAABQAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAcABwAAAAAAAAAHAAcABwAAAAcABwAHAAUAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAAAAAAAAAAAAAAAAAAABQAHAAcABwAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABwAHAAcABwAAAAUABQAFAAAABQAFAAUABQAAAAAAAAAAAAAAAAAAAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAcABQAHAAcABQAHAAcAAAAFAAcABwAAAAcABwAFAAUAAAAAAAAAAAAAAAAAAAAFAAUAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAcABwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAAAAUABwAAAAAAAAAAAAAAAAAAAAAAAAAAAAUAAAAAAAAAAAAFAAcABwAFAAUABQAAAAUAAAAHAAcABwAHAAcABwAHAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUAAAAHAAUABQAFAAUABQAFAAUAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAAABwAFAAUABQAFAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUABQAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAUAAAAFAAAAAAAAAAAABwAHAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABwAFAAUABQAFAAUAAAAFAAUAAAAAAAAAAAAAAAUABQAFAAUABQAFAAUABQAFAAUABQAAAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUABwAFAAUABQAFAAUABQAAAAUABQAHAAcABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHAAcABQAFAAAAAAAAAAAABQAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAcABQAFAAAAAAAAAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAHAAUABQAFAAUABQAFAAUABwAHAAcABwAHAAcABwAHAAUABwAHAAUABQAFAAUABQAFAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUABwAHAAcABwAFAAUABwAHAAcAAAAAAAAAAAAHAAcABQAHAAcABwAHAAcABwAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAcABwAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAcABQAHAAUABQAFAAUABQAFAAUAAAAFAAAABQAAAAAABQAFAAUABQAFAAUABQAFAAcABwAHAAcABwAHAAUABQAFAAUABQAFAAUABQAFAAUAAAAAAAUABQAFAAUABQAHAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUABQAFAAUABwAFAAcABwAHA
AcABwAFAAcABwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHAAUABQAFAAUABwAHAAUABQAHAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAcABQAFAAcABwAHAAUABwAFAAUABQAHAAcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABwAHAAcABwAHAAcABwAHAAUABQAFAAUABQAFAAUABQAHAAcABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUAAAAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAcABQAFAAUABQAFAAUABQAAAAAAAAAAAAUAAAAAAAAAAAAAAAAABQAAAAAABwAFAAUAAAAAAAAAAAAAAAAABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAAABQAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUABQAFAAUADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUAAAAFAAUABQAFAAUABQAFAAUABQAFAAAAAAAAAAAABQAAAAAAAAAFAAAAAAAAAAAABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABwAHAAUABQAHAAAAAAAAAAAABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAcABwAHAAcABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUAAAAAAAAAAAAAAAAABQAFAAUABQAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUABQAFAAUABQAFAAUABQAHAAcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAcABwAFAAUABQAFAAcABwAFAAUABwAHAAAAAAAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUABQAFAAcABwAFAAUABwAHAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAAAAFAAcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUAAAAFAAUABQAAAAAABQAFAAAAAAAAAAAAAAAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAcABQAFAAcABwAAAAAAAAAAAAAABwAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAcABwAFAAcABwAFAAcABwAAAAcABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUABQAAAAAAAAAAAAAAAAAFAAUABQAAAAUABQAAAAAAAAAAAAAABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAAAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAcABQAHAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAUABwAFAAUABQAFAAUABQAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABwAHAAcABQAFAAUABQAFAAUABQAFAAUABwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHAAcABwAFAAUABQAHAAcABQAHAAUABQAAAAAAAAAAAAAAAAAFAAAABwAHAAcABQAFAAUABQAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABwAHAAcABwAAAAAABwAHAAAAAAAHAAcABwAAAAAAAAAAAAAAAAAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAABwAHAAAAAAAFAAUABQAFAAUABQAFAAAAAAAAAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHAAcABwAFAAUABQAFAAUABQAFAAUABwAHAAUABQAFAAcABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAHAAcABQAFAAUABQAFAAUABwAFAAcABwAFAAcABQAFAAcABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAHAAcABQAFAAUABQAAAAAABwAHAAcABwAFAAUABwAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAcABwAHAAUABQAFAAUABQAFAAUABQAHAAcABQAHAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABwAFAAcABwAFAAUABQAFAAUABQAHAAUAAAAAAAAAAAAAAAAAAAAAAAcABwAFAAUABQAFAAcABQAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHAAcABwAFAAUABQAFAAUABQAFAAUABQAHAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHAAcABwAFAAUABQAFAAAAAAAFAAUABwAHAAcABwAFAAAAAAAAAAcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAUABQAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUABQAFAAUABwAHAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAcABQAFAAUABQAFAAUABQAAAAUABQAFAAUABQAFAAcABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAAAHAAUABQAFAAUABQAFAAUABwAFAAUABwAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUABQAFAAUAAAAAAAAABQAAAAUABQAAAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAHAAcABwAHAAcAAAAFAAUAAAAHAAcABQAHAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABwAHAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAAAAAAAAAAAAAAAAAAABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAcABwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAAAAUABQAFAAAAAAAFAAUABQAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAAAAAAAAAAABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUABQAAAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUABQAFAAUABQAAAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAAAAAABQAFAAUABQAFAAUABQAAAAUABQAAAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAFAAUABQAFAAUADgAOAA4ADgAOAA4ADwAPAA8ADwAPAA8ADwAPAA8ADwAPAA8ADwAPAA8ADwAPAA8ADwAPAA8ADwAPAA8ADwAPAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAcABwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABwAHAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAAAAAAAAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAKAAoACgAKAAoACgAKAAoACgAKAAoACgAKAAoACgAKAAoACgAKAAoACgAKAAoACgAMAAwADAAMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkAAAAAAAAAAAAKAAoACgAKAAoACgAKAAoACgAKAAoACgAKAAoACgAKAAoACgAKAAoACgAKAAoACgAKAAoACgAKAAoACgAKAAoACgAAAAAAAAAAAAsADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwACwAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAAAAAADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAA4AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAA4ADgAOAA4ADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA4ADgAAAAAAAAAAAAAAAAAAAAAADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAOAA4ADgAOAA4ADgAOAA4ADgAOAAAAAAAAAAAADgAOAA4AAAAAAAAAAAAAAAAAAAAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAOAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAAAAAAAAAAAAAAAAAAAAAAAAAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAOAA4ADgAAAA4ADgAOAA4ADgAOAAAADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4AAAAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4AAAAAAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAAAA4AAAAOAAAAAAAAAAAAAAAAAA4AAAAAAAAAAAAAAAAADgAAAAAAAAAAAAAAAAAAAAAAAAAAAA4ADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAAAAAADgAAAAAAAAAAAA4AAAAOAAAAAAAAAAAADgAOAA4AAAAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAA4ADgAOAA4AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAA4ADgAAAAAAAAAAAAAAAAAAAAAAAAAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAA4AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA4ADgAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAAAAAAAAAAAA4AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAAAADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAA4ADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA4ADgAOAA4ADgAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA4ADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAAAAAADgAOAA4ADgAOAA4ADgAOAA4ADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAAAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA4AAAAAAA4ADgAOAA4ADgAOAA4ADgAOAAAADgAOAA4ADgAAAAAAAAAAAAAAAAAAAAAAAAAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4AAAAAAAAAAAAAAAAADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAA4ADgAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAOAA4ADgAOAA4ADgAOAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAOAA4ADgAOAA4AAAAAAAAAAAAAAAAAAAAAAA4ADgAOAA4ADgAOAA4ADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4AAAAOAA4ADgAOAA4ADgAAAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4AAAAAAAAAAAA=",xA="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/",CA="undefined"===typeof Uint8Array?[]:new Uint8Array(256),SA=0;SA>4,u[s++]=(15&r)<<4|i>>2,u[s++]=(3&i)<<6|63&A;return l},UA=function(e){for(var t=e.length,n=[],r=0;r>FA,LA=(1<>FA)+32,IA=65536>>TA,RA=(1<=0){if(e<55296||e>56319&&e<=65535)return t=((t=this.index[e>>FA])<>FA)])<>TA),t=this.index[t],t+=e>>FA&RA,t=((t=this.index[t])<=55296&&i<=56319&&n>10),a%1024+56320)),(i+1===n||r.length>16384)&&(A+=String.fromCharCode.apply(String,r),r.length=0)}return A},sa=NA(bA),la="\xd7",ua="\xf7",ca=function(e){return sa.get(e)},da=function(e,t,n){var r=n-2,i=t[r],A=t[n-1],a=t[n];if(A===jA&&a===XA)return la;if(A===jA||A===XA||A===qA)return ua;if(a===jA||a===XA||a===qA)return ua;if(A===ZA&&-1!==[ZA,$A,ta,na].indexOf(a))return 
la;if((A===ta||A===$A)&&(a===$A||a===ea))return la;if((A===na||A===ea)&&a===ea)return la;if(a===ra||a===YA)return la;if(a===JA)return la;if(A===WA)return la;if(A===ra&&a===ia){for(;i===YA;)i=t[--r];if(i===ia)return la}if(A===Aa&&a===Aa){for(var o=0;i===Aa;)o++,i=t[--r];if(o%2===0)return la}return ua},ha=function(e){var t=aa(e),n=t.length,r=0,i=0,A=t.map(ca);return{next:function(){if(r>=n)return{done:!0,value:null};for(var e=la;ra.x||i.y>a.y;return a=i,0===t||o}));return e.body.removeChild(t),o},ma=function(){return"undefined"!==typeof(new Image).crossOrigin},va=function(){return"string"===typeof(new XMLHttpRequest).responseType},ya=function(e){var t=new Image,n=e.createElement("canvas"),r=n.getContext("2d");if(!r)return!1;t.src="data:image/svg+xml,";try{r.drawImage(t,0,0),n.toDataURL()}catch(Rt){return!1}return!0},wa=function(e){return 0===e[0]&&255===e[1]&&0===e[2]&&255===e[3]},Ba=function(e){var t=e.createElement("canvas"),n=100;t.width=n,t.height=n;var r=t.getContext("2d");if(!r)return Promise.reject(!1);r.fillStyle="rgb(0, 255, 0)",r.fillRect(0,0,n,n);var i=new Image,A=t.toDataURL();i.src=A;var a=_a(n,n,0,0,i);return r.fillStyle="red",r.fillRect(0,0,n,n),ba(a).then((function(t){r.drawImage(t,0,0);var i=r.getImageData(0,0,n,n).data;r.fillStyle="red",r.fillRect(0,0,n,n);var a=e.createElement("div");return a.style.backgroundImage="url("+A+")",a.style.height=n+"px",wa(i)?ba(_a(n,n,0,0,a)):Promise.reject(!1)})).then((function(e){return r.drawImage(e,0,0),wa(r.getImageData(0,0,n,n).data)})).catch((function(){return!1}))},_a=function(e,t,n,r,i){var A="http://www.w3.org/2000/svg",a=document.createElementNS(A,"svg"),o=document.createElementNS(A,"foreignObject");return a.setAttributeNS(null,"width",e.toString()),a.setAttributeNS(null,"height",t.toString()),o.setAttributeNS(null,"width","100%"),o.setAttributeNS(null,"height","100%"),o.setAttributeNS(null,"x",n.toString()),o.setAttributeNS(null,"y",r.toString()),o.setAttributeNS(null,"externalResourcesRequired","true"),a.appendChild(o),o.appendChild(i),a},ba=function(e){return new Promise((function(t,n){var r=new Image;r.onload=function(){return t(r)},r.onerror=n,r.src="data:image/svg+xml;charset=utf-8,"+encodeURIComponent((new XMLSerializer).serializeToString(e))}))},xa={get SUPPORT_RANGE_BOUNDS(){var e=pa(document);return Object.defineProperty(xa,"SUPPORT_RANGE_BOUNDS",{value:e}),e},get SUPPORT_WORD_BREAKING(){var e=xa.SUPPORT_RANGE_BOUNDS&&ga(document);return Object.defineProperty(xa,"SUPPORT_WORD_BREAKING",{value:e}),e},get SUPPORT_SVG_DRAWING(){var e=ya(document);return Object.defineProperty(xa,"SUPPORT_SVG_DRAWING",{value:e}),e},get SUPPORT_FOREIGNOBJECT_DRAWING(){var e="function"===typeof Array.from&&"function"===typeof window.fetch?Ba(document):Promise.resolve(!1);return Object.defineProperty(xa,"SUPPORT_FOREIGNOBJECT_DRAWING",{value:e}),e},get SUPPORT_CORS_IMAGES(){var e=ma();return Object.defineProperty(xa,"SUPPORT_CORS_IMAGES",{value:e}),e},get SUPPORT_RESPONSE_TYPE(){var e=va();return Object.defineProperty(xa,"SUPPORT_RESPONSE_TYPE",{value:e}),e},get SUPPORT_CORS_XHR(){var e="withCredentials"in new XMLHttpRequest;return Object.defineProperty(xa,"SUPPORT_CORS_XHR",{value:e}),e},get SUPPORT_NATIVE_TEXT_SEGMENTATION(){var e=!("undefined"===typeof Intl||!Intl.Segmenter);return Object.defineProperty(xa,"SUPPORT_NATIVE_TEXT_SEGMENTATION",{value:e}),e}},Ca=function(){function e(e,t){this.text=e,this.bounds=t}return e}(),Sa=function(e,t,n,r){var i=Ta(t,n),A=[],o=0;return 
i.forEach((function(t){if(n.textDecorationLine.length||t.trim().length>0)if(xa.SUPPORT_RANGE_BOUNDS){var i=Ua(r,o,t.length).getClientRects();if(i.length>1){var s=Ma(t),l=0;s.forEach((function(t){A.push(new Ca(t,a.fromDOMRectList(e,Ua(r,l+o,t.length).getClientRects()))),l+=t.length}))}else A.push(new Ca(t,a.fromDOMRectList(e,i)))}else{var u=r.splitText(t.length);A.push(new Ca(t,Ea(e,r))),r=u}else xa.SUPPORT_RANGE_BOUNDS||(r=r.splitText(t.length));o+=t.length})),A},Ea=function(e,t){var n=t.ownerDocument;if(n){var r=n.createElement("html2canvaswrapper");r.appendChild(t.cloneNode(!0));var i=t.parentNode;if(i){i.replaceChild(r,t);var A=o(e,r);return r.firstChild&&i.replaceChild(r.firstChild,r),A}}return a.EMPTY},Ua=function(e,t,n){var r=e.ownerDocument;if(!r)throw new Error("Node has no owner document");var i=r.createRange();return i.setStart(e,t),i.setEnd(e,t+n),i},Ma=function(e){if(xa.SUPPORT_NATIVE_TEXT_SEGMENTATION){var t=new Intl.Segmenter(void 0,{granularity:"grapheme"});return Array.from(t.segment(e)).map((function(e){return e.segment}))}return fa(e)},Fa=function(e,t){if(xa.SUPPORT_NATIVE_TEXT_SEGMENTATION){var n=new Intl.Segmenter(void 0,{granularity:"word"});return Array.from(n.segment(e)).map((function(e){return e.segment}))}return Qa(e,t)},Ta=function(e,t){return 0!==t.letterSpacing?Ma(e):Fa(e,t)},ka=[32,160,4961,65792,65793,4153,4241],Qa=function(e,t){for(var n,r=Ve(e,{lineBreak:t.lineBreak,wordBreak:"break-word"===t.overflowWrap?"break-word":t.wordBreak}),i=[],A=function(){if(n.value){var e=n.value.slice(),t=l(e),r="";t.forEach((function(e){-1===ka.indexOf(e)?r+=u(e):(r.length&&i.push(r),i.push(u(e)),r="")})),r.length&&i.push(r)}};!(n=r.next()).done;)A();return i},La=function(){function e(e,t,n){this.text=Da(t.data,n.textTransform),this.textBounds=Sa(e,this.text,n,t)}return e}(),Da=function(e,t){switch(t){case 1:return e.toLowerCase();case 3:return e.replace(Ia,Ra);case 2:return e.toUpperCase();default:return e}},Ia=/(^|\s|:|-|\(|\))([a-z])/g,Ra=function(e,t,n){return e.length>0?t+n.toUpperCase():e},Ha=function(e){function n(t,n){var r=e.call(this,t,n)||this;return r.src=n.currentSrc||n.src,r.intrinsicWidth=n.naturalWidth,r.intrinsicHeight=n.naturalHeight,r.context.cache.addImage(r.src),r}return t(n,e),n}(_A),Pa=function(e){function n(t,n){var r=e.call(this,t,n)||this;return r.canvas=n,r.intrinsicWidth=n.width,r.intrinsicHeight=n.height,r}return t(n,e),n}(_A),Na=function(e){function n(t,n){var r=e.call(this,t,n)||this,i=new XMLSerializer,A=o(t,n);return n.setAttribute("width",A.width+"px"),n.setAttribute("height",A.height+"px"),r.svg="data:image/svg+xml,"+encodeURIComponent(i.serializeToString(n)),r.intrinsicWidth=n.width.baseVal.value,r.intrinsicHeight=n.height.baseVal.value,r.context.cache.addImage(r.svg),r}return t(n,e),n}(_A),Oa=function(e){function n(t,n){var r=e.call(this,t,n)||this;return r.value=n.value,r}return t(n,e),n}(_A),Va=function(e){function n(t,n){var r=e.call(this,t,n)||this;return r.start=n.start,r.reversed="boolean"===typeof n.reversed&&!0===n.reversed,r}return t(n,e),n}(_A),za=[{type:15,flags:0,unit:"px",number:3}],Ga=[{type:16,flags:0,number:50}],Ka=function(e){return e.width>e.height?new a(e.left+(e.width-e.height)/2,e.top,e.height,e.height):e.width0)r.textNodes.push(new La(t,A,r.styles));else if(so(A))if(So(A)&&A.assignedNodes)A.assignedNodes().forEach((function(n){return e(t,n,r,i)}));else{var 
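/*
 * Element branch of the DOM tree walk: ro maps an element to a specialized
 * container (IMG, CANVAS, svg, LI, OL, INPUT, SELECT, TEXTAREA and IFRAME
 * each get their own subclass; everything else falls back to the base
 * container). Visible elements are flagged as stacking contexts (flag 4,
 * via Ao) or as positioned/floating contexts (flag 2), tags listed in the
 * to lookup get the list-owner flag 8, and the walk recurses into shadow
 * roots when one is attached.
 */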
o=ro(t,A);o.styles.isVisible()&&(Ao(A,o,i)?o.flags|=4:ao(o.styles)&&(o.flags|=2),-1!==to.indexOf(A.tagName)&&(o.flags|=8),r.elements.push(o),A.slot,A.shadowRoot?e(t,A.shadowRoot,o,i):xo(A)||go(A)||Co(A)||e(t,A,o,i))}},ro=function(e,t){return wo(t)?new Ha(e,t):vo(t)?new Pa(e,t):go(t)?new Na(e,t):co(t)?new Oa(e,t):ho(t)?new Va(e,t):fo(t)?new Ja(e,t):Co(t)?new Za(e,t):xo(t)?new $a(e,t):Bo(t)?new eo(e,t):new _A(e,t)},io=function(e,t){var n=ro(e,t);return n.flags|=4,no(e,t,n,n),n},Ao=function(e,t,n){return t.styles.isPositionedWithZIndex()||t.styles.opacity<1||t.styles.isTransformed()||mo(e)&&n.styles.isTransparent()},ao=function(e){return e.isPositioned()||e.isFloating()},oo=function(e){return e.nodeType===Node.TEXT_NODE},so=function(e){return e.nodeType===Node.ELEMENT_NODE},lo=function(e){return so(e)&&"undefined"!==typeof e.style&&!uo(e)},uo=function(e){return"object"===typeof e.className},co=function(e){return"LI"===e.tagName},ho=function(e){return"OL"===e.tagName},fo=function(e){return"INPUT"===e.tagName},po=function(e){return"HTML"===e.tagName},go=function(e){return"svg"===e.tagName},mo=function(e){return"BODY"===e.tagName},vo=function(e){return"CANVAS"===e.tagName},yo=function(e){return"VIDEO"===e.tagName},wo=function(e){return"IMG"===e.tagName},Bo=function(e){return"IFRAME"===e.tagName},_o=function(e){return"STYLE"===e.tagName},bo=function(e){return"SCRIPT"===e.tagName},xo=function(e){return"TEXTAREA"===e.tagName},Co=function(e){return"SELECT"===e.tagName},So=function(e){return"SLOT"===e.tagName},Eo=function(e){return e.tagName.indexOf("-")>0},Uo=function(){function e(){this.counters={}}return e.prototype.getCounterValue=function(e){var t=this.counters[e];return t&&t.length?t[t.length-1]:1},e.prototype.getCounterValues=function(e){var t=this.counters[e];return t||[]},e.prototype.pop=function(e){var t=this;e.forEach((function(e){return t.counters[e].pop()}))},e.prototype.parse=function(e){var t=this,n=e.counterIncrement,r=e.counterReset,i=!0;null!==n&&n.forEach((function(e){var n=t.counters[e.counter];n&&0!==e.increment&&(i=!1,n.length||n.push(1),n[Math.max(0,n.length-1)]+=e.increment)}));var A=[];return i&&r.forEach((function(e){var 
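/*
 * counter-reset handling: each reset pushes a fresh value onto the named
 * counter's stack and records the name in A so the caller (cloneNode) can
 * pop the scope once the element's subtree is done. Note the i flag: resets
 * are applied only when no counter-increment already bumped an existing
 * counter for this element.
 */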
n=t.counters[e.counter];A.push(e.counter),n||(n=t.counters[e.counter]=[]),n.push(e.reset)})),A},e}(),Mo={integers:[1e3,900,500,400,100,90,50,40,10,9,5,4,1],values:["M","CM","D","CD","C","XC","L","XL","X","IX","V","IV","I"]},Fo={integers:[9e3,8e3,7e3,6e3,5e3,4e3,3e3,2e3,1e3,900,800,700,600,500,400,300,200,100,90,80,70,60,50,40,30,20,10,9,8,7,6,5,4,3,2,1],values:["\u0554","\u0553","\u0552","\u0551","\u0550","\u054f","\u054e","\u054d","\u054c","\u054b","\u054a","\u0549","\u0548","\u0547","\u0546","\u0545","\u0544","\u0543","\u0542","\u0541","\u0540","\u053f","\u053e","\u053d","\u053c","\u053b","\u053a","\u0539","\u0538","\u0537","\u0536","\u0535","\u0534","\u0533","\u0532","\u0531"]},To={integers:[1e4,9e3,8e3,7e3,6e3,5e3,4e3,3e3,2e3,1e3,400,300,200,100,90,80,70,60,50,40,30,20,19,18,17,16,15,10,9,8,7,6,5,4,3,2,1],values:["\u05d9\u05f3","\u05d8\u05f3","\u05d7\u05f3","\u05d6\u05f3","\u05d5\u05f3","\u05d4\u05f3","\u05d3\u05f3","\u05d2\u05f3","\u05d1\u05f3","\u05d0\u05f3","\u05ea","\u05e9","\u05e8","\u05e7","\u05e6","\u05e4","\u05e2","\u05e1","\u05e0","\u05de","\u05dc","\u05db","\u05d9\u05d8","\u05d9\u05d7","\u05d9\u05d6","\u05d8\u05d6","\u05d8\u05d5","\u05d9","\u05d8","\u05d7","\u05d6","\u05d5","\u05d4","\u05d3","\u05d2","\u05d1","\u05d0"]},ko={integers:[1e4,9e3,8e3,7e3,6e3,5e3,4e3,3e3,2e3,1e3,900,800,700,600,500,400,300,200,100,90,80,70,60,50,40,30,20,10,9,8,7,6,5,4,3,2,1],values:["\u10f5","\u10f0","\u10ef","\u10f4","\u10ee","\u10ed","\u10ec","\u10eb","\u10ea","\u10e9","\u10e8","\u10e7","\u10e6","\u10e5","\u10e4","\u10f3","\u10e2","\u10e1","\u10e0","\u10df","\u10de","\u10dd","\u10f2","\u10dc","\u10db","\u10da","\u10d9","\u10d8","\u10d7","\u10f1","\u10d6","\u10d5","\u10d4","\u10d3","\u10d2","\u10d1","\u10d0"]},Qo=function(e,t,n,r,i,A){return en?Wo(e,i,A.length>0):r.integers.reduce((function(t,n,i){for(;e>=n;)e-=n,t+=r.values[i];return t}),"")+A},Lo=function(e,t,n,r){var i="";do{n||e--,i=r(e)+i,e/=t}while(e*t>=t);return i},Do=function(e,t,n,r,i){var A=n-t+1;return(e<0?"-":"")+(Lo(Math.abs(e),A,r,(function(e){return u(Math.floor(e%A)+t)}))+i)},Io=function(e,t,n){void 0===n&&(n=". ");var r=t.length;return Lo(Math.abs(e),r,!1,(function(e){return t[Math.floor(e%r)]}))+n},Ro=1,Ho=2,Po=4,No=8,Oo=function(e,t,n,r,i,A){if(e<-9999||e>9999)return Wo(e,4,i.length>0);var a=Math.abs(e),o=i;if(0===a)return t[0]+o;for(var s=0;a>0&&s<=4;s++){var l=a%10;0===l&&iA(A,Ro)&&""!==o?o=t[l]+o:l>1||1===l&&0===s||1===l&&1===s&&iA(A,Ho)||1===l&&1===s&&iA(A,Po)&&e>100||1===l&&s>1&&iA(A,No)?o=t[l]+(s>0?n[s-1]:"")+o:1===l&&s>0&&(o=n[s-1]+o),a=Math.floor(a/10)}return(e<0?r:"")+o},Vo="\u5341\u767e\u5343\u842c",zo="\u62fe\u4f70\u4edf\u842c",Go="\u30de\u30a4\u30ca\u30b9",Ko="\ub9c8\uc774\ub108\uc2a4",Wo=function(e,t,n){var r=n?". ":"",i=n?"\u3001":"",A=n?", ":"",a=n?" 
":"";switch(t){case 0:return"\u2022"+a;case 1:return"\u25e6"+a;case 2:return"\u25fe"+a;case 5:var o=Do(e,48,57,!0,r);return o.length<4?"0"+o:o;case 4:return Io(e,"\u3007\u4e00\u4e8c\u4e09\u56db\u4e94\u516d\u4e03\u516b\u4e5d",i);case 6:return Qo(e,1,3999,Mo,3,r).toLowerCase();case 7:return Qo(e,1,3999,Mo,3,r);case 8:return Do(e,945,969,!1,r);case 9:return Do(e,97,122,!1,r);case 10:return Do(e,65,90,!1,r);case 11:return Do(e,1632,1641,!0,r);case 12:case 49:return Qo(e,1,9999,Fo,3,r);case 35:return Qo(e,1,9999,Fo,3,r).toLowerCase();case 13:return Do(e,2534,2543,!0,r);case 14:case 30:return Do(e,6112,6121,!0,r);case 15:return Io(e,"\u5b50\u4e11\u5bc5\u536f\u8fb0\u5df3\u5348\u672a\u7533\u9149\u620c\u4ea5",i);case 16:return Io(e,"\u7532\u4e59\u4e19\u4e01\u620a\u5df1\u5e9a\u8f9b\u58ec\u7678",i);case 17:case 48:return Oo(e,"\u96f6\u4e00\u4e8c\u4e09\u56db\u4e94\u516d\u4e03\u516b\u4e5d",Vo,"\u8ca0",i,Ho|Po|No);case 47:return Oo(e,"\u96f6\u58f9\u8cb3\u53c3\u8086\u4f0d\u9678\u67d2\u634c\u7396",zo,"\u8ca0",i,Ro|Ho|Po|No);case 42:return Oo(e,"\u96f6\u4e00\u4e8c\u4e09\u56db\u4e94\u516d\u4e03\u516b\u4e5d",Vo,"\u8d1f",i,Ho|Po|No);case 41:return Oo(e,"\u96f6\u58f9\u8d30\u53c1\u8086\u4f0d\u9646\u67d2\u634c\u7396",zo,"\u8d1f",i,Ro|Ho|Po|No);case 26:return Oo(e,"\u3007\u4e00\u4e8c\u4e09\u56db\u4e94\u516d\u4e03\u516b\u4e5d","\u5341\u767e\u5343\u4e07",Go,i,0);case 25:return Oo(e,"\u96f6\u58f1\u5f10\u53c2\u56db\u4f0d\u516d\u4e03\u516b\u4e5d","\u62fe\u767e\u5343\u4e07",Go,i,Ro|Ho|Po);case 31:return Oo(e,"\uc601\uc77c\uc774\uc0bc\uc0ac\uc624\uc721\uce60\ud314\uad6c","\uc2ed\ubc31\ucc9c\ub9cc",Ko,A,Ro|Ho|Po);case 33:return Oo(e,"\u96f6\u4e00\u4e8c\u4e09\u56db\u4e94\u516d\u4e03\u516b\u4e5d","\u5341\u767e\u5343\u842c",Ko,A,0);case 32:return Oo(e,"\u96f6\u58f9\u8cb3\u53c3\u56db\u4e94\u516d\u4e03\u516b\u4e5d","\u62fe\u767e\u5343",Ko,A,Ro|Ho|Po);case 18:return Do(e,2406,2415,!0,r);case 20:return Qo(e,1,19999,ko,3,r);case 21:return Do(e,2790,2799,!0,r);case 22:return Do(e,2662,2671,!0,r);case 22:return Qo(e,1,10999,To,3,r);case 23:return Io(e,"\u3042\u3044\u3046\u3048\u304a\u304b\u304d\u304f\u3051\u3053\u3055\u3057\u3059\u305b\u305d\u305f\u3061\u3064\u3066\u3068\u306a\u306b\u306c\u306d\u306e\u306f\u3072\u3075\u3078\u307b\u307e\u307f\u3080\u3081\u3082\u3084\u3086\u3088\u3089\u308a\u308b\u308c\u308d\u308f\u3090\u3091\u3092\u3093");case 24:return Io(e,"\u3044\u308d\u306f\u306b\u307b\u3078\u3068\u3061\u308a\u306c\u308b\u3092\u308f\u304b\u3088\u305f\u308c\u305d\u3064\u306d\u306a\u3089\u3080\u3046\u3090\u306e\u304a\u304f\u3084\u307e\u3051\u3075\u3053\u3048\u3066\u3042\u3055\u304d\u3086\u3081\u307f\u3057\u3091\u3072\u3082\u305b\u3059");case 27:return Do(e,3302,3311,!0,r);case 28:return Io(e,"\u30a2\u30a4\u30a6\u30a8\u30aa\u30ab\u30ad\u30af\u30b1\u30b3\u30b5\u30b7\u30b9\u30bb\u30bd\u30bf\u30c1\u30c4\u30c6\u30c8\u30ca\u30cb\u30cc\u30cd\u30ce\u30cf\u30d2\u30d5\u30d8\u30db\u30de\u30df\u30e0\u30e1\u30e2\u30e4\u30e6\u30e8\u30e9\u30ea\u30eb\u30ec\u30ed\u30ef\u30f0\u30f1\u30f2\u30f3",i);case 29:return Io(e,"\u30a4\u30ed\u30cf\u30cb\u30db\u30d8\u30c8\u30c1\u30ea\u30cc\u30eb\u30f2\u30ef\u30ab\u30e8\u30bf\u30ec\u30bd\u30c4\u30cd\u30ca\u30e9\u30e0\u30a6\u30f0\u30ce\u30aa\u30af\u30e4\u30de\u30b1\u30d5\u30b3\u30a8\u30c6\u30a2\u30b5\u30ad\u30e6\u30e1\u30df\u30b7\u30f1\u30d2\u30e2\u30bb\u30b9",i);case 34:return Do(e,3792,3801,!0,r);case 37:return Do(e,6160,6169,!0,r);case 38:return Do(e,4160,4169,!0,r);case 39:return Do(e,2918,2927,!0,r);case 40:return Do(e,1776,1785,!0,r);case 43:return Do(e,3046,3055,!0,r);case 44:return 
Do(e,3174,3183,!0,r);case 45:return Do(e,3664,3673,!0,r);case 46:return Do(e,3872,3881,!0,r);default:return Do(e,48,57,!0,r)}},jo="data-html2canvas-ignore",Xo=function(){function e(e,t,n){if(this.context=e,this.options=n,this.scrolledElements=[],this.referenceElement=t,this.counters=new Uo,this.quoteDepth=0,!t.ownerDocument)throw new Error("Cloned element does not have an owner document");this.documentElement=this.cloneNode(t.ownerDocument.documentElement,!1)}return e.prototype.toIFrame=function(e,t){var n=this,A=Yo(e,t);if(!A.contentWindow)return Promise.reject("Unable to find iframe window");var a=e.defaultView.pageXOffset,o=e.defaultView.pageYOffset,s=A.contentWindow,l=s.document,u=$o(A).then((function(){return r(n,void 0,void 0,(function(){var e,n;return i(this,(function(r){switch(r.label){case 0:return this.scrolledElements.forEach(is),s&&(s.scrollTo(t.left,t.top),!/(iPad|iPhone|iPod)/g.test(navigator.userAgent)||s.scrollY===t.top&&s.scrollX===t.left||(this.context.logger.warn("Unable to restore scroll position for cloned document"),this.context.windowBounds=this.context.windowBounds.add(s.scrollX-t.left,s.scrollY-t.top,0,0))),e=this.options.onclone,"undefined"===typeof(n=this.clonedReferenceElement)?[2,Promise.reject("Error finding the "+this.referenceElement.nodeName+" in the cloned document")]:l.fonts&&l.fonts.ready?[4,l.fonts.ready]:[3,2];case 1:r.sent(),r.label=2;case 2:return/(AppleWebKit)/g.test(navigator.userAgent)?[4,Zo(l)]:[3,4];case 3:r.sent(),r.label=4;case 4:return"function"===typeof e?[2,Promise.resolve().then((function(){return e(l,n)})).then((function(){return A}))]:[2,A]}}))}))}));return l.open(),l.write(ns(document.doctype)+""),rs(this.referenceElement.ownerDocument,a,o),l.replaceChild(l.adoptNode(this.documentElement),l.documentElement),l.close(),u},e.prototype.createElementClone=function(e){if(BA(e,2),vo(e))return this.createCanvasClone(e);if(yo(e))return this.createVideoClone(e);if(_o(e))return this.createStyleClone(e);var t=e.cloneNode(!1);return wo(t)&&(wo(e)&&e.currentSrc&&e.currentSrc!==e.src&&(t.src=e.currentSrc,t.srcset=""),"lazy"===t.loading&&(t.loading="eager")),Eo(t)?this.createCustomElementClone(t):t},e.prototype.createCustomElementClone=function(e){var t=document.createElement("html2canvascustomelement");return ts(e.style,t),t},e.prototype.createStyleClone=function(e){try{var t=e.sheet;if(t&&t.cssRules){var n=[].slice.call(t.cssRules,0).reduce((function(e,t){return t&&"string"===typeof t.cssText?e+t.cssText:e}),""),r=e.cloneNode(!1);return r.textContent=n,r}}catch(Rt){if(this.context.logger.error("Unable to access cssRules property",Rt),"SecurityError"!==Rt.name)throw Rt}return e.cloneNode(!1)},e.prototype.createCanvasClone=function(e){var t;if(this.options.inlineImages&&e.ownerDocument){var n=e.ownerDocument.createElement("img");try{return n.src=e.toDataURL(),n}catch(Rt){this.context.logger.info("Unable to inline canvas contents, canvas is tainted",e)}}var r=e.cloneNode(!1);try{r.width=e.width,r.height=e.height;var i=e.getContext("2d"),A=r.getContext("2d");if(A)if(!this.options.allowTaint&&i)A.putImageData(i.getImageData(0,0,e.width,e.height),0,0);else{var a=null!==(t=e.getContext("webgl2"))&&void 0!==t?t:e.getContext("webgl");if(a){var o=a.getContextAttributes();!1===(null===o||void 0===o?void 0:o.preserveDrawingBuffer)&&this.context.logger.warn("Unable to clone WebGL context as it has preserveDrawingBuffer=false",e)}A.drawImage(e,0,0)}return r}catch(Rt){this.context.logger.info("Unable to clone canvas as it is tainted",e)}return 
r},e.prototype.createVideoClone=function(e){var t=e.ownerDocument.createElement("canvas");t.width=e.offsetWidth,t.height=e.offsetHeight;var n=t.getContext("2d");try{return n&&(n.drawImage(e,0,0,t.width,t.height),this.options.allowTaint||n.getImageData(0,0,t.width,t.height)),t}catch(Rt){this.context.logger.info("Unable to clone video as it is tainted",e)}var r=e.ownerDocument.createElement("canvas");return r.width=e.offsetWidth,r.height=e.offsetHeight,r},e.prototype.appendChildNode=function(e,t,n){so(t)&&(bo(t)||t.hasAttribute(jo)||"function"===typeof this.options.ignoreElements&&this.options.ignoreElements(t))||this.options.copyStyles&&so(t)&&_o(t)||e.appendChild(this.cloneNode(t,n))},e.prototype.cloneChildNodes=function(e,t,n){for(var r=this,i=e.shadowRoot?e.shadowRoot.firstChild:e.firstChild;i;i=i.nextSibling)if(so(i)&&So(i)&&"function"===typeof i.assignedNodes){var A=i.assignedNodes();A.length&&A.forEach((function(e){return r.appendChildNode(t,e,n)}))}else this.appendChildNode(t,i,n)},e.prototype.cloneNode=function(e,t){if(oo(e))return document.createTextNode(e.data);if(!e.ownerDocument)return e.cloneNode(!1);var n=e.ownerDocument.defaultView;if(n&&so(e)&&(lo(e)||uo(e))){var r=this.createElementClone(e);r.style.transitionProperty="none";var i=n.getComputedStyle(e),A=n.getComputedStyle(e,":before"),a=n.getComputedStyle(e,":after");this.referenceElement===e&&lo(r)&&(this.clonedReferenceElement=r),mo(r)&&us(r);var o=this.counters.parse(new mA(this.context,i)),s=this.resolvePseudoContent(e,r,A,KA.BEFORE);Eo(e)&&(t=!0),yo(e)||this.cloneChildNodes(e,r,t),s&&r.insertBefore(s,r.firstChild);var l=this.resolvePseudoContent(e,r,a,KA.AFTER);return l&&r.appendChild(l),this.counters.pop(o),(i&&(this.options.copyStyles||uo(e))&&!Bo(e)||t)&&ts(i,r),0===e.scrollTop&&0===e.scrollLeft||this.scrolledElements.push([r,e.scrollLeft,e.scrollTop]),(xo(e)||Co(e))&&(xo(r)||Co(r))&&(r.value=e.value),r}return e.cloneNode(!1)},e.prototype.resolvePseudoContent=function(e,t,n,r){var i=this;if(n){var A=n.content,a=t.ownerDocument;if(a&&A&&"none"!==A&&"-moz-alt-content"!==A&&"none"!==n.display){this.counters.parse(new mA(this.context,n));var o=new gA(this.context,n),s=a.createElement("html2canvaspseudoelement");ts(n,s),o.content.forEach((function(t){if(0===t.type)s.appendChild(a.createTextNode(t.value));else if(22===t.type){var n=a.createElement("img");n.src=t.value,n.style.opacity="1",s.appendChild(n)}else if(18===t.type){if("attr"===t.name){var r=t.values.filter(Qn);r.length&&s.appendChild(a.createTextNode(e.getAttribute(r[0].value)||""))}else if("counter"===t.name){var A=t.values.filter(Rn),l=A[0],u=A[1];if(l&&Qn(l)){var c=i.counters.getCounterValue(l.value),d=u&&Qn(u)?xi.parse(i.context,u.value):3;s.appendChild(a.createTextNode(Wo(c,d,!1)))}}else if("counters"===t.name){var h=t.values.filter(Rn),f=(l=h[0],h[1]);if(u=h[2],l&&Qn(l)){var p=i.counters.getCounterValues(l.value),g=u&&Qn(u)?xi.parse(i.context,u.value):3,m=f&&0===f.type?f.value:"",v=p.map((function(e){return Wo(e,g,!1)})).join(m);s.appendChild(a.createTextNode(v))}}}else if(20===t.type)switch(t.value){case"open-quote":s.appendChild(a.createTextNode(uA(o.quotes,i.quoteDepth++,!0)));break;case"close-quote":s.appendChild(a.createTextNode(uA(o.quotes,--i.quoteDepth,!1)));break;default:s.appendChild(a.createTextNode(t.value))}})),s.className=os+" "+ss;var l=r===KA.BEFORE?" 
"+os:" "+ss;return uo(t)?t.className.baseValue+=l:t.className+=l,s}}},e.destroy=function(e){return!!e.parentNode&&(e.parentNode.removeChild(e),!0)},e}();!function(e){e[e.BEFORE=0]="BEFORE",e[e.AFTER=1]="AFTER"}(KA||(KA={}));var qo,Yo=function(e,t){var n=e.createElement("iframe");return n.className="html2canvas-container",n.style.visibility="hidden",n.style.position="fixed",n.style.left="-10000px",n.style.top="0px",n.style.border="0",n.width=t.width.toString(),n.height=t.height.toString(),n.scrolling="no",n.setAttribute(jo,"true"),e.body.appendChild(n),n},Jo=function(e){return new Promise((function(t){e.complete?t():e.src?(e.onload=t,e.onerror=t):t()}))},Zo=function(e){return Promise.all([].slice.call(e.images,0).map(Jo))},$o=function(e){return new Promise((function(t,n){var r=e.contentWindow;if(!r)return n("No window assigned for iframe");var i=r.document;r.onload=e.onload=function(){r.onload=e.onload=null;var n=setInterval((function(){i.body.childNodes.length>0&&"complete"===i.readyState&&(clearInterval(n),t(e))}),50)}}))},es=["all","d","content"],ts=function(e,t){for(var n=e.length-1;n>=0;n--){var r=e.item(n);-1===es.indexOf(r)&&t.style.setProperty(r,e.getPropertyValue(r))}return t},ns=function(e){var t="";return e&&(t+=""),t},rs=function(e,t,n){e&&e.defaultView&&(t!==e.defaultView.pageXOffset||n!==e.defaultView.pageYOffset)&&e.defaultView.scrollTo(t,n)},is=function(e){var t=e[0],n=e[1],r=e[2];t.scrollLeft=n,t.scrollTop=r},As=":before",as=":after",os="___html2canvas___pseudoelement_before",ss="___html2canvas___pseudoelement_after",ls='{\n content: "" !important;\n display: none !important;\n}',us=function(e){cs(e,"."+os+As+ls+"\n ."+ss+as+ls)},cs=function(e,t){var n=e.ownerDocument;if(n){var r=n.createElement("style");r.textContent=t,e.appendChild(r)}},ds=function(){function e(){}return e.getOrigin=function(t){var n=e._link;return n?(n.href=t,n.href=n.href,n.protocol+n.hostname+n.port):"about:blank"},e.isSameOrigin=function(t){return e.getOrigin(t)===e._origin},e.setContext=function(t){e._link=t.document.createElement("a"),e._origin=e.getOrigin(t.location.href)},e._origin="about:blank",e}(),hs=function(){function e(e,t){this.context=e,this._options=t,this._cache={}}return e.prototype.addImage=function(e){var t=Promise.resolve();return this.has(e)?t:ws(e)||ms(e)?((this._cache[e]=this.loadImage(e)).catch((function(){})),t):t},e.prototype.match=function(e){return this._cache[e]},e.prototype.loadImage=function(e){return r(this,void 0,void 0,(function(){var t,n,r,A,a=this;return i(this,(function(i){switch(i.label){case 0:return t=ds.isSameOrigin(e),n=!vs(e)&&!0===this._options.useCORS&&xa.SUPPORT_CORS_IMAGES&&!t,r=!vs(e)&&!t&&!ws(e)&&"string"===typeof this._options.proxy&&xa.SUPPORT_CORS_XHR&&!n,t||!1!==this._options.allowTaint||vs(e)||ws(e)||r||n?(A=e,r?[4,this.proxy(A)]:[3,2]):[2];case 1:A=i.sent(),i.label=2;case 2:return this.context.logger.debug("Added image "+e.substring(0,256)),[4,new Promise((function(e,t){var r=new Image;r.onload=function(){return e(r)},r.onerror=t,(ys(A)||n)&&(r.crossOrigin="anonymous"),r.src=A,!0===r.complete&&setTimeout((function(){return e(r)}),500),a._options.imageTimeout>0&&setTimeout((function(){return t("Timed out ("+a._options.imageTimeout+"ms) loading image")}),a._options.imageTimeout)}))];case 3:return[2,i.sent()]}}))}))},e.prototype.has=function(e){return"undefined"!==typeof this._cache[e]},e.prototype.keys=function(){return Promise.resolve(Object.keys(this._cache))},e.prototype.proxy=function(e){var t=this,n=this._options.proxy;if(!n)throw new Error("No 
proxy defined");var r=e.substring(0,256);return new Promise((function(i,A){var a=xa.SUPPORT_RESPONSE_TYPE?"blob":"text",o=new XMLHttpRequest;o.onload=function(){if(200===o.status)if("text"===a)i(o.response);else{var e=new FileReader;e.addEventListener("load",(function(){return i(e.result)}),!1),e.addEventListener("error",(function(e){return A(e)}),!1),e.readAsDataURL(o.response)}else A("Failed to proxy resource "+r+" with status code "+o.status)},o.onerror=A;var s=n.indexOf("?")>-1?"&":"?";if(o.open("GET",""+n+s+"url="+encodeURIComponent(e)+"&responseType="+a),"text"!==a&&o instanceof XMLHttpRequest&&(o.responseType=a),t._options.imageTimeout){var l=t._options.imageTimeout;o.timeout=l,o.ontimeout=function(){return A("Timed out ("+l+"ms) proxying "+r)}}o.send()}))},e}(),fs=/^data:image\/svg\+xml/i,ps=/^data:image\/.*;base64,/i,gs=/^data:image\/.*/i,ms=function(e){return xa.SUPPORT_SVG_DRAWING||!Bs(e)},vs=function(e){return gs.test(e)},ys=function(e){return ps.test(e)},ws=function(e){return"blob"===e.substr(0,4)},Bs=function(e){return"svg"===e.substr(-3).toLowerCase()||fs.test(e)},_s=function(){function e(e,t){this.type=0,this.x=e,this.y=t}return e.prototype.add=function(t,n){return new e(this.x+t,this.y+n)},e}(),bs=function(e,t,n){return new _s(e.x+(t.x-e.x)*n,e.y+(t.y-e.y)*n)},xs=function(){function e(e,t,n,r){this.type=1,this.start=e,this.startControl=t,this.endControl=n,this.end=r}return e.prototype.subdivide=function(t,n){var r=bs(this.start,this.startControl,t),i=bs(this.startControl,this.endControl,t),A=bs(this.endControl,this.end,t),a=bs(r,i,t),o=bs(i,A,t),s=bs(a,o,t);return n?new e(this.start,r,a,s):new e(s,o,A,this.end)},e.prototype.add=function(t,n){return new e(this.start.add(t,n),this.startControl.add(t,n),this.endControl.add(t,n),this.end.add(t,n))},e.prototype.reverse=function(){return new e(this.end,this.endControl,this.startControl,this.start)},e}(),Cs=function(e){return 1===e.type},Ss=function(){function e(e){var t=e.styles,n=e.bounds,r=Wn(t.borderTopLeftRadius,n.width,n.height),i=r[0],A=r[1],a=Wn(t.borderTopRightRadius,n.width,n.height),o=a[0],s=a[1],l=Wn(t.borderBottomRightRadius,n.width,n.height),u=l[0],c=l[1],d=Wn(t.borderBottomLeftRadius,n.width,n.height),h=d[0],f=d[1],p=[];p.push((i+o)/n.width),p.push((h+u)/n.width),p.push((A+f)/n.height),p.push((s+c)/n.height);var g=Math.max.apply(Math,p);g>1&&(i/=g,A/=g,o/=g,s/=g,u/=g,c/=g,h/=g,f/=g);var m=n.width-o,v=n.height-c,y=n.width-u,w=n.height-f,B=t.borderTopWidth,_=t.borderRightWidth,b=t.borderBottomWidth,x=t.borderLeftWidth,C=jn(t.paddingTop,e.bounds.width),S=jn(t.paddingRight,e.bounds.width),E=jn(t.paddingBottom,e.bounds.width),U=jn(t.paddingLeft,e.bounds.width);this.topLeftBorderDoubleOuterBox=i>0||A>0?Es(n.left+x/3,n.top+B/3,i-x/3,A-B/3,qo.TOP_LEFT):new _s(n.left+x/3,n.top+B/3),this.topRightBorderDoubleOuterBox=i>0||A>0?Es(n.left+m,n.top+B/3,o-_/3,s-B/3,qo.TOP_RIGHT):new _s(n.left+n.width-_/3,n.top+B/3),this.bottomRightBorderDoubleOuterBox=u>0||c>0?Es(n.left+y,n.top+v,u-_/3,c-b/3,qo.BOTTOM_RIGHT):new _s(n.left+n.width-_/3,n.top+n.height-b/3),this.bottomLeftBorderDoubleOuterBox=h>0||f>0?Es(n.left+x/3,n.top+w,h-x/3,f-b/3,qo.BOTTOM_LEFT):new _s(n.left+x/3,n.top+n.height-b/3),this.topLeftBorderDoubleInnerBox=i>0||A>0?Es(n.left+2*x/3,n.top+2*B/3,i-2*x/3,A-2*B/3,qo.TOP_LEFT):new _s(n.left+2*x/3,n.top+2*B/3),this.topRightBorderDoubleInnerBox=i>0||A>0?Es(n.left+m,n.top+2*B/3,o-2*_/3,s-2*B/3,qo.TOP_RIGHT):new 
_s(n.left+n.width-2*_/3,n.top+2*B/3),this.bottomRightBorderDoubleInnerBox=u>0||c>0?Es(n.left+y,n.top+v,u-2*_/3,c-2*b/3,qo.BOTTOM_RIGHT):new _s(n.left+n.width-2*_/3,n.top+n.height-2*b/3),this.bottomLeftBorderDoubleInnerBox=h>0||f>0?Es(n.left+2*x/3,n.top+w,h-2*x/3,f-2*b/3,qo.BOTTOM_LEFT):new _s(n.left+2*x/3,n.top+n.height-2*b/3),this.topLeftBorderStroke=i>0||A>0?Es(n.left+x/2,n.top+B/2,i-x/2,A-B/2,qo.TOP_LEFT):new _s(n.left+x/2,n.top+B/2),this.topRightBorderStroke=i>0||A>0?Es(n.left+m,n.top+B/2,o-_/2,s-B/2,qo.TOP_RIGHT):new _s(n.left+n.width-_/2,n.top+B/2),this.bottomRightBorderStroke=u>0||c>0?Es(n.left+y,n.top+v,u-_/2,c-b/2,qo.BOTTOM_RIGHT):new _s(n.left+n.width-_/2,n.top+n.height-b/2),this.bottomLeftBorderStroke=h>0||f>0?Es(n.left+x/2,n.top+w,h-x/2,f-b/2,qo.BOTTOM_LEFT):new _s(n.left+x/2,n.top+n.height-b/2),this.topLeftBorderBox=i>0||A>0?Es(n.left,n.top,i,A,qo.TOP_LEFT):new _s(n.left,n.top),this.topRightBorderBox=o>0||s>0?Es(n.left+m,n.top,o,s,qo.TOP_RIGHT):new _s(n.left+n.width,n.top),this.bottomRightBorderBox=u>0||c>0?Es(n.left+y,n.top+v,u,c,qo.BOTTOM_RIGHT):new _s(n.left+n.width,n.top+n.height),this.bottomLeftBorderBox=h>0||f>0?Es(n.left,n.top+w,h,f,qo.BOTTOM_LEFT):new _s(n.left,n.top+n.height),this.topLeftPaddingBox=i>0||A>0?Es(n.left+x,n.top+B,Math.max(0,i-x),Math.max(0,A-B),qo.TOP_LEFT):new _s(n.left+x,n.top+B),this.topRightPaddingBox=o>0||s>0?Es(n.left+Math.min(m,n.width-_),n.top+B,m>n.width+_?0:Math.max(0,o-_),Math.max(0,s-B),qo.TOP_RIGHT):new _s(n.left+n.width-_,n.top+B),this.bottomRightPaddingBox=u>0||c>0?Es(n.left+Math.min(y,n.width-x),n.top+Math.min(v,n.height-b),Math.max(0,u-_),Math.max(0,c-b),qo.BOTTOM_RIGHT):new _s(n.left+n.width-_,n.top+n.height-b),this.bottomLeftPaddingBox=h>0||f>0?Es(n.left+x,n.top+Math.min(w,n.height-b),Math.max(0,h-x),Math.max(0,f-b),qo.BOTTOM_LEFT):new _s(n.left+x,n.top+n.height-b),this.topLeftContentBox=i>0||A>0?Es(n.left+x+U,n.top+B+C,Math.max(0,i-(x+U)),Math.max(0,A-(B+C)),qo.TOP_LEFT):new _s(n.left+x+U,n.top+B+C),this.topRightContentBox=o>0||s>0?Es(n.left+Math.min(m,n.width+x+U),n.top+B+C,m>n.width+x+U?0:o-x+U,s-(B+C),qo.TOP_RIGHT):new _s(n.left+n.width-(_+S),n.top+B+C),this.bottomRightContentBox=u>0||c>0?Es(n.left+Math.min(y,n.width-(x+U)),n.top+Math.min(v,n.height+B+C),Math.max(0,u-(_+S)),c-(b+E),qo.BOTTOM_RIGHT):new _s(n.left+n.width-(_+S),n.top+n.height-(b+E)),this.bottomLeftContentBox=h>0||f>0?Es(n.left+x+U,n.top+w,Math.max(0,h-(x+U)),f-(b+E),qo.BOTTOM_LEFT):new _s(n.left+x+U,n.top+n.height-(b+E))}return e}();!function(e){e[e.TOP_LEFT=0]="TOP_LEFT",e[e.TOP_RIGHT=1]="TOP_RIGHT",e[e.BOTTOM_RIGHT=2]="BOTTOM_RIGHT",e[e.BOTTOM_LEFT=3]="BOTTOM_LEFT"}(qo||(qo={}));var Es=function(e,t,n,r,i){var A=(Math.sqrt(2)-1)/3*4,a=n*A,o=r*A,s=e+n,l=t+r;switch(i){case qo.TOP_LEFT:return new xs(new _s(e,l),new _s(e,l-o),new _s(s-a,t),new _s(s,t));case qo.TOP_RIGHT:return new xs(new _s(e,t),new _s(e+a,t),new _s(s,l-o),new _s(s,l));case qo.BOTTOM_RIGHT:return new xs(new _s(s,t),new _s(s,t+o),new _s(e+a,l),new _s(e,l));case qo.BOTTOM_LEFT:default:return new xs(new _s(s,l),new _s(s-a,l),new _s(e,t+o),new _s(e,t))}},Us=function(e){return[e.topLeftBorderBox,e.topRightBorderBox,e.bottomRightBorderBox,e.bottomLeftBorderBox]},Ms=function(e){return[e.topLeftContentBox,e.topRightContentBox,e.bottomRightContentBox,e.bottomLeftContentBox]},Fs=function(e){return[e.topLeftPaddingBox,e.topRightPaddingBox,e.bottomRightPaddingBox,e.bottomLeftPaddingBox]},Ts=function(){function e(e,t,n){this.offsetX=e,this.offsetY=t,this.matrix=n,this.type=0,this.target=6}return 
e}(),ks=function(){function e(e,t){this.path=e,this.target=t,this.type=1}return e}(),Qs=function(){function e(e){this.opacity=e,this.type=2,this.target=6}return e}(),Ls=function(e){return 0===e.type},Ds=function(e){return 1===e.type},Is=function(e){return 2===e.type},Rs=function(e,t){return e.length===t.length&&e.some((function(e,n){return e===t[n]}))},Hs=function(e,t,n,r,i){return e.map((function(e,A){switch(A){case 0:return e.add(t,n);case 1:return e.add(t+r,n);case 2:return e.add(t+r,n+i);case 3:return e.add(t,n+i)}return e}))},Ps=function(){function e(e){this.element=e,this.inlineLevel=[],this.nonInlineLevel=[],this.negativeZIndex=[],this.zeroOrAutoZIndexOrTransformedOrOpacity=[],this.positiveZIndex=[],this.nonPositionedFloats=[],this.nonPositionedInlineLevel=[]}return e}(),Ns=function(){function e(e,t){if(this.container=e,this.parent=t,this.effects=[],this.curves=new Ss(this.container),this.container.styles.opacity<1&&this.effects.push(new Qs(this.container.styles.opacity)),null!==this.container.styles.transform){var n=this.container.bounds.left+this.container.styles.transformOrigin[0].number,r=this.container.bounds.top+this.container.styles.transformOrigin[1].number,i=this.container.styles.transform;this.effects.push(new Ts(n,r,i))}if(0!==this.container.styles.overflowX){var A=Us(this.curves),a=Fs(this.curves);Rs(A,a)?this.effects.push(new ks(A,6)):(this.effects.push(new ks(A,2)),this.effects.push(new ks(a,4)))}}return e.prototype.getEffects=function(e){for(var t=-1===[2,3].indexOf(this.container.styles.position),n=this.parent,r=this.effects.slice(0);n;){var i=n.effects.filter((function(e){return!Ds(e)}));if(t||0!==n.container.styles.position||!n.parent){if(r.unshift.apply(r,i),t=-1===[2,3].indexOf(n.container.styles.position),0!==n.container.styles.overflowX){var A=Us(n.curves),a=Fs(n.curves);Rs(A,a)||r.unshift(new ks(a,6))}}else r.unshift.apply(r,i);n=n.parent}return r.filter((function(t){return iA(t.target,e)}))},e}(),Os=function e(t,n,r,i){t.container.elements.forEach((function(A){var a=iA(A.flags,4),o=iA(A.flags,2),s=new Ns(A,t);iA(A.styles.display,2048)&&i.push(s);var l=iA(A.flags,8)?[]:i;if(a||o){var u=a||A.styles.isPositioned()?r:n,c=new Ps(s);if(A.styles.isPositioned()||A.styles.opacity<1||A.styles.isTransformed()){var d=A.styles.zIndex.order;if(d<0){var h=0;u.negativeZIndex.some((function(e,t){return d>e.element.container.styles.zIndex.order?(h=t,!1):h>0})),u.negativeZIndex.splice(h,0,c)}else if(d>0){var f=0;u.positiveZIndex.some((function(e,t){return d>=e.element.container.styles.zIndex.order?(f=t+1,!1):f>0})),u.positiveZIndex.splice(f,0,c)}else u.zeroOrAutoZIndexOrTransformedOrOpacity.push(c)}else A.styles.isFloating()?u.nonPositionedFloats.push(c):u.nonPositionedInlineLevel.push(c);e(s,c,a?c:r,l)}else A.styles.isInlineLevel()?n.inlineLevel.push(s):n.nonInlineLevel.push(s),e(s,n,r,l);iA(A.flags,8)&&Vs(A,l)}))},Vs=function(e,t){for(var n=e instanceof Va?e.start:1,r=e instanceof Va&&e.reversed,i=0;i0&&e.intrinsicHeight>0){var r=Js(e),i=Fs(t);this.path(i),this.ctx.save(),this.ctx.clip(),this.ctx.drawImage(n,0,0,e.intrinsicWidth,e.intrinsicHeight,r.left,r.top,r.width,r.height),this.ctx.restore()}},n.prototype.renderNodeContent=function(e){return r(this,void 0,void 0,(function(){var t,r,A,o,s,l,u,c,d,h,f,p,g,m,v,y,w,B;return i(this,(function(i){switch(i.label){case 0:this.applyEffects(e.getEffects(4)),t=e.container,r=e.curves,A=t.styles,o=0,s=t.textNodes,i.label=1;case 1:return 
o0&&x>0&&(v=r.ctx.createPattern(p,"repeat"),r.renderRepeat(w,v,S,E))):Qr(n)&&(y=el(e,t,[null,null,null]),w=y[0],B=y[1],_=y[2],b=y[3],x=y[4],C=0===n.position.length?[Gn]:n.position,S=jn(C[0],b),E=jn(C[C.length-1],x),U=Br(n,S,E,b,x),M=U[0],F=U[1],M>0&&F>0&&(T=r.ctx.createRadialGradient(B+S,_+E,0,B+S,_+E,M),gr(n.stops,2*M).forEach((function(e){return T.addColorStop(e.stop,ir(e.color))})),r.path(w),r.ctx.fillStyle=T,M!==F?(k=e.bounds.left+.5*e.bounds.width,Q=e.bounds.top+.5*e.bounds.height,D=1/(L=F/M),r.ctx.save(),r.ctx.translate(k,Q),r.ctx.transform(1,0,0,L,0,0),r.ctx.translate(-k,-Q),r.ctx.fillRect(B,D*(_-Q)+Q,b,x*D),r.ctx.restore()):r.ctx.fill())),i.label=6;case 6:return t--,[2]}}))},r=this,A=0,a=e.styles.backgroundImage.slice(0).reverse(),s.label=1;case 1:return A0?2!==l.style?[3,5]:[4,this.renderDashedDottedBorder(l.color,l.width,a,e.curves,2)]:[3,11]:[3,13];case 4:return i.sent(),[3,11];case 5:return 3!==l.style?[3,7]:[4,this.renderDashedDottedBorder(l.color,l.width,a,e.curves,3)];case 6:return i.sent(),[3,11];case 7:return 4!==l.style?[3,9]:[4,this.renderDoubleBorder(l.color,l.width,a,e.curves)];case 8:return i.sent(),[3,11];case 9:return[4,this.renderSolidBorder(l.color,a,e.curves)];case 10:i.sent(),i.label=11;case 11:a++,i.label=12;case 12:return o++,[3,3];case 13:return[2]}}))}))},n.prototype.renderDashedDottedBorder=function(e,t,n,A,a){return r(this,void 0,void 0,(function(){var r,o,s,l,u,c,d,h,f,p,g,m,v,y,w,B;return i(this,(function(i){return this.ctx.save(),r=js(A,n),o=Gs(A,n),2===a&&(this.path(o),this.ctx.clip()),Cs(o[0])?(s=o[0].start.x,l=o[0].start.y):(s=o[0].x,l=o[0].y),Cs(o[1])?(u=o[1].end.x,c=o[1].end.y):(u=o[1].x,c=o[1].y),d=0===n||2===n?Math.abs(s-u):Math.abs(l-c),this.ctx.beginPath(),3===a?this.formatPath(r):this.formatPath(o.slice(0,2)),h=t<3?3*t:2*t,f=t<3?2*t:t,3===a&&(h=t,f=t),p=!0,d<=2*h?p=!1:d<=2*h+f?(h*=g=d/(2*h+f),f*=g):(m=Math.floor((d+f)/(h+f)),v=(d-m*h)/(m-1),f=(y=(d-(m+1)*h)/m)<=0||Math.abs(f-v)