Commit 64cebd0
1 Parent(s): af9fc6f
Update parquet files (step 93 of 397)
This view is limited to 50 files because it contains too many changes. See raw diff.
- spaces/101-5/gpt4free/g4f/.v1/gpt4free/__init__.py +0 -103
- spaces/1368565466ki/Satdia/attentions.py +0 -300
- spaces/1368565466ki/ZSTRD/monotonic_align/__init__.py +0 -20
- spaces/17TheWord/RealESRGAN/app.py +0 -68
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Cedar Noise Reduction Plugin.md +0 -7
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download and Install Microsoft Office 2013 in Minutes with This Simple Trick.md +0 -40
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Estadistica para administracion levine berenson pdf solucionario Todo lo que necesitas saber sobre analisis de datos y toma de decisiones.md +0 -102
- spaces/1gistliPinn/ChatGPT4/Examples/2021 Free Spokeo Login And Password.md +0 -28
- spaces/1phancelerku/anime-remove-background/Bus Simulator Ultimate v1.5.4 MOD APK Hack the Latest Version of the Realistic Bus Simulation Game.md +0 -94
- spaces/1phancelerku/anime-remove-background/Download YouTube 4.0 APK for Android - Watch Videos Offline.md +0 -158
- spaces/232labs/VToonify/vtoonify/model/raft/evaluate.py +0 -197
- spaces/42digital/DeepFashion_Classification/app.py +0 -31
- spaces/52Hz/SUNet_AWGN_denoising/model/SUNet.py +0 -30
- spaces/7hao/bingo/src/state/index.ts +0 -118
- spaces/AFlac199/openai-reverse-proxy/README.md +0 -10
- spaces/AIConsultant/MusicGen/audiocraft/utils/samples/manager.py +0 -386
- spaces/AIGC-Audio/AudioGPT/NeuralSeq/utils/os_utils.py +0 -20
- spaces/AIGC-Audio/AudioGPT/text_to_speech/modules/vocoder/parallel_wavegan/stft_loss.py +0 -102
- spaces/AchyuthGamer/OpenGPT-Chat-UI/.svelte-kit/types/src/routes/conversation/$types.d.ts +0 -8
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/circlemaskimage/Factory.js +0 -13
- spaces/AiMimicry/sovits-models/vdecoder/__init__.py +0 -0
- spaces/AlekseyKorshuk/gai-project/app.py +0 -13
- spaces/AlekseyKorshuk/model-evaluation/models/base.py +0 -51
- spaces/Alfasign/diffusers-gallery/README.md +0 -14
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/text_to_image/train_text_to_image_flax.py +0 -573
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_controlnet.py +0 -28
- spaces/Andy1621/uniformer_image_detection/configs/_base_/models/mask_rcnn_r50_fpn.py +0 -120
- spaces/Andy1621/uniformer_image_segmentation/configs/ann/ann_r50-d8_512x512_80k_ade20k.py +0 -6
- spaces/Andy1621/uniformer_image_segmentation/configs/fcn/fcn_d6_r50b-d16_769x769_80k_cityscapes.py +0 -2
- spaces/Andy1621/uniformer_image_segmentation/configs/pspnet/pspnet_r18b-d8_512x1024_80k_cityscapes.py +0 -9
- spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/guided_diffusion/model-card.md +0 -59
- spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/guided_diffusion/scripts/image_train.py +0 -83
- spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/core/seg/sampler/ohem_pixel_sampler.py +0 -76
- spaces/Anonymous-sub/Rerender/ControlNet/ldm/modules/attention.py +0 -341
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/__init__.py +0 -120
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/box.py +0 -517
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/command/setopt.py +0 -149
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/docs/notes/changelog.md +0 -48
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet2_docs/MODEL_ZOO.md +0 -73
- spaces/Ayushnangia/Whispercpp_yt/app.py +0 -81
- spaces/AzinZ/vitscn/attentions.py +0 -303
- spaces/Benson/text-generation/Examples/9apps 2018.md +0 -97
- spaces/Benson/text-generation/Examples/Blockman Go Apkmody.md +0 -100
- spaces/Benson/text-generation/Examples/Descargar Etiqueta Despus De La Escuela Apk Mod.md +0 -75
- spaces/Bianca0930/Bianca/README.md +0 -12
- spaces/Boadiwaa/Recipes/openai/api_resources/embedding.py +0 -58
- spaces/CNXT/PiX2TXT/README.md +0 -12
- spaces/CVH-vn1210/make_hair/app.py +0 -158
- spaces/CVPR/Dual-Key_Backdoor_Attacks/bottom-up-attention-vqa/tools/detection_features_converter.py +0 -161
- spaces/CVPR/LIVE/thrust/thrust/system/tbb/detail/count.h +0 -23
spaces/101-5/gpt4free/g4f/.v1/gpt4free/__init__.py DELETED
@@ -1,103 +0,0 @@
-from enum import Enum
-
-from gpt4free import forefront
-from gpt4free import quora
-from gpt4free import theb
-from gpt4free import usesless
-from gpt4free import you
-from gpt4free import aicolors
-from gpt4free import deepai
-
-
-class Provider(Enum):
-    """An enum representing different providers."""
-
-    You = "you"
-    Poe = "poe"
-    ForeFront = "fore_front"
-    Theb = "theb"
-    UseLess = "useless"
-    AiColors = "ai_colors"
-    DeepAI = "deepai"
-
-
-class Completion:
-    """This class will be used for invoking the given provider"""
-
-    @staticmethod
-    def create(provider: Provider, prompt: str, **kwargs) -> str:
-        """
-        Invokes the given provider with given prompt and addition arguments and returns the string response
-
-        :param provider: an enum representing the provider to use while invoking
-        :param prompt: input provided by the user
-        :param kwargs: Additional keyword arguments to pass to the provider while invoking
-        :return: A string representing the response from the provider
-        """
-        if provider == Provider.Poe:
-            return Completion.__poe_service(prompt, **kwargs)
-        elif provider == Provider.You:
-            return Completion.__you_service(prompt, **kwargs)
-        elif provider == Provider.ForeFront:
-            return Completion.__fore_front_service(prompt, **kwargs)
-        elif provider == Provider.Theb:
-            return Completion.__theb_service(prompt, **kwargs)
-        elif provider == Provider.UseLess:
-            return Completion.__useless_service(prompt, **kwargs)
-        elif provider == Provider.AiColors:
-            return Completion.__ai_colors_service(prompt, **kwargs)
-        elif provider == Provider.DeepAI:
-            return Completion.__deepai_service(prompt, **kwargs)
-        else:
-            raise Exception("Provider not exist, Please try again")
-
-    @staticmethod
-    def __ai_colors_service(prompt: str):
-        return aicolors.Completion.create(prompt=prompt)
-
-    @staticmethod
-    def __useless_service(prompt: str, **kwargs) -> str:
-        return usesless.Completion.create(prompt=prompt, **kwargs)
-
-    @staticmethod
-    def __you_service(prompt: str, **kwargs) -> str:
-        return you.Completion.create(prompt, **kwargs).text
-
-    @staticmethod
-    def __poe_service(prompt: str, **kwargs) -> str:
-        return quora.Completion.create(prompt=prompt, **kwargs).text
-
-    @staticmethod
-    def __fore_front_service(prompt: str, **kwargs) -> str:
-        return forefront.Completion.create(prompt=prompt, **kwargs).text
-
-    @staticmethod
-    def __theb_service(prompt: str, **kwargs):
-        return "".join(theb.Completion.create(prompt=prompt))
-
-    @staticmethod
-    def __deepai_service(prompt: str, **kwargs):
-        return "".join(deepai.Completion.create(prompt=prompt))
-
-
-class ChatCompletion:
-    """This class is used to execute a chat completion for a specified provider"""
-
-    @staticmethod
-    def create(provider: Provider, messages: list, **kwargs) -> str:
-        """
-        Invokes the given provider with given chat messages and addition arguments and returns the string response
-
-        :param provider: an enum representing the provider to use while invoking
-        :param messages: a list of chat messages, see the OpenAI docs for how to format this (https://platform.openai.com/docs/guides/chat/introduction)
-        :param kwargs: Additional keyword arguments to pass to the provider while invoking
-        :return: A string representing the response from the provider
-        """
-        if provider == Provider.DeepAI:
-            return ChatCompletion.__deepai_service(messages, **kwargs)
-        else:
-            raise Exception("Provider not exist, Please try again")
-
-    @staticmethod
-    def __deepai_service(messages: list, **kwargs):
-        return "".join(deepai.ChatCompletion.create(messages=messages))
spaces/1368565466ki/Satdia/attentions.py DELETED
@@ -1,300 +0,0 @@
-import math
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-import commons
-from modules import LayerNorm
-
-
-class Encoder(nn.Module):
-  def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, **kwargs):
-    super().__init__()
-    self.hidden_channels = hidden_channels
-    self.filter_channels = filter_channels
-    self.n_heads = n_heads
-    self.n_layers = n_layers
-    self.kernel_size = kernel_size
-    self.p_dropout = p_dropout
-    self.window_size = window_size
-
-    self.drop = nn.Dropout(p_dropout)
-    self.attn_layers = nn.ModuleList()
-    self.norm_layers_1 = nn.ModuleList()
-    self.ffn_layers = nn.ModuleList()
-    self.norm_layers_2 = nn.ModuleList()
-    for i in range(self.n_layers):
-      self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size))
-      self.norm_layers_1.append(LayerNorm(hidden_channels))
-      self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout))
-      self.norm_layers_2.append(LayerNorm(hidden_channels))
-
-  def forward(self, x, x_mask):
-    attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
-    x = x * x_mask
-    for i in range(self.n_layers):
-      y = self.attn_layers[i](x, x, attn_mask)
-      y = self.drop(y)
-      x = self.norm_layers_1[i](x + y)
-
-      y = self.ffn_layers[i](x, x_mask)
-      y = self.drop(y)
-      x = self.norm_layers_2[i](x + y)
-    x = x * x_mask
-    return x
-
-
-class Decoder(nn.Module):
-  def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs):
-    super().__init__()
-    self.hidden_channels = hidden_channels
-    self.filter_channels = filter_channels
-    self.n_heads = n_heads
-    self.n_layers = n_layers
-    self.kernel_size = kernel_size
-    self.p_dropout = p_dropout
-    self.proximal_bias = proximal_bias
-    self.proximal_init = proximal_init
-
-    self.drop = nn.Dropout(p_dropout)
-    self.self_attn_layers = nn.ModuleList()
-    self.norm_layers_0 = nn.ModuleList()
-    self.encdec_attn_layers = nn.ModuleList()
-    self.norm_layers_1 = nn.ModuleList()
-    self.ffn_layers = nn.ModuleList()
-    self.norm_layers_2 = nn.ModuleList()
-    for i in range(self.n_layers):
-      self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init))
-      self.norm_layers_0.append(LayerNorm(hidden_channels))
-      self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout))
-      self.norm_layers_1.append(LayerNorm(hidden_channels))
-      self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True))
-      self.norm_layers_2.append(LayerNorm(hidden_channels))
-
-  def forward(self, x, x_mask, h, h_mask):
-    """
-    x: decoder input
-    h: encoder output
-    """
-    self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype)
-    encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
-    x = x * x_mask
-    for i in range(self.n_layers):
-      y = self.self_attn_layers[i](x, x, self_attn_mask)
-      y = self.drop(y)
-      x = self.norm_layers_0[i](x + y)
-
-      y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
-      y = self.drop(y)
-      x = self.norm_layers_1[i](x + y)
-
-      y = self.ffn_layers[i](x, x_mask)
-      y = self.drop(y)
-      x = self.norm_layers_2[i](x + y)
-    x = x * x_mask
-    return x
-
-
-class MultiHeadAttention(nn.Module):
-  def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False):
-    super().__init__()
-    assert channels % n_heads == 0
-
-    self.channels = channels
-    self.out_channels = out_channels
-    self.n_heads = n_heads
-    self.p_dropout = p_dropout
-    self.window_size = window_size
-    self.heads_share = heads_share
-    self.block_length = block_length
-    self.proximal_bias = proximal_bias
-    self.proximal_init = proximal_init
-    self.attn = None
-
-    self.k_channels = channels // n_heads
-    self.conv_q = nn.Conv1d(channels, channels, 1)
-    self.conv_k = nn.Conv1d(channels, channels, 1)
-    self.conv_v = nn.Conv1d(channels, channels, 1)
-    self.conv_o = nn.Conv1d(channels, out_channels, 1)
-    self.drop = nn.Dropout(p_dropout)
-
-    if window_size is not None:
-      n_heads_rel = 1 if heads_share else n_heads
-      rel_stddev = self.k_channels**-0.5
-      self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
-      self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
-
-    nn.init.xavier_uniform_(self.conv_q.weight)
-    nn.init.xavier_uniform_(self.conv_k.weight)
-    nn.init.xavier_uniform_(self.conv_v.weight)
-    if proximal_init:
-      with torch.no_grad():
-        self.conv_k.weight.copy_(self.conv_q.weight)
-        self.conv_k.bias.copy_(self.conv_q.bias)
-
-  def forward(self, x, c, attn_mask=None):
-    q = self.conv_q(x)
-    k = self.conv_k(c)
-    v = self.conv_v(c)
-
-    x, self.attn = self.attention(q, k, v, mask=attn_mask)
-
-    x = self.conv_o(x)
-    return x
-
-  def attention(self, query, key, value, mask=None):
-    # reshape [b, d, t] -> [b, n_h, t, d_k]
-    b, d, t_s, t_t = (*key.size(), query.size(2))
-    query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
-    key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
-    value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
-
-    scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
-    if self.window_size is not None:
-      assert t_s == t_t, "Relative attention is only available for self-attention."
-      key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
-      rel_logits = self._matmul_with_relative_keys(query / math.sqrt(self.k_channels), key_relative_embeddings)
-      scores_local = self._relative_position_to_absolute_position(rel_logits)
-      scores = scores + scores_local
-    if self.proximal_bias:
-      assert t_s == t_t, "Proximal bias is only available for self-attention."
-      scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype)
-    if mask is not None:
-      scores = scores.masked_fill(mask == 0, -1e4)
-      if self.block_length is not None:
-        assert t_s == t_t, "Local attention is only available for self-attention."
-        block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length)
-        scores = scores.masked_fill(block_mask == 0, -1e4)
-    p_attn = F.softmax(scores, dim=-1)  # [b, n_h, t_t, t_s]
-    p_attn = self.drop(p_attn)
-    output = torch.matmul(p_attn, value)
-    if self.window_size is not None:
-      relative_weights = self._absolute_position_to_relative_position(p_attn)
-      value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s)
-      output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings)
-    output = output.transpose(2, 3).contiguous().view(b, d, t_t)  # [b, n_h, t_t, d_k] -> [b, d, t_t]
-    return output, p_attn
-
-  def _matmul_with_relative_values(self, x, y):
-    """
-    x: [b, h, l, m]
-    y: [h or 1, m, d]
-    ret: [b, h, l, d]
-    """
-    ret = torch.matmul(x, y.unsqueeze(0))
-    return ret
-
-  def _matmul_with_relative_keys(self, x, y):
-    """
-    x: [b, h, l, d]
-    y: [h or 1, m, d]
-    ret: [b, h, l, m]
-    """
-    ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
-    return ret
-
-  def _get_relative_embeddings(self, relative_embeddings, length):
-    max_relative_position = 2 * self.window_size + 1
-    # Pad first before slice to avoid using cond ops.
-    pad_length = max(length - (self.window_size + 1), 0)
-    slice_start_position = max((self.window_size + 1) - length, 0)
-    slice_end_position = slice_start_position + 2 * length - 1
-    if pad_length > 0:
-      padded_relative_embeddings = F.pad(
-          relative_embeddings,
-          commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]))
-    else:
-      padded_relative_embeddings = relative_embeddings
-    used_relative_embeddings = padded_relative_embeddings[:, slice_start_position:slice_end_position]
-    return used_relative_embeddings
-
-  def _relative_position_to_absolute_position(self, x):
-    """
-    x: [b, h, l, 2*l-1]
-    ret: [b, h, l, l]
-    """
-    batch, heads, length, _ = x.size()
-    # Concat columns of pad to shift from relative to absolute indexing.
-    x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]))
-
-    # Concat extra elements so to add up to shape (len+1, 2*len-1).
-    x_flat = x.view([batch, heads, length * 2 * length])
-    x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]]))
-
-    # Reshape and slice out the padded elements.
-    x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[:, :, :length, length - 1:]
-    return x_final
-
-  def _absolute_position_to_relative_position(self, x):
-    """
-    x: [b, h, l, l]
-    ret: [b, h, l, 2*l-1]
-    """
-    batch, heads, length, _ = x.size()
-    # pad along column
-    x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]]))
-    x_flat = x.view([batch, heads, length**2 + length * (length - 1)])
-    # add 0's in the beginning that will skew the elements after reshape
-    x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
-    x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
-    return x_final
-
-  def _attention_bias_proximal(self, length):
-    """Bias for self-attention to encourage attention to close positions.
-    Args:
-      length: an integer scalar.
-    Returns:
-      a Tensor with shape [1, 1, length, length]
-    """
-    r = torch.arange(length, dtype=torch.float32)
-    diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
-    return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
-
-
-class FFN(nn.Module):
-  def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False):
-    super().__init__()
-    self.in_channels = in_channels
-    self.out_channels = out_channels
-    self.filter_channels = filter_channels
-    self.kernel_size = kernel_size
-    self.p_dropout = p_dropout
-    self.activation = activation
-    self.causal = causal
-
-    if causal:
-      self.padding = self._causal_padding
-    else:
-      self.padding = self._same_padding
-
-    self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
-    self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
-    self.drop = nn.Dropout(p_dropout)
-
-  def forward(self, x, x_mask):
-    x = self.conv_1(self.padding(x * x_mask))
-    if self.activation == "gelu":
-      x = x * torch.sigmoid(1.702 * x)
-    else:
-      x = torch.relu(x)
-    x = self.drop(x)
-    x = self.conv_2(self.padding(x * x_mask))
-    return x * x_mask
-
-  def _causal_padding(self, x):
-    if self.kernel_size == 1:
-      return x
-    pad_l = self.kernel_size - 1
-    pad_r = 0
-    padding = [[0, 0], [0, 0], [pad_l, pad_r]]
-    x = F.pad(x, commons.convert_pad_shape(padding))
-    return x
-
-  def _same_padding(self, x):
-    if self.kernel_size == 1:
-      return x
-    pad_l = (self.kernel_size - 1) // 2
-    pad_r = self.kernel_size // 2
-    padding = [[0, 0], [0, 0], [pad_l, pad_r]]
-    x = F.pad(x, commons.convert_pad_shape(padding))
-    return x
spaces/1368565466ki/ZSTRD/monotonic_align/__init__.py DELETED
@@ -1,20 +0,0 @@
-from numpy import zeros, int32, float32
-from torch import from_numpy
-
-from .core import maximum_path_jit
-
-
-def maximum_path(neg_cent, mask):
-  """ numba optimized version.
-  neg_cent: [b, t_t, t_s]
-  mask: [b, t_t, t_s]
-  """
-  device = neg_cent.device
-  dtype = neg_cent.dtype
-  neg_cent = neg_cent.data.cpu().numpy().astype(float32)
-  path = zeros(neg_cent.shape, dtype=int32)
-
-  t_t_max = mask.sum(1)[:, 0].data.cpu().numpy().astype(int32)
-  t_s_max = mask.sum(2)[:, 0].data.cpu().numpy().astype(int32)
-  maximum_path_jit(path, neg_cent, t_t_max, t_s_max)
-  return from_numpy(path).to(device=device, dtype=dtype)
spaces/17TheWord/RealESRGAN/app.py DELETED
@@ -1,68 +0,0 @@
-import os
-os.system("pip install gradio==2.9b23")
-import random
-import gradio as gr
-from PIL import Image
-import torch
-from random import randint
-import sys
-from subprocess import call
-import psutil
-
-
-
-
-torch.hub.download_url_to_file('http://people.csail.mit.edu/billf/project%20pages/sresCode/Markov%20Random%20Fields%20for%20Super-Resolution_files/100075_lowres.jpg', 'bear.jpg')
-
-
-def run_cmd(command):
-    try:
-        print(command)
-        call(command, shell=True)
-    except KeyboardInterrupt:
-        print("Process interrupted")
-        sys.exit(1)
-run_cmd("wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth -P .")
-run_cmd("pip install basicsr")
-run_cmd("pip freeze")
-
-os.system("wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth -P .")
-
-
-def inference(img,mode):
-    _id = randint(1, 10000)
-    INPUT_DIR = "/tmp/input_image" + str(_id) + "/"
-    OUTPUT_DIR = "/tmp/output_image" + str(_id) + "/"
-    run_cmd("rm -rf " + INPUT_DIR)
-    run_cmd("rm -rf " + OUTPUT_DIR)
-    run_cmd("mkdir " + INPUT_DIR)
-    run_cmd("mkdir " + OUTPUT_DIR)
-    basewidth = 256
-    wpercent = (basewidth/float(img.size[0]))
-    hsize = int((float(img.size[1])*float(wpercent)))
-    img = img.resize((basewidth,hsize), Image.ANTIALIAS)
-    img.save(INPUT_DIR + "1.jpg", "JPEG")
-    if mode == "base":
-        run_cmd("python inference_realesrgan.py -n RealESRGAN_x4plus -i "+ INPUT_DIR + " -o " + OUTPUT_DIR)
-    else:
-        os.system("python inference_realesrgan.py -n RealESRGAN_x4plus_anime_6B -i "+ INPUT_DIR + " -o " + OUTPUT_DIR)
-    return os.path.join(OUTPUT_DIR, "1_out.jpg")
-
-
-
-
-title = "Real-ESRGAN"
-description = "Gradio demo for Real-ESRGAN. To use it, simply upload your image, or click one of the examples to load them. Read more at the links below. Please click submit only once"
-article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2107.10833'>Real-ESRGAN: Training Real-World Blind Super-Resolution with Pure Synthetic Data</a> | <a href='https://github.com/xinntao/Real-ESRGAN'>Github Repo</a></p>"
-
-gr.Interface(
-    inference,
-    [gr.inputs.Image(type="pil", label="Input"),gr.inputs.Radio(["base","anime"], type="value", default="base", label="model type")],
-    gr.outputs.Image(type="file", label="Output"),
-    title=title,
-    description=description,
-    article=article,
-    examples=[
-        ['bear.jpg','base'],
-        ['anime.png','anime']
-    ]).launch()
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Cedar Noise Reduction Plugin.md DELETED
@@ -1,7 +0,0 @@
-<br />
-<br> - Overview of Cedar noise reduction plugin: describe its features, benefits, and applications | | H2: How to Use Cedar Noise Reduction Plugin for Audio Restoration | - Step 1: Choose the right plugin for your needs: compare different Cedar products and their capabilities <br> - Step 2: Load the plugin in your DAW or audio editor: show how to install and activate the plugin <br> - Step 3: Adjust the settings and parameters: explain how to use the learn function, the noise attenuation knob, and other controls <br> - Step 4: Preview and apply the noise reduction: demonstrate the results and tips for fine-tuning | | H2: How to Use Cedar Noise Reduction Plugin for Live Sound | - Step 1: Connect the plugin to your audio interface or mixer: show how to use the analog, digital, or Dante inputs and outputs <br> - Step 2: Set up the plugin for live mode: explain how to use the adaptive mode, the freeze function, and other features <br> - Step 3: Monitor and control the noise reduction: show how to use the meters, indicators, and bypass switch <br> - Step 4: Enjoy a cleaner, brighter, and louder sound: demonstrate the benefits and advantages of using the plugin for live sound | | H2: How to Use Cedar Noise Reduction Plugin for Post Production | - Step 1: Import your audio file or project into the standalone RX Audio Editor: show how to use the file browser, the waveform display, and the spectral view <br> - Step 2: Select the noise reduction module from the RX menu: explain how to choose between DNS 2, DNS 4, or DNS 8D modules <br> - Step 3: Edit your audio with advanced tools and features: show how to use the spectral repair, dialogue isolate, de-reverb, de-click, de-hum, and other tools <br> - Step 4: Export your audio file or project with improved quality: demonstrate how to use the batch processor, the metadata editor, and the format options | | H2: How to Compare Cedar Noise Reduction Plugin with Other Software | - Criteria for comparison: list some factors to consider when comparing noise reduction software, such as performance, quality, ease of use, price, support, etc. <br> - Examples of other software: mention some popular alternatives to Cedar noise reduction plugin, such as iZotope RX 8, Accusonus ERA, SoundSoap 5, etc. <br> - Pros and cons of each software: highlight the strengths and weaknesses of each software based on the criteria <br> - Conclusion: summarize which software is best suited for different scenarios and preferences | | H2: How to Get Cedar Noise Reduction Plugin and Learn More About It | - Pricing and availability: provide information on how to purchase or rent Cedar noise reduction plugin, as well as its system requirements and compatibility <br> - Resources and support: provide links to Cedar's official website, user manual, video tutorials, FAQs, customer service, etc. <br> - Testimonials and reviews: provide some quotes from satisfied customers or reputable sources that praise Cedar noise reduction plugin | | H2: Conclusion | - Recap the main points of the article: summarize what is Cedar noise reduction plugin, how to use it for different purposes, how to compare it with other software, and how to get it and learn more about it <br> - Call to action: encourage readers to try out Cedar noise reduction plugin for themselves or contact Cedar for more information | Article with HTML formatting <h1>What is Cedar Noise Reduction Plugin and Why You Need It</h1>
-<p>If you are involved in any kind of audio production, whether it is recording, mixing, mastering, broadcasting, live sound, or post production, you know how important it is to have a clean and clear sound. However, you also know how challenging it can be to achieve that sound in real-world situations, where you have to deal with various types of noise that can ruin your audio quality.</p>
-<p>Noise can be defined as any unwanted sound that interferes with your desired sound. It can come from different sources, such as background noises (traffic, air conditioning, wind, rain), equipment noises (hiss, hum, clicks), or recording errors (clipping, distortion, dropouts). Noise can affect your audio in different ways, such as reducing</p>
-<h2>cedar noise reduction plugin</h2><br /><p><b><b>DOWNLOAD</b> »»» <a href="https://byltly.com/2uKA3d">https://byltly.com/2uKA3d</a></b></p><br /><br /> b2dd77e56b<br />
-<br />
-<br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download and Install Microsoft Office 2013 in Minutes with This Simple Trick.md DELETED
@@ -1,40 +0,0 @@
-
-<h1>How to Install Microsoft Office 2013 Setup with Free Download</h1>
-<p>Microsoft Office 2013 is a popular suite of productivity applications that includes Word, Excel, PowerPoint, Outlook, OneNote, Access, and Publisher. If you want to install Microsoft Office 2013 setup with free download, you can follow these simple steps:</p>
-<h2>microsoft office 2013 setup with crack free download</h2><br /><p><b><b>Download File</b> ✶ <a href="https://byltly.com/2uKvwT">https://byltly.com/2uKvwT</a></b></p><br /><br />
-<ol>
-<li>Go to the official Microsoft website and sign in with your Microsoft account. If you don't have one, you can create one for free.</li>
-<li>Click on the Office tab and select Office 2013 from the drop-down menu.</li>
-<li>Choose your preferred language and edition (32-bit or 64-bit) and click on the Download button.</li>
-<li>Save the file to your computer and run it once the download is complete.</li>
-<li>Follow the on-screen instructions to complete the installation process. You may need to enter your product key if you have one.</li>
-<li>Enjoy using Microsoft Office 2013 on your device.</li>
-</ol>
-<p>Note: Microsoft Office 2013 is no longer supported by Microsoft and may not receive security updates or bug fixes. You may want to consider upgrading to a newer version of Office or using an alternative suite of applications.</p>
-
-<h2>How to Use Microsoft Office 2013</h2>
-<p>Once you have installed Microsoft Office 2013 setup with free download, you can start using the applications for your personal or professional needs. Here are some tips on how to use some of the most common features of Office 2013:</p>
-<ul>
-<li>To create a new document, spreadsheet, presentation, or other file, open the application and click on the File tab. Then select New and choose a template or a blank file.</li>
-<li>To save your file, click on the File tab and select Save or Save As. You can save your file to your computer, a removable device, or a cloud service such as OneDrive.</li>
-<li>To edit your file, use the tools and options available on the ribbon, which is the menu bar at the top of the screen. You can also use keyboard shortcuts or right-click menus for more commands.</li>
-<li>To share your file, click on the File tab and select Share. You can send your file as an attachment, a link, or a PDF. You can also invite others to view or edit your file online.</li>
-<li>To get help or learn more about Office 2013, click on the question mark icon at the top right corner of the screen. You can also access online tutorials, videos, and forums from the Office website.</li>
-</ul>
-<p>Microsoft Office 2013 is a powerful and versatile suite of applications that can help you with various tasks and projects. However, it may not be compatible with some newer devices or systems. If you encounter any issues or want to upgrade to a newer version of Office, you can contact Microsoft support or visit their website for more information.</p>
-
-<h2>How to Uninstall Microsoft Office 2013</h2>
-<p>If you want to uninstall Microsoft Office 2013 setup with free download from your device, you can follow these steps:</p>
-<ol>
-<li>Go to the Control Panel and select Programs and Features.</li>
-<li>Find Microsoft Office 2013 in the list of installed programs and click on it.</li>
-<li>Click on the Uninstall button and confirm your choice.</li>
-<li>Wait for the uninstallation process to finish and restart your device if prompted.</li>
-</ol>
-<p>Note: Uninstalling Microsoft Office 2013 will remove all the applications and files associated with it. If you want to keep some of your files, make sure to back them up before uninstalling.</p>
-<p></p>
-
-<h2>How to Download Microsoft Office 2013 Setup with Free Download Again</h2>
-<p>If you want to download Microsoft Office 2013 setup with free download again, you can use the same link and file that you used before. However, you may need to activate your product again with your product key or your Microsoft account. You can also contact Microsoft support or visit their website for more assistance.</p> ddb901b051<br />
-<br />
-<br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Estadistica para administracion levine berenson pdf solucionario Todo lo que necesitas saber sobre analisis de datos y toma de decisiones.md DELETED
@@ -1,102 +0,0 @@
-
-<h1>Estadistica para administracion levine berenson pdf solucionario</h1>
-<p>¿Te interesa aprender estadística para administración de una manera práctica, sencilla y actualizada? ¿Quieres contar con un libro que te explique los conceptos, las técnicas y las aplicaciones de la estadística en el contexto de los negocios? ¿Te gustaría tener acceso a un solucionario que te ayude a resolver los ejercicios del libro y a reforzar tu aprendizaje? Si tu respuesta es sí, entonces este artículo es para ti.</p>
-<p>En este artículo te voy a hablar sobre el libro <strong>Estadística para Administración</strong>, escrito por David M. Levine, Mark L. Berenson y Timothy C. Krehbiel, en su cuarta edición. Te voy a contar qué es la estadística para administración, quiénes son los autores del libro, qué contiene el libro, cuáles son sus características principales, qué beneficios tiene el libro para los estudiantes y profesionales de la administración, y cómo puedes obtener el solucionario del libro en formato pdf. Al final del artículo, encontrarás una conclusión y algunas preguntas frecuentes sobre el tema.</p>
-<h2>estadistica para administracion levine berenson pdf solucionario</h2><br /><p><b><b>Download</b> ✪✪✪ <a href="https://byltly.com/2uKyzb">https://byltly.com/2uKyzb</a></b></p><br /><br />
-<h2>Introducción</h2>
-<h3>¿Qué es la estadística para administración?</h3>
-<p>La estadística para administración es una rama de la estadística que se ocupa de recopilar, organizar, analizar e interpretar datos relacionados con las áreas funcionales de los negocios, como contabilidad, economía y finanzas, sistemas de información, administración y marketing. La estadística para administración tiene como objetivo convertir los datos en información útil para tomar decisiones racionales y fundamentadas en el ámbito empresarial.</p>
-<h3>¿Quiénes son Levine, Berenson y Krehbiel?</h3>
-<h3>¿Qué contiene el libro de estadística para administración?</h3>
-<p>El libro de estadística para administración contiene 18 capítulos que abarcan los temas fundamentales de la estadística descriptiva, la probabilidad, las distribuciones de probabilidad, el muestreo, la estimación, las pruebas de hipótesis, el análisis de varianza, la regresión, el análisis multivariado y el control estadístico de la calidad. Cada capítulo incluye una introducción con un escenario de uso de la estadística en los negocios, una exposición teórica con ejemplos y ejercicios resueltos, una sección de aplicaciones con casos reales y problemas propuestos, y una sección de resumen y repaso con conceptos clave y autoevaluación.</p>
-<h2>Características del libro de estadística para administración</h2>
-<h3>Enfoque aplicado a las áreas funcionales de los negocios</h3>
-<p>Una de las características más destacadas del libro de estadística para administración es que presenta cada tema estadístico en un contexto aplicado relacionado con al menos una de las áreas funcionales de los negocios. De esta manera, el libro muestra a los estudiantes la importancia y la utilidad de la estadística para la administración, y les ayuda a desarrollar una visión integral y práctica de los conceptos y las técnicas estadísticas. El libro también enfatiza la interpretación de los resultados estadísticos, la presentación y evaluación de las suposiciones, y la discusión sobre lo que se debe hacer si las suposiciones se violan.</p>
-<h3>Integración del software de hojas de trabajo y estadístico</h3>
-<p>Otra característica importante del libro de estadística para administración es que integra el uso del software de hojas de trabajo (Excel) y el software estadístico (Minitab) en todos los aspectos del curso. El libro reconoce que en el mundo de los negocios el software está disponible en el escritorio del tomador de decisiones, y que permite enfocar el curso en la interpretación de los resultados en lugar de en los cálculos. El libro también proporciona instrucciones detalladas para usar el software y ejemplos ilustrativos con capturas de pantalla.</p>
-<h3>Guía detallada para usar el software</h3>
-<p>El libro de estadística para administración ofrece una guía detallada para usar el software de hojas de trabajo (Excel) y el software estadístico (Minitab) en cada capítulo. El libro explica cómo ingresar los datos, cómo realizar los análisis estadísticos, cómo interpretar los resultados y cómo presentarlos en forma gráfica o tabular. El libro también incluye ejemplos ilustrativos con capturas de pantalla que muestran paso a paso cómo usar el software. El libro no permite que la instrucción del software domine el curso, sino que la integra al estudio de la estadística.</p>
-<p>Estadística para administración - David M. Levine, Timothy C. Krehbiel, Mark L. Berenson - 4ta Edición<br />
-Estadística para administración Mark L. Berenson, David M. Levine, Timothy C. Krehbiel - Google Books<br />
-Estadistica para Administracion y Economia_Levin-Rubin_7ma Ed_Pearson.pdf - Google Drive<br />
-Estadística para administración, 4ta Edición David M. Levine FREELIBROS.ORG<br />
-Estadística para administración Levine Berenson Krehbiel - PDF Descargar<br />
-Estadística para administración - David M. Levine, Mark L. Berenson, Timothy C. Krehbiel - 6ta Edición<br />
-Estadística para administración y economía - Richard I. Levin, David S. Rubin - 7ma Edición<br />
-Estadística para administración y economía Levine Rubin - PDF Download<br />
-Estadística para administración y economía - Richard I. Levin, David S. Rubin - 6ta Edición<br />
-Estadística para administración y economía Levine Rubin - Solucionario<br />
-Estadística para administración y economía - Richard I. Levin, David S. Rubin - 8va Edición<br />
-Estadística para administración y economía Levine Rubin - Libro en Español<br />
-Estadística para administración y economía - Richard I. Levin, David S. Rubin - 9na Edición<br />
-Estadística para administración y economía Levine Rubin - eBook Gratis<br />
-Estadística para administración y economía - Richard I. Levin, David S. Rubin - 10ma Edición<br />
-Estadística para administración y economía Levine Rubin - Resumen y Análisis<br />
-Estadística para administración y economía - Richard I. Levin, David S. Rubin - 11va Edición<br />
-Estadística para administración y economía Levine Rubin - Ejercicios Resueltos<br />
-Estadística para administración y economía - Richard I. Levin, David S. Rubin - 12va Edición<br />
-Estadística para administración y economía Levine Rubin - Aplicaciones en Negocios<br />
-Estadística para administración y economía - Richard I. Levin, David S. Rubin - 13va Edición<br />
-Estadística para administración y economía Levine Rubin - Conceptos Básicos<br />
-Estadística para administración y economía - Richard I. Levin, David S. Rubin - 14va Edición<br />
-Estadística para administración y economía Levine Rubin - Métodos Cuantitativos<br />
-Estadística para administración y economía - Richard I. Levin, David S. Rubin - 15va Edición<br />
-Estadística para administración y economía Levine Rubin - Probabilidad y Distribuciones<br />
-Estadística para administración y economía - Richard I. Levin, David S. Rubin - 16va Edición<br />
-Estadística para administración y economía Levine Rubin - Inferencia Estadística<br />
-Estadística para administración y economía - Richard I. Levin, David S. Rubin - 17va Edición<br />
-Estadística para administración y economía Levine Rubin - Regresión Lineal Simple<br />
-Estadística para administración y economía - Richard I. Levin, David S. Rubin - 18va Edición<br />
-Estadística para administración y economía Levine Rubin - Regresión Lineal Múltiple<br />
-Estadística para administración y economía - Richard I. Levin, David S. Rubin - 19va Edición<br />
-Estadística para administración y economía Levine Rubin - Análisis de Varianza (ANOVA)<br />
-Estadística para administración y economía - Richard I. Levin, David S. Rubin - 20va Edición<br />
-Estadística para administración y economía Levine Rubin - Pruebas de Hipótesis<br />
-Solucionario de estadistica para la Administracion de Berenson & Levine (4ta edicion)<br />
-Solucionario de estadistica para la Administracion de Berenson & Levine (5ta edicion)<br />
-Solucionario de estadistica para la Administracion de Berenson & Levine (6ta edicion)<br />
-Solucionario de estadistica para la Administracion de Berenson & Levine (7ma edicion)<br />
-Solucionario de estadistica para la Administracion de Berenson & Levine (8va edicion)<br />
-Solucionario de estadistica para la Administracion de Berenson & Levine (9na edicion)<br />
-Solucionario de estadistica para la Administracion de Berenson & Levine (10ma edicion)<br />
-Solucionario de estadistica para la Administracion de Berenson & Levine (11va edicion)<br />
-Solucionario de estadistica para la Administracion de Berenson & Levine (12va edicion)<br />
-Solucionario de estadistica para la Administracion de Berenson & Levine (13va edicion)<br />
-Solucionario de estadistica para la Administracion de Berenson & Levine (14va edicion)<br />
-Solucionario de estadistica para la Administracion de Berenson & Levine (15va edicion)<br />
-Solucionario de estadistica para la Administracion de Berenson & Levine (16va edicion)</p>
-<h3>Práctica abundante para comprender la estadística en la administración</h3>
-<p>El libro de estadística para administración ofrece una práctica abundante para comprender cómo se usa la estadística en la administración. El libro contiene más de 1.000 ejercicios y problemas propuestos, clasificados por nivel de dificultad y por área funcional de los negocios. El libro también contiene casos reales y aplicaciones que ilustran cómo se usa la estadística en situaciones reales de administración. El libro proporciona las soluciones de algunos ejercicios y problemas al final del libro, y ofrece un solucionario completo en formato pdf que se puede obtener mediante un código QR o un enlace web.</p>
-<h2>Beneficios del libro de estadística para administración</h2>
-<h3>Aprendizaje contextual y relevante</h3>
-área funcional de los negocios, como contabilidad, economía y finanzas, sistemas de información, administración y marketing. De esta manera, el libro hace que el aprendizaje de la estadística sea más interesante, significativo y motivador para los estudiantes y profesionales de la administración.</p>
-<h3>Desarrollo de habilidades analíticas y de toma de decisiones</h3>
-<p>Otro beneficio del libro de estadística para administración es que contribuye al desarrollo de habilidades analíticas y de toma de decisiones en los estudiantes y profesionales de la administración. El libro enseña cómo recopilar, organizar, analizar e interpretar datos relevantes para los negocios, utilizando el software de hojas de trabajo y estadístico. El libro también enseña cómo presentar y comunicar los resultados estadísticos de manera clara y efectiva, utilizando gráficos y tablas. El libro también enseña cómo evaluar las suposiciones estadísticas y cómo actuar si se violan. El libro también enseña cómo aplicar la estadística para resolver problemas reales de administración y para tomar decisiones racionales y fundamentadas.</p>
-<h3>Actualización de datos y ejemplos</h3>
-<p>Un tercer beneficio del libro de estadística para administración es que ofrece una actualización de datos y ejemplos que reflejan las tendencias y los cambios en el mundo de los negocios. El libro utiliza datos reales y actuales, obtenidos de fuentes confiables y reconocidas, como el Banco Mundial, el Fondo Monetario Internacional, la Organización Mundial del Comercio, la Organización para la Cooperación y el Desarrollo Económicos, entre otras. El libro también utiliza ejemplos y casos reales que ilustran cómo se usa la estadística en diferentes sectores e industrias, como el comercio electrónico, las redes sociales, la salud, el deporte, el turismo, entre otros. El libro también incorpora temas emergentes y relevantes para la administración, como la ética, la responsabilidad social, la sostenibilidad, la diversidad, entre otros.</p>
-<h3>Recursos adicionales para el aprendizaje</h3>
-<p>Un cuarto beneficio del libro de estadística para administración es que ofrece recursos adicionales para el aprendizaje que complementan y enriquecen el contenido del libro. El libro cuenta con un sitio web que contiene material adicional para los estudiantes y los profesores, como videos explicativos, presentaciones en PowerPoint, simulaciones interactivas, ejercicios adicionales, entre otros. El libro también cuenta con un solucionario en formato pdf que contiene las soluciones detalladas de todos los ejercicios y problemas propuestos del libro. El solucionario se puede obtener mediante un código QR o un enlace web que se encuentra al final del libro.</p>
-<h2>Solucionario del libro de estadística para administración</h2>
-<h3>¿Qué es el solucionario?</h3>
-<p>El solucionario del libro de estadística para administración es un documento en formato pdf que contiene las soluciones detalladas de todos los ejercicios y problemas propuestos del libro. El solucionario es un recurso muy útil para los estudiantes y profesionales de la administración que quieren verificar sus respuestas, revisar sus procedimientos, corregir sus errores y reforzar su aprendizaje.</p>
-<h3>¿Para qué sirve el solucionario?</h3>
-, sirve para autoevaluar el nivel de comprensión y dominio de la estadística para administración. Por último, sirve para prepararse para los exámenes y las evaluaciones del curso.</p>
-<h3>¿Cómo obtener el solucionario?</h3>
-<p>El solucionario del libro de estadística para administración se puede obtener de dos maneras. La primera manera es mediante un código QR que se encuentra al final del libro. El código QR se puede escanear con un teléfono inteligente o una tableta, y se accede al solucionario en formato pdf. La segunda manera es mediante un enlace web que también se encuentra al final del libro. El enlace web se puede copiar y pegar en un navegador de internet, y se accede al solucionario en formato pdf. El solucionario se puede descargar, imprimir o consultar en línea.</p>
-<h2>Conclusión</h2>
-<p>En conclusión, el libro de estadística para administración, escrito por Levine, Berenson y Krehbiel, en su cuarta edición, es un excelente recurso para los estudiantes y profesionales de la administración que quieren aprender estadística de una manera práctica, sencilla y actualizada. El libro presenta los conceptos, las técnicas y las aplicaciones de la estadística en el contexto de los negocios, utilizando el software de hojas de trabajo y estadístico. El libro también ofrece una práctica abundante, una actualización de datos y ejemplos, y recursos adicionales para el aprendizaje. El libro también cuenta con un solucionario en formato pdf que contiene las soluciones detalladas de todos los ejercicios y problemas propuestos del libro. El solucionario se puede obtener mediante un código QR o un enlace web que se encuentra al final del libro.</p>
-<h2>Preguntas frecuentes</h2>
-<p>A continuación, se presentan algunas preguntas frecuentes sobre el tema del artículo:</p>
-<ul>
-<li><strong>¿Qué es la estadística para administración?</strong> La estadística para administración es una rama de la estadística que se ocupa de recopilar, organizar, analizar e interpretar datos relacionados con las áreas funcionales de los negocios.</li>
-<li><strong>¿Quiénes son los autores del libro de estadística para administración?</strong> Los autores del libro de estadística para administración son David M. Levine, Mark L. Berenson y Timothy C. Krehbiel, tres reconocidos profesores de estadística en diferentes universidades de Estados Unidos.</li>
-<li><strong>¿Qué contiene el libro de estadística para administración?</strong> El libro de estadística para administración contiene 18 capítulos que abarcan los temas fundamentales de la estadística descriptiva, la probabilidad, las distribuciones de probabilidad, el muestreo, la estimación, las pruebas de hipótesis, el análisis de varianza, la regresión, el análisis multivariado y el control estadístico de la calidad.</li>
-<li><strong>¿Cuáles son las características principales del libro de estadística para administración?</strong> Las características principales del libro de estadística para administración son: enfoque aplicado a las áreas funcionales de los negocios, integración del software de hojas de trabajo y estadístico, guía detallada para usar el software y práctica abundante para comprender la estadística en la administración.</li>
-, desarrollo de habilidades analíticas y de toma de decisiones, actualización de datos y ejemplos y recursos adicionales para el aprendizaje.</li>
-<li><strong>¿Qué es el solucionario del libro de estadística para administración?</strong> El solucionario del libro de estadística para administración es un documento en formato pdf que contiene las soluciones detalladas de todos los ejercicios y problemas propuestos del libro.</li>
-<li><strong>¿Para qué sirve el solucionario del libro de estadística para administración?</strong> El solucionario del libro de estadística para administración sirve para practicar y repasar los conceptos y las técnicas estadísticas aprendidas en el libro, para desarrollar habilidades analíticas y de toma de decisiones al resolver problemas reales de administración, para autoevaluar el nivel de comprensión y dominio de la estadística para administración y para prepararse para los exámenes y las evaluaciones del curso.</li>
-<li><strong>¿Cómo obtener el solucionario del libro de estadística para administración?</strong> El solucionario del libro de estadística para administración se puede obtener mediante un código QR o un enlace web que se encuentra al final del libro. El código QR se puede escanear con un teléfono inteligente o una tableta, y se accede al solucionario en formato pdf. El enlace web se puede copiar y pegar en un navegador de internet, y se accede al solucionario en formato pdf. El solucionario se puede descargar, imprimir o consultar en línea.</li>
-</ul>
-</p> 0a6ba089eb<br />
-<br />
-<br />
spaces/1gistliPinn/ChatGPT4/Examples/2021 Free Spokeo Login And Password.md
DELETED
@@ -1,28 +0,0 @@
<br />
<h1>How to Find People Online with Spokeo</h1>
<p>Spokeo is a people search engine that helps you find information about anyone in the U.S. You can use Spokeo to look up people by name, phone number, email address, or physical address. Spokeo can also help you discover social media profiles, criminal records, wealth data, family members, and more.</p>
<p>However, Spokeo is not a free service. You need to pay a monthly or annual fee to access its full features and reports. If you are looking for a free Spokeo login and password, you might be disappointed. There is no official way to get a free Spokeo account without paying.</p>
<h2>free spokeo login and password</h2><br /><p><b><b>Download File</b> ✶ <a href="https://imgfil.com/2uxWTR">https://imgfil.com/2uxWTR</a></b></p><br /><br />
<p>Some websites claim to offer a free Spokeo login and password, but they are usually scams or phishing attempts. They might ask you to download malicious software, fill out surveys, or provide your personal information. These websites are not affiliated with Spokeo and should be avoided at all costs.</p>
<p>The only legitimate way to get a free Spokeo login and password is to sign up for a free trial. Spokeo offers a 7-day free trial for new customers who want to try out its service. You can cancel your subscription anytime before the trial ends and you won't be charged.</p>
<p>To sign up for a free Spokeo trial, follow these steps:</p>
<ol>
<li>Go to <a href="https://www.spokeo.com/">www.spokeo.com</a></li>
<li>Click on the "Start Your Free Trial" button</li>
<li>Enter your email address and create a password</li>
<li>Enter your payment information (credit card or PayPal)</li>
<li>Click on the "Start Free Trial" button</li>
</ol>
<p>You will receive an email confirmation with your login details. You can now use Spokeo to search for anyone in the U.S. Remember to cancel your subscription before the trial ends if you don't want to pay.</p>
<p>If you have any questions or issues with your Spokeo account, you can contact their customer care team at 1-888-271-9562 or [email protected].</p>
<p>Spokeo is more than just a people search engine. It can also help you protect yourself from unwanted calls and texts, reconnect with lost friends and relatives, verify online sellers and buyers, and learn more about your own online presence.</p>
<p>Here are some of the benefits of using Spokeo:</p>
<ul>
<li>Stop Unwanted Calls and Texts: Spokeo can help you identify unknown callers and texters by providing their name, location, carrier, and other details. You can also use Spokeo to block spam calls and texts, report harassment, and find out if your phone number has been compromised.</li>
<li>Reconnect with Lost Friends and Relatives: Spokeo can help you find current contact information for people you have lost touch with. You can search by name, phone number, email address, or physical address. You can also use Spokeo to find out more about their family members, social media profiles, hobbies, interests, and more.</li>
<li>Verify Online Sellers and Buyers: Spokeo can help you avoid online scams and frauds by providing background information on potential sellers and buyers. You can use Spokeo to check their identity, location, reputation, criminal records, and more. You can also use Spokeo to find out if they have been involved in any lawsuits or complaints.</li>
<li>Learn More About Your Online Presence: Spokeo can help you monitor and manage your online reputation by showing you what information is available about you on the web. You can use Spokeo to see your social media profiles, public records, photos, videos, blogs, comments, reviews, and more. You can also use Spokeo to remove unwanted or inaccurate information from the web.</li>
</ul>
<p>Spokeo is a powerful tool that can help you find anyone in the U.S. and learn more about them. Whether you are looking for an old friend, a new date, a business partner, or a family member, Spokeo can help you find them and connect with them.</p>
<p></p> d5da3c52bf<br />
<br />
<br />
spaces/1phancelerku/anime-remove-background/Bus Simulator Ultimate v1.5.4 MOD APK Hack the Latest Version of the Realistic Bus Simulation Game.md
DELETED
@@ -1,94 +0,0 @@
<br />
<h1>How to Hack Bus Simulator Ultimate 1.5.4 APK for Unlimited Money and More</h1>
<p>Bus Simulator Ultimate is one of the most realistic and immersive bus driving simulation games for Android devices. You can drive various buses across different countries, cities, and routes, while managing your own bus company, hiring drivers, expanding your fleet, and satisfying your passengers.</p>
<p>But what if you want to have more money, unlock all the buses, customize your vehicles, and enjoy other perks without spending real cash or playing for hours? Well, you can hack Bus Simulator Ultimate 1.5.4 APK and get access to unlimited resources and features that will make your game more fun and easy.</p>
<h2>hack bus simulator ultimate 1.5.4 apk</h2><br /><p><b><b>Download Zip</b> ⚹ <a href="https://jinyurl.com/2uNUhA">https://jinyurl.com/2uNUhA</a></b></p><br /><br />
<p>In this article, we will show you how to hack Bus Simulator Ultimate 1.5.4 APK using different tools and methods, as well as the benefits and risks of doing so. We will also give you some tips on how to enjoy the game with hacks without getting banned or detected by the game developers or other players.</p>
<h2>Introduction</h2>
<h3>What is Bus Simulator Ultimate and why you might want to hack it</h3>
<p>Bus Simulator Ultimate is a popular bus driving simulation game developed by Zuuks Games, the same creators as Truck Simulator 2018: Europe. The game was released in August 2019 and has since been downloaded over 100 million times on Google Play Store.</p>
<p>How to hack bus simulator ultimate 1.5.4 apk for unlimited money<br />
Bus simulator ultimate 1.5.4 apk mod menu download<br />
Bus simulator ultimate 1.5.4 apk hack no root<br />
Bus simulator ultimate 1.5.4 apk free shopping hack<br />
Bus simulator ultimate 1.5.4 apk unlimited fuel hack<br />
Bus simulator ultimate 1.5.4 apk hack online generator<br />
Bus simulator ultimate 1.5.4 apk hack without verification<br />
Bus simulator ultimate 1.5.4 apk hack latest version<br />
Bus simulator ultimate 1.5.4 apk hack android<br />
Bus simulator ultimate 1.5.4 apk hack ios<br />
Bus simulator ultimate 1.5.4 apk hack tool<br />
Bus simulator ultimate 1.5.4 apk hack reddit<br />
Bus simulator ultimate 1.5.4 apk hack youtube<br />
Bus simulator ultimate 1.5.4 apk hack tutorial<br />
Bus simulator ultimate 1.5.4 apk hack review<br />
Bus simulator ultimate 1.5.4 apk hack gameplay<br />
Bus simulator ultimate 1.5.4 apk hack features<br />
Bus simulator ultimate 1.5.4 apk hack tips and tricks<br />
Bus simulator ultimate 1.5.4 apk hack guide<br />
Bus simulator ultimate 1.5.4 apk hack cheats<br />
Bus simulator ultimate 1.5.4 apk hack codes<br />
Bus simulator ultimate 1.5.4 apk hack file download<br />
Bus simulator ultimate 1.5.4 apk hack obb download<br />
Bus simulator ultimate 1.5.4 apk hack data download<br />
Bus simulator ultimate 1.5.4 apk hack install<br />
Bus simulator ultimate 1.5.4 apk hack update<br />
Bus simulator ultimate 1.5.4 apk hack patch notes<br />
Bus simulator ultimate 1.5.4 apk hack bug fixes<br />
Bus simulator ultimate 1.5.4 apk hack support<br />
Bus simulator ultimate 1.5.4 apk hack feedback<br />
Bus simulator ultimate 1.5.4 apk hack rating<br />
Bus simulator ultimate 1.5.4 apk hack comparison<br />
Bus simulator ultimate 1.5.4 apk hack alternatives<br />
Bus simulator ultimate 1.5.4 apk hack benefits<br />
Bus simulator ultimate 1.5.4 apk hack disadvantages<br />
Bus simulator ultimate 1.5.4 apk hack pros and cons<br />
Bus simulator ultimate 1.5.4 apk hack testimonials<br />
Bus simulator ultimate 1.5.4 apk hack comments<br />
Bus simulator ultimate 1.5.4 apk hack questions and answers<br />
Bus simulator ultimate 1.5.4 apk hack faq<br />
Bus simulator ultimate 1.5.4 apk hack requirements<br />
Bus simulator ultimate 1</p>
<p>The game lets you experience what it's like to be a bus driver in various countries, such as Germany, Turkey, Italy, France, Spain, Netherlands, Brazil, Azerbaijan, Russia, etc. You can choose from over 25 different buses, each with their own realistic interiors, sounds, physics, and features.</p>
<p>You can also create your own bus company, hire drivers, buy new buses, upgrade your vehicles, set your ticket prices, design your routes, and earn money from your passengers. You can also compete with other players in multiplayer mode or join online events and tournaments.</p>
<p>However, as realistic and immersive as the game is, it can also be quite challenging and time-consuming. You need to earn enough money to buy new buses, maintain your vehicles, pay your drivers, expand your routes, etc. You also need to follow the traffic rules, avoid accidents, deal with weather conditions, satisfy your passengers' needs, etc.</p>
<p>That's why some players might want to hack Bus Simulator Ultimate 1.5.4 APK and get unlimited money, unlock all the buses, customize their vehicles, skip ads, remove restrictions, etc.</p>
<h3>What are the benefits of hacking Bus Simulator Ultimate 1.5.4 APK</h3>
<p>Hacking Bus Simulator Ultimate 1.5.4 APK can give you many benefits that can enhance your gaming experience and make it more fun and easy. Some of the benefits are:</p>
<ul>
<li>You can get unlimited money and buy any bus you want, upgrade your vehicles, hire more drivers, etc.</li>
<li>You can unlock all the buses and access their unique features, such as different interiors, sounds, physics, etc.</li>
<li>You can customize your vehicles and change their colors, skins, logos, etc.</li>
<li>You can skip ads and enjoy the game without interruptions or distractions.</li>
<li>You can remove restrictions and play the game in any country, city, or route you want.</li>
<li>You can cheat and manipulate the game data and settings, such as speed, fuel, traffic, weather, passengers, etc.</li>
</ul>
<h3>What are the risks of hacking Bus Simulator Ultimate 1.5.4 APK</h3>
<p>However, hacking Bus Simulator Ultimate 1.5.4 APK also comes with some risks that you should be aware of and avoid. Some of the risks are:</p>
<ul>
<li>You can get banned or detected by the game developers or other players if you use hacks in multiplayer mode or online events.</li>
<li>You can lose your progress and data if you use an incompatible or corrupted APK file or hack tool.</li>
<li>You can damage your device or expose it to malware or viruses if you download an unsafe or untrusted APK file or hack tool.</li>
<li>You can ruin the fun and challenge of the game if you use hacks that make it too easy or boring.</li>
</ul>
<h2>How to Download Bus Simulator Ultimate 1.5.4 APK</h2>
<h3>Where to find the original APK file and how to install it</h3>
<p>If you want to hack Bus Simulator Ultimate 1.5.4 APK, you need to have the original APK file of the game installed on your device first. You can find the original APK file of Bus Simulator Ultimate 1.5.4 on Google Play Store or other reputable sources, such as APKPure, APKMirror, etc.</p>
<p>To install the original APK file of Bus Simulator Ultimate 1.5.4 on your device, you need to follow these steps:</p>
<ol>
<li>Go to the source where you downloaded the APK file and tap on it.</li>
<li>Allow your device to install apps from unknown sources if prompted.</li>
<li>Follow the instructions on the screen and wait for the installation to complete.</li>
<li>Launch the game and enjoy.</li>
</ol>
<h3>How to check the version and compatibility of your device</h3>
<p>Before you hack Bus Simulator Ultimate 1.5.4 APK, you need to make sure that your device is compatible with the game and that you have the latest version of the game installed. To check the version and compatibility of your device, you need to follow these steps:</p>
<ol>
<li>Go to Google Play Store and search for Bus Simulator Ultimate.</li>
<li>Tap on the game icon and scroll down to see the details.</li>
<li>Check the current version of the game and compare it with the version of your installed APK file.</li>
<li>Check the minimum requirements of the game and compare them with your device's specifications.</li>
<li>Update the game or your device if needed.</li>
</ol>
<h2>How to Hack Bus Simulator Ultimate 1.5.4 APK</h2>
<h3>What are the tools and methods you need to hack Bus Simulator Ultimate 1.5.4 APK</h3>
<p>There are different tools and methods that you can use to hack Bus Simulator Ultimate 1.5.4 APK and get unlimited money and other features. Some of the most common and effective tools and methods are:</p>
<ul>
<li>Modded APK file: A modded APK file is a modified version of the original APK file that has been altered by hackers to include cheats and hacks in the game. You can download a modded APK file of Bus Simulator Ultimate 1.5.4 from various sources online, such as ModAPKStore, Rexdl, etc.</li>
<li>Game hacker app: A game hacker app is an application that allows you to modify the game data and settings on your device using various techniques, such as memory editing, code injection, hex editing, etc. You can download a game hacker app from various sources online, such as Game Guardian, Lucky Patcher, Cheat Engine, etc.</li>
<li>Cheat engine: A cheat engine is a piece of software that allows you to manipulate the game memory and values on your device using various techniques, such as scanning, debugging, disassembling, etc. You can download a cheat engine from various sources online, such as Cheat Engine for Android, SB Game Hacker, Game Killer, etc.</li>
</ul>
<h3>How to use a modded APK file to get unlimited money and other features</h3>
<p>To use a modded APK file to hack Bus Simulator Ultimate 1.5.4 APK and get unlimited money and other features, you need to follow these steps:</p>
<ol>
<li>Find a reliable source where you can download a modded APK file of Bus Simulator Ultimate 1.5.4 with unlimited money and other features.</li>
<li>Uninstall the original APK file of Bus Simulator Ultimate 1.5.4 from your device if you have it.</li>
<li>Enable the installation of apps from unknown sources in your device settings if you haven't done so already.</li>
<li>Download the modded APK file of Bus Simulator Ultimate 1.5.4 from the source and tap on it.</li>
<li>Follow the instructions on the screen and wait for the installation to complete.</li>
<li>Launch the game and enjoy the hacks.</li>
</ol>
<h3>How to use a game hacker app to modify the game data and settings</h3>
<p>To use a game hacker app to hack Bus Simulator Ultimate 1.5.4 APK and modify the game data and settings, you need to follow these steps:</p>
<ol>
<li>Install a game hacker app on your device from a trusted source, such as Game Guardian, Lucky Patcher, Cheat Engine, etc.</li>
<li>Launch the game hacker app and grant it root access or other permissions if required.</li>
<li>Launch Bus Simulator Ultimate 1.5.4 APK and start playing the game.</li>
<li>Minimize the game and open the game hacker app.</li>
<li>Select Bus Simulator Ultimate 1.5.4 APK from the list of running processes or apps.</li>
<li>Search for the value or parameter that you want to change, such as money, speed, fuel, etc.</li>
<li>Modify the value or parameter according to your preference and apply the changes.</li>
<li>Resume the game and enjoy the hacks.</li>
</ol>
<h3>How to use a cheat engine to manipulate the game memory and values</h3>
<p>To use a cheat engine to hack Bus Simulator Ultimate 1.5.4 APK and manipulate the game memory and values, you need to follow these steps:</p>
<ol>
<li>Install a cheat engine on your device from a reliable source, such as Cheat Engine for Android, SB Game Hacker, Game Killer, etc.</li>
<li>Launch the cheat engine and grant it root access or other permissions if needed.</li>
<li>Launch Bus Simulator Ultimate 1.5.4 APK and start playing the game.</li>
<li>Minimize the game and open the cheat engine.</li>
<li>Select Bus Simulator Ultimate 1.5.4 APK from the list of running processes or apps.</li>
<li>Scan for the value or address that you want to change, such as money, speed, fuel, etc.</li>
<li>Change the value or address according to your desire and apply the changes.</li>
<li>Resume the game and enjoy the hacks.</li>
</ol>
<h2>How to Enjoy Bus Simulator Ultimate 1.5.4 APK with Hacks</h2>
<h3>What are the features and gameplay of Bus Simulator Ultimate 1.5.4 APK</h3>
<p>Bus Simulator Ultimate 1.5.4 APK is a realistic and immersive bus driving simulation game that offers you many features and gameplay options, such as:</p>
<ul>
<li>Driving over 25 different buses with realistic interiors, sounds, physics, and features.</li>
<li>Creating your own bus company, hiring drivers, buying new buses, upgrading your vehicles, setting your ticket prices, designing your routes, and earning money from your passengers.</li>
<li>Competing with other players in multiplayer mode or joining online events and tournaments.</li>
<li>Exploring various countries, cities, and routes with different traffic rules, weather conditions, landmarks, etc.</li>
<li>Satisfying your passengers' needs, such as comfort, entertainment, food, etc.</li>
<li>Following the traffic rules, avoiding accidents, dealing with emergencies, etc.</li>
</ul>
<h3>How to use the hacks to enhance your gaming experience and have more fun</h3>
<p>You can use the hacks that you have applied to Bus Simulator Ultimate 1.5.4 APK to enhance your gaming experience and have more fun in various ways, such as:</p>
<ul>
<li>Buying any bus you want and customizing it to your liking.</li>
<li>Expanding your bus company and dominating the market.</li>
<li>Skipping ads and playing without interruptions or distractions.</li>
<li>Playing in any country, city, or route you want without restrictions.</li>
<li>Cheating and manipulating the game data and settings to your advantage.</li>
</ul>
<h3>How to avoid getting banned or detected by the game developers or other players</h3>
<p>However, you should also be careful not to get banned or detected by the game developers or other players when using hacks in Bus Simulator Ultimate 1.5.4 APK. Here are some tips on how to avoid getting banned or detected:</p>
<ul>
<li>Use hacks only in offline mode or single-player mode.</li>
<li>Do not use hacks in multiplayer mode or online events.</li>
<li>Do not use hacks that are too obvious or unrealistic.</li>
<li>Do not use hacks that affect other players' gameplay or experience.</li>
<li>Do not brag or boast about using hacks in public forums or chats.</li>
</ul>
<h2>Conclusion</h2>
<p>Hacking Bus Simulator Ultimate 1.5.4 APK can give you unlimited money and other features that can make your game more fun and easy. However, hacking Bus Simulator Ultimate 1.5.4 APK also comes with some risks that you should be aware of and avoid. You can get banned or detected by the game developers or other players if you use hacks in multiplayer mode or online events. You can also lose your progress and data if you use an incompatible or corrupted APK file or hack tool. Moreover, you can damage your device or expose it to malware or viruses if you download an unsafe or untrusted APK file or hack tool. Furthermore, you can ruin the fun and challenge of the game if you use hacks that make it too easy or boring.</p>
<p>Therefore, you should use hacks in Bus Simulator Ultimate 1.5.4 APK with caution and moderation. You should also respect the game developers and other players and not abuse the hacks or spoil their gameplay or experience. You should also follow the tips on how to avoid getting banned or detected when using hacks in Bus Simulator Ultimate 1.5.4 APK.</p>
<p>We hope that this article has helped you learn how to hack Bus Simulator Ultimate 1.5.4 APK and enjoy the game with unlimited money and other features. If you have any questions or feedback, please feel free to leave a comment below. Happy hacking!</p>
<h2>FAQs</h2>
<h3>Q: Is hacking Bus Simulator Ultimate 1.5.4 APK legal?</h3>
<p>A: Hacking Bus Simulator Ultimate 1.5.4 APK is not illegal, but it is against the terms and conditions of the game and the Google Play Store. Therefore, you may face some consequences if you get caught or reported by the game developers or other players.</p>
<h3>Q: Is hacking Bus Simulator Ultimate 1.5.4 APK safe?</h3>
<p>A: Hacking Bus Simulator Ultimate 1.5.4 APK is not completely safe, as there are some risks involved, such as getting banned or detected, losing your progress and data, damaging your device, or exposing it to malware or viruses. Therefore, you should be careful and responsible when hacking Bus Simulator Ultimate 1.5.4 APK and only use trusted sources and tools.</p>
<h3>Q: How can I update Bus Simulator Ultimate 1.5.4 APK after hacking it?</h3>
<p>A: If you want to update Bus Simulator Ultimate 1.5.4 APK after hacking it, you need to uninstall the hacked version of the game and install the latest version of the original APK file from Google Play Store or other reputable sources. However, you may lose your hacks and progress if you do so.</p>
<h3>Q: Can I hack Bus Simulator Ultimate 1.5.4 APK without root access?</h3>
<p>A: Yes, you can hack Bus Simulator Ultimate 1.5.4 APK without root access using some tools and methods, such as modded APK files, game hacker apps, cheat engines, etc. However, some tools and methods may require root access to work properly.</p>
<h3>Q: Can I hack Bus Simulator Ultimate 1.5.4 APK on iOS devices?</h3>
<p>A: No, you cannot hack Bus Simulator Ultimate 1.5.4 APK on iOS devices, as the game is only available for Android devices.</p> 401be4b1e0<br />
<br />
<br />
spaces/1phancelerku/anime-remove-background/Download YouTube 4.0 APK for Android - Watch Videos Offline.md
DELETED
@@ -1,158 +0,0 @@
<h1>Download YouTube 4.0 for Android: How to Enjoy Videos on Your Phone</h1>
<p>YouTube is the most popular video-sharing platform in the world, with billions of users and hours of content uploaded every minute. Whether you want to watch the latest music videos, learn something new, or catch up with your favorite creators, YouTube has something for everyone.</p>
<h2>download youtube 4.0 for android</h2><br /><p><b><b>DOWNLOAD</b> 🆓 <a href="https://jinyurl.com/2uNQXk">https://jinyurl.com/2uNQXk</a></b></p><br /><br />
<p>But what if you want to enjoy YouTube on your Android phone? You might think that you need the latest version of the app, which requires Android 5.0 or higher. But that's not true! You can still download and use YouTube 4.0 for Android, which works on devices running Android 4.1 or higher.</p>
<p>In this article, we will show you how to download and install YouTube 4.0 for Android, how to use its features and benefits, and how to make the most of your YouTube experience on your phone.</p>
<h2>What is YouTube 4.0 for Android?</h2>
<p>YouTube 4.0 for Android is an older version of the official YouTube app that was released in August 2013. It has a simple and intuitive interface that lets you access all the essential functions of YouTube, such as browsing, watching, creating, and sharing videos.</p>
<p>How to download youtube 4.0 for android phone<br />
Download youtube 4.0 apk for android tablet<br />
Youtube 4.0 for android free download latest version<br />
Download youtube 4.0 for android offline mode<br />
Youtube 4.0 for android download without ads<br />
Download youtube 4.0 for android with dark mode<br />
Youtube 4.0 for android download and install guide<br />
Download youtube 4.0 for android from uptodown[^1^]<br />
Youtube 4.0 for android download link<br />
Download youtube 4.0 for android no root<br />
Youtube 4.0 for android download error fix<br />
Download youtube 4.0 for android mod apk<br />
Youtube 4.0 for android download features<br />
Download youtube 4.0 for android old version<br />
Youtube 4.0 for android download size<br />
Download youtube 4.0 for android premium apk<br />
Youtube 4.0 for android download speed<br />
Download youtube 4.0 for android beta version<br />
Youtube 4.0 for android download review<br />
Download youtube 4.0 for android update<br />
Youtube 4.0 for android download requirements<br />
Download youtube 4.0 for android from google play store<br />
Youtube 4.0 for android download alternative<br />
Download youtube 4.0 for android on pc<br />
Youtube 4.0 for android download comparison<br />
Download youtube 4.0 for android with subtitles<br />
Youtube 4.0 for android download quality settings<br />
Download youtube 4.0 for android with background play<br />
Youtube 4.0 for android download tips and tricks<br />
Download youtube 4.0 for android with vpn<br />
Youtube 4.0 for android download problems and solutions<br />
Download youtube 4.0 for android with screen recorder<br />
Youtube 4.0 for android download support<br />
Download youtube 4.0 for android with picture-in-picture mode<br />
Youtube 4.0 for android download benefits<br />
Download youtube 4.0 for android with downloader app<br />
Youtube 4.0 for android download disadvantages<br />
Download youtube 4.0 for android with chromecast support<br />
Youtube 4.0 for android download statistics<br />
Download youtube 4.0 for android with voice search<br />
Youtube 4.0 for android download feedback<br />
Download youtube 4.0 for android with playlist manager<br />
Youtube 4.0 for android download security<br />
Download youtube 4.0 for android with notifications settings<br />
Youtube 4.0 for android download history<br />
Download youtube 4.0 for android with comments section<br />
Youtube 4.0 for android download ratings<br />
Download youtube 4.0 for android with live streaming option<br />
Youtube 4.0 for android download recommendations</p>
<p>YouTube 4.0 for Android also supports some features that are not available in newer versions of the app, such as:</p>
<ul>
<li>The ability to play videos in the background while using other apps or when the screen is locked</li>
<li>The ability to watch videos in portrait or landscape mode</li>
<li>The ability to adjust the video quality manually</li>
<li>The ability to view comments and related videos while watching a video</li>
</ul>
<p>However, YouTube 4.0 for Android also has some limitations, such as:</p>
<ul>
<li>The lack of support for some newer features, such as Stories, Shorts, Live streams, and Channel memberships</li>
<li>The lack of updates and bug fixes from Google</li>
<li>The possibility of compatibility issues with some devices or videos</li>
</ul>
<h2>How to download and install YouTube 4.0 for Android</h2>
<p>If you want to download and install YouTube 4.0 for Android, you need to follow these steps:</p>
<ol>
<li>Go to [this link](^1^) on your phone's browser and tap on "Download APK". This will download the file "youtube-4-0-8.apk" to your phone.</li>
<li>Go to your phone's settings and enable "Unknown sources" under "Security". This will allow you to install apps from sources other than Google Play Store.</li>
<li>Go to your phone's file manager and locate the downloaded file "youtube-4-0-8.apk". Tap on it and follow the instructions to install it.</li>
<li>Once installed, you can launch the app from your app drawer or home screen.</li>
</ol>
<h2>How to use YouTube 4.0 for Android</h2>
<h3>How to browse and watch videos on YouTube 4.0 for Android</h3>
<p>To browse and watch videos on YouTube 4.0 for Android, you can use the following options:</p>
<ul>
<li>The Home tab shows you personalized recommendations based on your watch history, preferences, and subscriptions.</li>
<li>The Trending tab shows you the most popular and viral videos across different categories, such as music, gaming, news, and entertainment.</li>
<li>The Subscriptions tab shows you the latest videos from the channels you have subscribed to.</li>
<li>The Search icon lets you search for videos by keywords, filters, or voice commands.</li>
<li>The Menu icon lets you access other features, such as your account, settings, history, favorites, playlists, uploads, and downloads.</li>
</ul>
<p>To watch a video on YouTube 4.0 for Android, you can simply tap on it and it will start playing. You can also use the following controls:</p>
<ul>
<li>The Play/Pause button lets you pause or resume the video.</li>
<li>The Seek bar lets you skip forward or backward in the video.</li>
<li>The Fullscreen button lets you switch between portrait and landscape mode.</li>
<li>The Quality button lets you adjust the video resolution manually.</li>
<li>The Share button lets you share the video with other apps or contacts.</li>
<li>The Add to button lets you add the video to your favorites or playlists.</li>
<li>The More button lets you access other options, such as report, caption, flag, or info.</li>
</ul>
<h3>How to create and upload videos on YouTube 4.0 for Android</h3>
<p>To create and upload videos on YouTube 4.0 for Android, you can use the following options:</p>
<ul>
<li>The Camera icon lets you record a new video using your phone's camera. You can also edit the video by trimming, adding filters, or adding music.</li>
<li>The Gallery icon lets you select an existing video from your phone's gallery. You can also edit the video by trimming, adding filters, or adding music.</li>
<li>The Upload icon lets you upload the video to your YouTube channel. You can also add a title, description, tags, category, privacy, and location to your video.</li>
</ul>
<h3>How to connect with the YouTube community on YouTube 4.0 for Android</h3>
<p>To connect with the YouTube community on YouTube 4.0 for Android, you can use the following options:</p>
<ul>
<li>The Like button lets you express your appreciation for a video.</li>
<li>The Dislike button lets you express your dissatisfaction for a video.</li>
<li>The Comment button lets you leave a comment on a video. You can also reply to other comments or like them.</li>
<li>The Subscribe button lets you follow a channel and get notified of their new videos.</li>
</ul>
<h2>Tips and tricks for YouTube 4.0 for Android</h2>
<h3>How to customize your YouTube experience on YouTube 4.0 for Android</h3>
<p>To customize your YouTube experience on YouTube 4.0 for Android, you can use the following options:</p>
<ul>
<li>The Settings icon lets you access various settings, such as notifications, playback, captions, downloads, privacy, and account.</li>
<li>The Account icon lets you manage your YouTube account, such as your profile, channel, subscriptions, favorites, playlists, uploads, downloads, history, and watch later.</li>
</ul>
<h3>How to save videos for offline viewing on YouTube 4.0 for Android</h3>
<p>To save videos for offline viewing on YouTube 4.0 for Android, you can use the following option:</p>
<ul>
<li>The Download icon lets you download a video to your phone's storage. You can also choose the quality and size of the download. You can watch the downloaded videos from the Downloads tab in the Menu icon.</li>
</ul>
<h3>How to upgrade to YouTube Premium on YouTube 4.0 for Android</h3>
<p>To upgrade to YouTube Premium on YouTube 4.0 for Android, you can use the following option:</p>
<ul>
<li>The Upgrade icon lets you sign up for YouTube Premium, which is a paid subscription service that offers ad-free videos, background play, offline access, and exclusive content. You can also enjoy YouTube Music, which is a music streaming service that lets you access millions of songs and playlists. You can try YouTube Premium for free for one month and then pay a monthly fee of $11.99.</li>
</ul>
<h2>Conclusion</h2>
<p>YouTube 4.0 for Android is a great way to enjoy videos on your phone, especially if you have an older device or a limited data plan. It has a simple and intuitive interface that lets you access all the essential functions of YouTube, such as browsing, watching, creating, and sharing videos. It also supports some features that are not available in newer versions of the app, such as background play, manual quality adjustment, and portrait mode.</p>
<p>However, YouTube 4.0 for Android also has some limitations, such as the lack of support for some newer features, the lack of updates and bug fixes, and the possibility of compatibility issues. Therefore, you should weigh the pros and cons of using YouTube 4.0 for Android before downloading and installing it.</p>
<p>If you want to download YouTube 4.0 for Android, you can follow the steps we have outlined in this article. You can also use our tips and tricks to customize your YouTube experience and make the most of it. We hope you found this article helpful and informative.</p>
<p>Now that you know how to download YouTube 4.0 for Android, why not give it a try and see for yourself? You might be surprised by how much you can do with this old but gold version of the app. Happy watching!</p>
<h2>FAQs</h2>
<p>Here are some frequently asked questions about YouTube 4.0 for Android:</p>
<ol>
<li>Is YouTube 4.0 for Android safe to use?</li>
<p>YouTube 4.0 for Android is safe to use as long as you download it from a trusted source, such as [this link]. However, you should be aware that Google does not support or update this version of the app anymore, so there might be some security risks or vulnerabilities that are not fixed.</p>
<li>Can I use YouTube 4.0 for Android on any device?</li>
<p>You can use YouTube 4.0 for Android on any device that runs Android 4.1 or higher. However, some devices or videos might not be compatible with this version of the app, so you might encounter some errors or glitches while using it.</p>
<li>Can I use YouTube 4.0 for Android along with the latest version of the app?</li>
<p>You can use YouTube 4.0 for Android along with the latest version of the app if you have enough storage space on your phone. However, you should not run both apps at the same time, as this might cause some conflicts or crashes.</p>
<li>Can I update YouTube 4.0 for Android to the latest version of the app?</li>
<p>You can update YouTube 4.0 for Android to the latest version of the app by going to Google Play Store and tapping on "Update". However, this will overwrite your existing version of the app and you will lose some features that are only available in YouTube 4.0 for Android.</p>
<li>Can I downgrade from the latest version of the app to YouTube 4.0 for Android?</li>
<p>You can downgrade from the latest version of the app to YouTube 4.0 for Android by following these steps:</p>
<ol>
<li>Uninstall the latest version of the app from your phone.</li>
<li>Go to [this link] on your phone's browser and tap on "Download APK". This will download the file "youtube-4-0-8.apk" to your phone.</li>
<li>Go to your phone's settings and enable "Unknown sources" under "Security". This will allow you to install apps from sources other than Google Play Store.</li>
<li>Go to your phone's file manager and locate the downloaded file "youtube-4-0-8.apk". Tap on it and follow the instructions to install it.</li>
<li>Once installed, you can launch the app from your app drawer or home screen.</li>
</ol>
</ol></p> 197e85843d<br />
<br />
<br />
spaces/232labs/VToonify/vtoonify/model/raft/evaluate.py
DELETED
@@ -1,197 +0,0 @@
import sys
sys.path.append('core')

from PIL import Image
import argparse
import os
import time
import numpy as np
import torch
import torch.nn.functional as F
import matplotlib.pyplot as plt

import datasets
from utils import flow_viz
from utils import frame_utils

from raft import RAFT
from utils.utils import InputPadder, forward_interpolate


@torch.no_grad()
def create_sintel_submission(model, iters=32, warm_start=False, output_path='sintel_submission'):
    """ Create submission for the Sintel leaderboard """
    model.eval()
    for dstype in ['clean', 'final']:
        test_dataset = datasets.MpiSintel(split='test', aug_params=None, dstype=dstype)

        flow_prev, sequence_prev = None, None
        for test_id in range(len(test_dataset)):
            image1, image2, (sequence, frame) = test_dataset[test_id]
            if sequence != sequence_prev:
                flow_prev = None

            padder = InputPadder(image1.shape)
            image1, image2 = padder.pad(image1[None].cuda(), image2[None].cuda())

            flow_low, flow_pr = model(image1, image2, iters=iters, flow_init=flow_prev, test_mode=True)
            flow = padder.unpad(flow_pr[0]).permute(1, 2, 0).cpu().numpy()

            if warm_start:
                flow_prev = forward_interpolate(flow_low[0])[None].cuda()

            output_dir = os.path.join(output_path, dstype, sequence)
            output_file = os.path.join(output_dir, 'frame%04d.flo' % (frame+1))

            if not os.path.exists(output_dir):
                os.makedirs(output_dir)

            frame_utils.writeFlow(output_file, flow)
            sequence_prev = sequence


@torch.no_grad()
def create_kitti_submission(model, iters=24, output_path='kitti_submission'):
    """ Create submission for the KITTI leaderboard """
    model.eval()
    test_dataset = datasets.KITTI(split='testing', aug_params=None)

    if not os.path.exists(output_path):
        os.makedirs(output_path)

    for test_id in range(len(test_dataset)):
        image1, image2, (frame_id, ) = test_dataset[test_id]
        padder = InputPadder(image1.shape, mode='kitti')
        image1, image2 = padder.pad(image1[None].cuda(), image2[None].cuda())

        _, flow_pr = model(image1, image2, iters=iters, test_mode=True)
        flow = padder.unpad(flow_pr[0]).permute(1, 2, 0).cpu().numpy()

        output_filename = os.path.join(output_path, frame_id)
        frame_utils.writeFlowKITTI(output_filename, flow)


@torch.no_grad()
def validate_chairs(model, iters=24):
    """ Perform evaluation on the FlyingChairs (test) split """
    model.eval()
    epe_list = []

    val_dataset = datasets.FlyingChairs(split='validation')
    for val_id in range(len(val_dataset)):
        image1, image2, flow_gt, _ = val_dataset[val_id]
        image1 = image1[None].cuda()
        image2 = image2[None].cuda()

        _, flow_pr = model(image1, image2, iters=iters, test_mode=True)
        epe = torch.sum((flow_pr[0].cpu() - flow_gt)**2, dim=0).sqrt()
        epe_list.append(epe.view(-1).numpy())

    epe = np.mean(np.concatenate(epe_list))
    print("Validation Chairs EPE: %f" % epe)
    return {'chairs': epe}


@torch.no_grad()
def validate_sintel(model, iters=32):
    """ Perform validation using the Sintel (train) split """
    model.eval()
    results = {}
    for dstype in ['clean', 'final']:
        val_dataset = datasets.MpiSintel(split='training', dstype=dstype)
        epe_list = []

        for val_id in range(len(val_dataset)):
            image1, image2, flow_gt, _ = val_dataset[val_id]
            image1 = image1[None].cuda()
            image2 = image2[None].cuda()

            padder = InputPadder(image1.shape)
            image1, image2 = padder.pad(image1, image2)

            flow_low, flow_pr = model(image1, image2, iters=iters, test_mode=True)
            flow = padder.unpad(flow_pr[0]).cpu()

            epe = torch.sum((flow - flow_gt)**2, dim=0).sqrt()
            epe_list.append(epe.view(-1).numpy())

        epe_all = np.concatenate(epe_list)
        epe = np.mean(epe_all)
        px1 = np.mean(epe_all < 1)
        px3 = np.mean(epe_all < 3)
        px5 = np.mean(epe_all < 5)

        print("Validation (%s) EPE: %f, 1px: %f, 3px: %f, 5px: %f" % (dstype, epe, px1, px3, px5))
        results[dstype] = epe  # mean EPE over all pixels for this pass

    return results


@torch.no_grad()
def validate_kitti(model, iters=24):
    """ Perform validation using the KITTI-2015 (train) split """
    model.eval()
    val_dataset = datasets.KITTI(split='training')

    out_list, epe_list = [], []
    for val_id in range(len(val_dataset)):
        image1, image2, flow_gt, valid_gt = val_dataset[val_id]
        image1 = image1[None].cuda()
        image2 = image2[None].cuda()

        padder = InputPadder(image1.shape, mode='kitti')
        image1, image2 = padder.pad(image1, image2)

        flow_low, flow_pr = model(image1, image2, iters=iters, test_mode=True)
        flow = padder.unpad(flow_pr[0]).cpu()

        epe = torch.sum((flow - flow_gt)**2, dim=0).sqrt()
        mag = torch.sum(flow_gt**2, dim=0).sqrt()

        epe = epe.view(-1)
        mag = mag.view(-1)
        val = valid_gt.view(-1) >= 0.5

        out = ((epe > 3.0) & ((epe/mag) > 0.05)).float()
        epe_list.append(epe[val].mean().item())
        out_list.append(out[val].cpu().numpy())

    epe_list = np.array(epe_list)
    out_list = np.concatenate(out_list)

    epe = np.mean(epe_list)
    f1 = 100 * np.mean(out_list)

    print("Validation KITTI: %f, %f" % (epe, f1))
    return {'kitti-epe': epe, 'kitti-f1': f1}


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', help="restore checkpoint")
    parser.add_argument('--dataset', help="dataset for evaluation")
    parser.add_argument('--small', action='store_true', help='use small model')
    parser.add_argument('--mixed_precision', action='store_true', help='use mixed precision')
    parser.add_argument('--alternate_corr', action='store_true', help='use efficient correlation implementation')
    args = parser.parse_args()

    model = torch.nn.DataParallel(RAFT(args))
    model.load_state_dict(torch.load(args.model))

    model.cuda()
    model.eval()

    # create_sintel_submission(model.module, warm_start=True)
    # create_kitti_submission(model.module)

    with torch.no_grad():
        if args.dataset == 'chairs':
            validate_chairs(model.module)

        elif args.dataset == 'sintel':
            validate_sintel(model.module)

        elif args.dataset == 'kitti':
            validate_kitti(model.module)
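The script above is driven entirely by its command-line flags. As a quick orientation, here is a minimal sketch of the equivalent programmatic call, run from the repository root; the checkpoint path is a hypothetical placeholder, and the flag set simply mirrors the parser in the `__main__` block:

```python
import argparse
import torch

from raft import RAFT
import evaluate

# Hypothetical checkpoint path -- substitute your own trained RAFT weights.
args = argparse.Namespace(model='checkpoints/raft-sintel.pth', dataset='sintel',
                          small=False, mixed_precision=False, alternate_corr=False)

model = torch.nn.DataParallel(RAFT(args))      # RAFT reads its options off this namespace
model.load_state_dict(torch.load(args.model))
model.cuda()
model.eval()

with torch.no_grad():
    results = evaluate.validate_sintel(model.module)  # prints EPE and 1/3/5px stats per pass
print(results)
```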
spaces/42digital/DeepFashion_Classification/app.py
DELETED
@@ -1,31 +0,0 @@
import gradio as gr
from huggingface_hub import hf_hub_download
from fastai.learner import load_learner
import os

print(gr.__version__)

TOKEN = os.environ["token"]
REPO_ID = "42digital/deepfashion_classification_vit-large-patch14-clip-336"
FILENAME = "model.pkl"
EXAMPLES = ["dress.jpg", "hoodie.jpg", "joggers.jpg", "jumpsuit.jpg", "shorts.jpg", "tee.jpg"]

learner = load_learner(
    hf_hub_download(repo_id=REPO_ID, filename=FILENAME, token=TOKEN)
)

def predict(img):
    _, _, probs = learner.predict(img)
    probs = [float(p) for p in probs.detach()]
    preds = {k: v for k, v in zip(learner.dls.vocab, probs)}
    return preds

gr.Interface(fn=predict,
             inputs=gr.Image(type="numpy"),
             outputs=gr.Label(num_top_classes=5),
             examples=EXAMPLES,
             cache_examples=False,
             title="Fashion Classification",
             description="Recognize clothes in an image. [ViT-L/14](https://arxiv.org/abs/2010.11929) trained on 46 clothing categories from [DeepFashion](https://openaccess.thecvf.com/content_cvpr_2016/html/Liu_DeepFashion_Powering_Robust_CVPR_2016_paper.html) @ 76% Top-1 Accuracy and 92% [Top-3 Accuracy](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.top_k_accuracy_score.html) (many images show more than one clothing item).",
             analytics_enabled=False,
             ).launch()
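Since the Gradio wiring passes a plain numpy array into `learner.predict`, the model can also be sanity-checked without launching the UI. A minimal sketch, assuming one of the bundled example images is on disk; the printed labels are illustrative, not the model's actual vocabulary:

```python
from PIL import Image
import numpy as np

img = np.array(Image.open("dress.jpg"))  # one of the EXAMPLES shipped with the Space
print(predict(img))                      # e.g. {'Dress': 0.93, 'Blouse': 0.03, ...}
```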
spaces/52Hz/SUNet_AWGN_denoising/model/SUNet.py
DELETED
@@ -1,30 +0,0 @@
import torch.nn as nn
from model.SUNet_detail import SUNet


class SUNet_model(nn.Module):
    def __init__(self, config):
        super(SUNet_model, self).__init__()
        self.config = config
        self.swin_unet = SUNet(img_size=config['SWINUNET']['IMG_SIZE'],
                               patch_size=config['SWINUNET']['PATCH_SIZE'],
                               in_chans=3,
                               out_chans=3,
                               embed_dim=config['SWINUNET']['EMB_DIM'],
                               depths=config['SWINUNET']['DEPTH_EN'],
                               num_heads=config['SWINUNET']['HEAD_NUM'],
                               window_size=config['SWINUNET']['WIN_SIZE'],
                               mlp_ratio=config['SWINUNET']['MLP_RATIO'],
                               qkv_bias=config['SWINUNET']['QKV_BIAS'],
                               qk_scale=config['SWINUNET']['QK_SCALE'],
                               drop_rate=config['SWINUNET']['DROP_RATE'],
                               drop_path_rate=config['SWINUNET']['DROP_PATH_RATE'],
                               ape=config['SWINUNET']['APE'],
                               patch_norm=config['SWINUNET']['PATCH_NORM'],
                               use_checkpoint=config['SWINUNET']['USE_CHECKPOINTS'])

    def forward(self, x):
        if x.size()[1] == 1:
            x = x.repeat(1, 3, 1, 1)
        logits = self.swin_unet(x)
        return logits
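Everything `SUNet_model` needs is the `SWINUNET` section of a config dict, which the project presumably loads from a YAML file. A minimal sketch with hand-written values follows; every number below is an illustrative assumption, not the repository's shipped configuration:

```python
import torch
from model.SUNet import SUNet_model

# Illustrative hyperparameters only -- the real values live in the project's YAML config.
config = {'SWINUNET': {
    'IMG_SIZE': 256, 'PATCH_SIZE': 4, 'EMB_DIM': 96,
    'DEPTH_EN': [8, 8, 8, 8], 'HEAD_NUM': [8, 8, 8, 8],
    'WIN_SIZE': 8, 'MLP_RATIO': 4.0, 'QKV_BIAS': True, 'QK_SCALE': None,
    'DROP_RATE': 0.0, 'DROP_PATH_RATE': 0.1, 'APE': False,
    'PATCH_NORM': True, 'USE_CHECKPOINTS': False,
}}

model = SUNet_model(config)
noisy = torch.randn(1, 1, 256, 256)   # grayscale input is repeated to 3 channels in forward()
denoised = model(noisy)
print(denoised.shape)                 # expected: torch.Size([1, 3, 256, 256])
```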
spaces/7hao/bingo/src/state/index.ts
DELETED
@@ -1,118 +0,0 @@
import { BingWebBot } from '@/lib/bots/bing'
import { BingConversationStyle, ChatMessageModel, BotId } from '@/lib/bots/bing/types'
import { nanoid } from '@/lib/utils'
import { atom } from 'jotai'
import { atomWithImmer } from 'jotai-immer'
import { atomWithStorage } from 'jotai/utils'
import { atomFamily } from 'jotai/utils'
import { atomWithHash, atomWithLocation } from 'jotai-location'

const initialMessages: ChatMessageModel[] = [
  { author: 'system', text: 'conversation between user and robot', id: '1' },
  { author: 'user', text: '销量最高的 3 种宠物吸尘器有哪些优点和缺点? ', id: '2' },
  {
    author: 'bot', text: `
您好,这是必应。根据网上的搜索结果,我为您找到了以下三款销量最高的宠物吸尘器,以及它们的优点和缺点:

- **戴森Dyson V10轻量版 Digital Slim Fluffy无线吸尘器**[^1^] [^3^]:这款吸尘器专为宠物家庭设计,拥有强大吸力和多功能吸头,能够有效清除宠物毛发和灰尘。
  - 优点:轻便易用,续航时间长,过滤系统先进,能够有效去除过敏原;
  - 缺点:价格较高,噪音较大,需要定期清洁滤网。
- **飞利浦FC6908/82(W7)无线吸尘器**[^2^] [^4^]:这是一款手持式干吸无线吸尘器,专门为宠物家庭设计。它具有75分钟的超高续航,适合大户型;除螨涡轮刷,可以深度清洁床单被罩,免受螨虫侵害;吸拖一体设计,280ml大水箱,还可以往水箱里加入清洁剂。
  - 优点:清洁性能强劲,操作方便,适用多种场景;
  - 缺点:噪音较大,达到84dB(A);尘杯容量较小,只有0.6L;充电时长较长,需要5小时。
- **必胜(Bissell) 宠物吸尘器**[^2^]:这是一款便携式手持真空吸尘器,专为清理宠物毛发而设计。它配备特殊的橡胶喷嘴和软毛刷头,可以轻松拾取宠物毛发和灰尘;几乎静音运行。
  - 优点:轻巧易用,重量只有1.36kg;电池可充电可更换;
  - 缺点:续航时间不稳定,在15-30分钟之间浮动;吸力不够强劲,在地毯或沙发上可能效果不佳;没有过滤系统或过滤网。

希望这些信息对您有所帮助。如果您还有其他问题,请随时提问。😊
`, id: '3' },
  { author: 'user', text: '今天的新闻', id: '4' },
  {
    author: 'bot', text: `你好,这是Bing。我根据你的搜索查询“今天的新闻”为你找到了以下信息:

# 中国新闻
- 《求是》杂志发表习近平总书记重要文章《深化党和国家机构改革 推进国家治理体系和治理能力现代化》[^1^]
- 梦之队第5金! 全红婵陈芋汐女双10米台夺冠[^1^]
- 中央气象台7月16日18时发布台风橙色预警[^1^]
- 贵州石阡:暑期旅游带动乡村振兴[^1^]
- 激活大科学装置的“人才红利”[^1^]
- 聚才用才留才 让希望的田野成为智慧的田野[^1^]
- 科学家发现粮食系统增效可有效支撑我国碳中和[^1^]
- 成都以赛为媒提升城市美誉度[^1^]
- 香江观澜:“抢人才”成效显著 香港竞争力依旧[^1^]
- 浙江建德:新安江上享清凉[^1^]
- 定向资金持续精准滴灌 高质量发展力量不断积累[^1^]
- 企业信心恢复加大中长期投资 为稳增长奠定基础[^1^]
- 武汉渡江节开赛 千名“泳士”竞渡长江[^1^]
- 金融更加重视对实体经济支持 降低融资成本提振市场信心[^1^]
- 大运来了丨成都迎大运 全民健身动起来[^1^]
- 藏在高校里的宝藏博物馆[^1^]
- 中国汽车工业用70年赢得三个“全球第一”[^1^]
- 辽宁阜新市委常委、副市长赖化平涉嫌严重违纪违法接受审查调查[^1^]

# 国际新闻
- 联合国秘书长:可持续发展面临风险 各国需加强行动[^2^]
- 国际航运业加快绿色转型[^2^]
- 美企反对收紧对华芯片出口限制[^2^]
- 欧洲加大气候科技领域投资[^2^]
- 中企助力丹麦发展清洁能源[^2^]
- 中国代表呼吁国际社会共同努力防止乌克兰局势失控[^2^]
- 中国和阿尔及利亚共同构建新型国际关系典范[^2^]

以上信息仅供参考,具体详情请点击以下链接查看:

[1] [中国新闻_央视网(cctv.com)](https://news.cctv.com/china/)
[2] [人民网_网上的人民日报](http://www.people.com.cn/)`, id: '5' },
  { author: 'user', text: '写一个快排', id: '6' },
  { author: 'bot', text: "你好,这是Bing。我可以帮你用python写一个快排。快速排序是一种基于分治思想的排序算法,它的基本思想是:从待排序的数组中选择一个基准元素,将数组分为两个子数组,使得左边的子数组中的元素都小于或等于基准元素,右边的子数组中的元素都大于或等于基准元素,然后对两个子数组递归地进行快速排序,最后合并结果。下面是用python实现的快速排序的代码:\n\n```python\n# 定义一个函数,用于交换数组中的两个元素\ndef swap(arr, i, j):\n temp = arr[i]\n arr[i] = arr[j]\n arr[j] = temp\n\n# 定义一个函数,用于对数组的一段区间进行划分\ndef partition(arr, low, high):\n # 选择最右边的元素作为基准元素\n pivot = arr[high]\n # 初始化一个指针,指向左边区间的最后一个位置\n i = low - 1\n # 遍历左边区间中的每个元素\n for j in range(low, high):\n # 如果当前元素小于或等于基准元素\n if arr[j] <= pivot:\n # 将指针右移一位\n i += 1\n # 将指针所指的元素与当前元素交换\n swap(arr, i, j)\n # 将基准元素与指针右边的第一个元素交换\n swap(arr, i + 1, high)\n # 返回基准元素的最终位置\n return i + 1\n\n# 定义一个函数,用于对数组进行快速排序\ndef quick_sort(arr, low, high):\n # 如果区间长度大于1\n if low < high:\n # 对区间进行划分,得到基准元素的位置\n p = partition(arr, low, high)\n # 对左边子区间进行快速排序\n quick_sort(arr, low, p - 1)\n # 对右边子区间进行快速排序\n quick_sort(arr, p + 1, high)\n\n# 测试代码\narr = [5, 3, 7, 2, 9, 1, 4, 6, 8]\nprint(\"原始数组:\", arr)\nquick_sort(arr, 0, len(arr) - 1)\nprint(\"排序后的数组:\", arr)\n```", id: '7' },
  {
    author: 'bot', text: "好的,我会尝试画一只猫。\n > ",
    id: '8'
  }
]

export const GreetMessages = [
  '谢谢你! 知道你什么时候准备好继续前进总是很有帮助的。我现在能为你回答什么问题?',
  '重新开始总是很棒。问我任何问题!',
  '当然,我很乐意重新开始。我现在可以为你提供哪些帮助?',
  '当然,我已准备好进行新的挑战。我现在可以为你做什么?',
  '很好,让我们来更改主题。你在想什么?',
  '不用担心,我很高兴尝试一些新内容。我现在可以为你回答什么问题?',
  '好的,我准备好了!感谢重置。我们应该了解哪些内容?',
  '感谢刷新!你有新的话题吗?',
  '明白了,让我们重新开始。接下来应该讨论什么?',
  '下一步!我可以为你做什么?',
  '好的,我已准备好新话题。我们应该一起了解哪些内容?'
]

export const bingConversationStyleAtom = atomWithStorage<BingConversationStyle>('bingConversationStyle', BingConversationStyle.Creative, undefined, { unstable_getOnInit: true })
export const voiceAtom = atomWithStorage<boolean>('enableTTS', false, undefined, { unstable_getOnInit: true })

type Param = { botId: BotId; page: string }

const createBotInstance = () => {
  return new BingWebBot({
    cookie: ' ',
    ua: ' ',
  })
}

export const chatFamily = atomFamily(
  (param: Param) => {
    return atomWithImmer({
      botId: param.botId,
      bot: createBotInstance(),
      messages: [] as ChatMessageModel[],
      generatingMessageId: '',
      abortController: undefined as AbortController | undefined,
      conversationId: nanoid(),
    })
  },
  (a, b) => a.botId === b.botId && a.page === b.page,
)

export const hashAtom = atomWithHash('dialog', '')

export const locationAtom = atomWithLocation()

export const voiceListenAtom = atom(false)
spaces/AFlac199/openai-reverse-proxy/README.md
DELETED
@@ -1,10 +0,0 @@
---
title: Openai Reverse Proxy
emoji: 🐨
colorFrom: blue
colorTo: green
sdk: docker
pinned: false
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/AIConsultant/MusicGen/audiocraft/utils/samples/manager.py
DELETED
@@ -1,386 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-"""
-API that can manage the storage and retrieval of generated samples produced by experiments.
-
-It offers the following benefits:
-* Samples are stored in a consistent way across epochs
-* Metadata about the samples can be stored and retrieved
-* Can retrieve audio
-* Identifiers are reliable and deterministic for prompted and conditioned samples
-* Can request the samples for multiple XPs, grouped by sample identifier
-* For no-input samples (no prompt and no conditions), samples across XPs are matched
-  by sorting their identifiers
-"""
-
-from concurrent.futures import ThreadPoolExecutor
-from dataclasses import asdict, dataclass
-from functools import lru_cache
-import hashlib
-import json
-import logging
-from pathlib import Path
-import re
-import typing as tp
-import unicodedata
-import uuid
-
-import dora
-import torch
-
-from ...data.audio import audio_read, audio_write
-
-
-logger = logging.getLogger(__name__)
-
-
-@dataclass
-class ReferenceSample:
-    id: str
-    path: str
-    duration: float
-
-
-@dataclass
-class Sample:
-    id: str
-    path: str
-    epoch: int
-    duration: float
-    conditioning: tp.Optional[tp.Dict[str, tp.Any]]
-    prompt: tp.Optional[ReferenceSample]
-    reference: tp.Optional[ReferenceSample]
-    generation_args: tp.Optional[tp.Dict[str, tp.Any]]
-
-    def __hash__(self):
-        return hash(self.id)
-
-    def audio(self) -> tp.Tuple[torch.Tensor, int]:
-        return audio_read(self.path)
-
-    def audio_prompt(self) -> tp.Optional[tp.Tuple[torch.Tensor, int]]:
-        return audio_read(self.prompt.path) if self.prompt is not None else None
-
-    def audio_reference(self) -> tp.Optional[tp.Tuple[torch.Tensor, int]]:
-        return audio_read(self.reference.path) if self.reference is not None else None
-
-
-class SampleManager:
-    """Audio samples IO handling within a given dora xp.
-
-    The sample manager handles the dumping and loading logic for generated and
-    reference samples across epochs for a given xp, providing a simple API to
-    store, retrieve and compare audio samples.
-
-    Args:
-        xp (dora.XP): Dora experiment object. The XP contains information on the XP folder
-            where all outputs are stored and the configuration of the experiment,
-            which is useful to retrieve audio-related parameters.
-        map_reference_to_sample_id (bool): Whether to use the sample_id for all reference samples
-            instead of generating a dedicated hash id. This is useful to allow easier comparison
-            with ground truth sample from the files directly without having to read the JSON metadata
-            to do the mapping (at the cost of potentially dumping duplicate prompts/references
-            depending on the task).
-    """
-    def __init__(self, xp: dora.XP, map_reference_to_sample_id: bool = False):
-        self.xp = xp
-        self.base_folder: Path = xp.folder / xp.cfg.generate.path
-        self.reference_folder = self.base_folder / 'reference'
-        self.map_reference_to_sample_id = map_reference_to_sample_id
-        self.samples: tp.List[Sample] = []
-        self._load_samples()
-
-    @property
-    def latest_epoch(self):
-        """Latest epoch across all samples."""
-        return max(self.samples, key=lambda x: x.epoch).epoch if self.samples else 0
-
-    def _load_samples(self):
-        """Scan the sample folder and load existing samples."""
-        jsons = self.base_folder.glob('**/*.json')
-        with ThreadPoolExecutor(6) as pool:
-            self.samples = list(pool.map(self._load_sample, jsons))
-
-    @staticmethod
-    @lru_cache(2**26)
-    def _load_sample(json_file: Path) -> Sample:
-        with open(json_file, 'r') as f:
-            data: tp.Dict[str, tp.Any] = json.load(f)
-        # fetch prompt data
-        prompt_data = data.get('prompt')
-        prompt = ReferenceSample(id=prompt_data['id'], path=prompt_data['path'],
-                                 duration=prompt_data['duration']) if prompt_data else None
-        # fetch reference data
-        reference_data = data.get('reference')
-        reference = ReferenceSample(id=reference_data['id'], path=reference_data['path'],
-                                    duration=reference_data['duration']) if reference_data else None
-        # build sample object
-        return Sample(id=data['id'], path=data['path'], epoch=data['epoch'], duration=data['duration'],
-                      prompt=prompt, conditioning=data.get('conditioning'), reference=reference,
-                      generation_args=data.get('generation_args'))
-
-    def _init_hash(self):
-        return hashlib.sha1()
-
-    def _get_tensor_id(self, tensor: torch.Tensor) -> str:
-        hash_id = self._init_hash()
-        hash_id.update(tensor.numpy().data)
-        return hash_id.hexdigest()
-
-    def _get_sample_id(self, index: int, prompt_wav: tp.Optional[torch.Tensor],
-                       conditions: tp.Optional[tp.Dict[str, str]]) -> str:
-        """Computes an id for a sample given its input data.
-        This id is deterministic if prompt and/or conditions are provided by using a sha1 hash on the input.
-        Otherwise, a random id of the form "noinput_{uuid4().hex}" is returned.
-
-        Args:
-            index (int): Batch index, helpful to differentiate samples from the same batch.
-            prompt_wav (torch.Tensor): Prompt used during generation.
-            conditions (dict[str, str]): Conditioning used during generation.
-        """
-        # For totally unconditioned generations we will just use a random UUID.
-        # The function get_samples_for_xps will do a simple ordered match with a custom key.
-        if prompt_wav is None and not conditions:
-            return f"noinput_{uuid.uuid4().hex}"
-
-        # Human readable portion
-        hr_label = ""
-        # Create a deterministic id using hashing
-        hash_id = self._init_hash()
-        hash_id.update(f"{index}".encode())
-        if prompt_wav is not None:
-            hash_id.update(prompt_wav.numpy().data)
-            hr_label += "_prompted"
-        else:
-            hr_label += "_unprompted"
-        if conditions:
-            encoded_json = json.dumps(conditions, sort_keys=True).encode()
-            hash_id.update(encoded_json)
-            cond_str = "-".join([f"{key}={slugify(value)}"
-                                 for key, value in sorted(conditions.items())])
-            cond_str = cond_str[:100]  # some raw text might be too long to be a valid filename
-            cond_str = cond_str if len(cond_str) > 0 else "unconditioned"
-            hr_label += f"_{cond_str}"
-        else:
-            hr_label += "_unconditioned"
-
-        return hash_id.hexdigest() + hr_label
-
-    def _store_audio(self, wav: torch.Tensor, stem_path: Path, overwrite: bool = False) -> Path:
-        """Stores the audio with the given stem path using the XP's configuration.
-
-        Args:
-            wav (torch.Tensor): Audio to store.
-            stem_path (Path): Path in sample output directory with file stem to use.
-            overwrite (bool): When False (default), skips storing an existing audio file.
-        Returns:
-            Path: The path at which the audio is stored.
-        """
-        existing_paths = [
-            path for path in stem_path.parent.glob(stem_path.stem + '.*')
-            if path.suffix != '.json'
-        ]
-        exists = len(existing_paths) > 0
-        if exists and overwrite:
-            logger.warning(f"Overwriting existing audio file with stem path {stem_path}")
-        elif exists:
-            return existing_paths[0]
-
-        audio_path = audio_write(stem_path, wav, **self.xp.cfg.generate.audio)
-        return audio_path
-
-    def add_sample(self, sample_wav: torch.Tensor, epoch: int, index: int = 0,
-                   conditions: tp.Optional[tp.Dict[str, str]] = None, prompt_wav: tp.Optional[torch.Tensor] = None,
-                   ground_truth_wav: tp.Optional[torch.Tensor] = None,
-                   generation_args: tp.Optional[tp.Dict[str, tp.Any]] = None) -> Sample:
-        """Adds a single sample.
-        The sample is stored in the XP's sample output directory, under a corresponding epoch folder.
-        Each sample is assigned an id which is computed using the input data. In addition to the
-        sample itself, a json file containing associated metadata is stored next to it.
-
-        Args:
-            sample_wav (torch.Tensor): sample audio to store. Tensor of shape [channels, shape].
-            epoch (int): current training epoch.
-            index (int): helpful to differentiate samples from the same batch.
-            conditions (dict[str, str], optional): conditioning used during generation.
-            prompt_wav (torch.Tensor, optional): prompt used during generation. Tensor of shape [channels, shape].
-            ground_truth_wav (torch.Tensor, optional): reference audio where prompt was extracted from.
-                Tensor of shape [channels, shape].
-            generation_args (dict[str, any], optional): dictionary of other arguments used during generation.
-        Returns:
-            Sample: The saved sample.
-        """
-        sample_id = self._get_sample_id(index, prompt_wav, conditions)
-        reuse_id = self.map_reference_to_sample_id
-        prompt, ground_truth = None, None
-        if prompt_wav is not None:
-            prompt_id = sample_id if reuse_id else self._get_tensor_id(prompt_wav.sum(0, keepdim=True))
-            prompt_duration = prompt_wav.shape[-1] / self.xp.cfg.sample_rate
-            prompt_path = self._store_audio(prompt_wav, self.base_folder / str(epoch) / 'prompt' / prompt_id)
-            prompt = ReferenceSample(prompt_id, str(prompt_path), prompt_duration)
-        if ground_truth_wav is not None:
-            ground_truth_id = sample_id if reuse_id else self._get_tensor_id(ground_truth_wav.sum(0, keepdim=True))
-            ground_truth_duration = ground_truth_wav.shape[-1] / self.xp.cfg.sample_rate
-            ground_truth_path = self._store_audio(ground_truth_wav, self.base_folder / 'reference' / ground_truth_id)
-            ground_truth = ReferenceSample(ground_truth_id, str(ground_truth_path), ground_truth_duration)
-        sample_path = self._store_audio(sample_wav, self.base_folder / str(epoch) / sample_id, overwrite=True)
-        duration = sample_wav.shape[-1] / self.xp.cfg.sample_rate
-        sample = Sample(sample_id, str(sample_path), epoch, duration, conditions, prompt, ground_truth, generation_args)
-        self.samples.append(sample)
-        with open(sample_path.with_suffix('.json'), 'w') as f:
-            json.dump(asdict(sample), f, indent=2)
-        return sample
-
-    def add_samples(self, samples_wavs: torch.Tensor, epoch: int,
-                    conditioning: tp.Optional[tp.List[tp.Dict[str, tp.Any]]] = None,
-                    prompt_wavs: tp.Optional[torch.Tensor] = None,
-                    ground_truth_wavs: tp.Optional[torch.Tensor] = None,
-                    generation_args: tp.Optional[tp.Dict[str, tp.Any]] = None) -> tp.List[Sample]:
-        """Adds a batch of samples.
-        The samples are stored in the XP's sample output directory, under a corresponding
-        epoch folder. Each sample is assigned an id which is computed using the input data and their batch index.
-        In addition to the sample itself, a json file containing associated metadata is stored next to it.
-
-        Args:
-            samples_wavs (torch.Tensor): Batch of audio wavs to store. Tensor of shape [batch_size, channels, shape].
-            epoch (int): Current training epoch.
-            conditioning (list of dict[str, str], optional): List of conditions used during generation,
-                one per sample in the batch.
-            prompt_wavs (torch.Tensor, optional): Prompts used during generation. Tensor of shape
-                [batch_size, channels, shape].
-            ground_truth_wavs (torch.Tensor, optional): Reference audio where prompts were extracted from.
-                Tensor of shape [batch_size, channels, shape].
-            generation_args (dict[str, Any], optional): Dictionary of other arguments used during generation.
-        Returns:
-            samples (list of Sample): The saved audio samples with prompts, ground truth and metadata.
-        """
-        samples = []
-        for idx, wav in enumerate(samples_wavs):
-            prompt_wav = prompt_wavs[idx] if prompt_wavs is not None else None
-            gt_wav = ground_truth_wavs[idx] if ground_truth_wavs is not None else None
-            conditions = conditioning[idx] if conditioning is not None else None
-            samples.append(self.add_sample(wav, epoch, idx, conditions, prompt_wav, gt_wav, generation_args))
-        return samples
-
-    def get_samples(self, epoch: int = -1, max_epoch: int = -1, exclude_prompted: bool = False,
-                    exclude_unprompted: bool = False, exclude_conditioned: bool = False,
-                    exclude_unconditioned: bool = False) -> tp.Set[Sample]:
-        """Returns a set of samples for this XP. Optionally, you can filter which samples to obtain.
-        Please note that existing samples are loaded during the manager's initialization, and samples added
-        through this manager are also tracked. Any other external changes are not tracked automatically,
-        so creating a new manager is the only way to detect them.
-
-        Args:
-            epoch (int): If provided, only return samples corresponding to this epoch.
-            max_epoch (int): If provided, only return samples corresponding to the latest epoch that is <= max_epoch.
-            exclude_prompted (bool): If True, does not include samples that used a prompt.
-            exclude_unprompted (bool): If True, does not include samples that did not use a prompt.
-            exclude_conditioned (bool): If True, excludes samples that used conditioning.
-            exclude_unconditioned (bool): If True, excludes samples that did not use conditioning.
-        Returns:
-            Samples (set of Sample): The retrieved samples matching the provided filters.
-        """
-        if max_epoch >= 0:
-            samples_epoch = max(sample.epoch for sample in self.samples if sample.epoch <= max_epoch)
-        else:
-            samples_epoch = self.latest_epoch if epoch < 0 else epoch
-        samples = {
-            sample
-            for sample in self.samples
-            if (
-                (sample.epoch == samples_epoch) and
-                (not exclude_prompted or sample.prompt is None) and
-                (not exclude_unprompted or sample.prompt is not None) and
-                (not exclude_conditioned or not sample.conditioning) and
-                (not exclude_unconditioned or sample.conditioning)
-            )
-        }
-        return samples
-
-
-def slugify(value: tp.Any, allow_unicode: bool = False):
-    """Process string for safer file naming.
-
-    Taken from https://github.com/django/django/blob/master/django/utils/text.py
-
-    Convert to ASCII if 'allow_unicode' is False. Convert spaces or repeated
-    dashes to single dashes. Remove characters that aren't alphanumerics,
-    underscores, or hyphens. Convert to lowercase. Also strip leading and
-    trailing whitespace, dashes, and underscores.
-    """
-    value = str(value)
-    if allow_unicode:
-        value = unicodedata.normalize("NFKC", value)
-    else:
-        value = (
-            unicodedata.normalize("NFKD", value)
-            .encode("ascii", "ignore")
-            .decode("ascii")
-        )
-    value = re.sub(r"[^\w\s-]", "", value.lower())
-    return re.sub(r"[-\s]+", "-", value).strip("-_")
-
-
-def _match_stable_samples(samples_per_xp: tp.List[tp.Set[Sample]]) -> tp.Dict[str, tp.List[Sample]]:
-    # Create a dictionary of stable id -> sample per XP
-    stable_samples_per_xp = [{
-        sample.id: sample for sample in samples
-        if sample.prompt is not None or sample.conditioning
-    } for samples in samples_per_xp]
-    # Set of all stable ids
-    stable_ids = {id for samples in stable_samples_per_xp for id in samples.keys()}
-    # Dictionary of stable id -> list of samples. If an XP does not have it, assign None
-    stable_samples = {id: [xp.get(id) for xp in stable_samples_per_xp] for id in stable_ids}
-    # Filter out ids that contain None values (we only want matched samples after all)
-    # cast is necessary to avoid mypy linter errors.
-    return {id: tp.cast(tp.List[Sample], samples) for id, samples in stable_samples.items() if None not in samples}
-
-
-def _match_unstable_samples(samples_per_xp: tp.List[tp.Set[Sample]]) -> tp.Dict[str, tp.List[Sample]]:
-    # For unstable ids, we use a sorted list since we'll match them in order
-    unstable_samples_per_xp = [[
-        sample for sample in sorted(samples, key=lambda x: x.id)
-        if sample.prompt is None and not sample.conditioning
-    ] for samples in samples_per_xp]
-    # Trim samples per xp so all samples can have a match
-    min_len = min([len(samples) for samples in unstable_samples_per_xp])
-    unstable_samples_per_xp = [samples[:min_len] for samples in unstable_samples_per_xp]
-    # Dictionary of index -> list of matched samples
-    return {
-        f'noinput_{i}': [samples[i] for samples in unstable_samples_per_xp] for i in range(min_len)
-    }
-
-
-def get_samples_for_xps(xps: tp.List[dora.XP], **kwargs) -> tp.Dict[str, tp.List[Sample]]:
-    """Gets a dictionary of matched samples across the given XPs.
-    Each dictionary entry maps a sample id to a list of samples for that id. The number of samples per id
-    will always match the number of XPs provided and will correspond to each XP in the same order given.
-    In other words, only samples that can be matched across all provided XPs will be returned
-    in order to satisfy this rule.
-
-    There are two types of ids that can be returned: stable and unstable.
-    * Stable IDs are deterministic ids that were computed by the SampleManager given a sample's inputs
-      (prompts/conditioning). This is why we can match them across XPs.
-    * Unstable IDs are of the form "noinput_{idx}" and are generated on-the-fly, in order to map samples
-      that used non-deterministic, random ids. This is the case for samples that did not use prompts or
-      conditioning for their generation. This function will sort these samples by their id and match them
-      by their index.
-
-    Args:
-        xps: a list of XPs to match samples from.
-        start_epoch (int): If provided, only return samples corresponding to this epoch or newer.
-        end_epoch (int): If provided, only return samples corresponding to this epoch or older.
-        exclude_prompted (bool): If True, does not include samples that used a prompt.
-        exclude_unprompted (bool): If True, does not include samples that did not use a prompt.
-        exclude_conditioned (bool): If True, excludes samples that used conditioning.
-        exclude_unconditioned (bool): If True, excludes samples that did not use conditioning.
-    """
-    managers = [SampleManager(xp) for xp in xps]
-    samples_per_xp = [manager.get_samples(**kwargs) for manager in managers]
-    stable_samples = _match_stable_samples(samples_per_xp)
-    unstable_samples = _match_unstable_samples(samples_per_xp)
-    return dict(stable_samples, **unstable_samples)
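
A brief usage sketch of the deleted sample-manager API above, for reference. The `xp` and `other_xp` objects, the import path, and the config values are assumptions inferred from the file's location and its docstrings, not taken from the repository:

```python
# Hypothetical usage of SampleManager; assumes `xp` / `other_xp` are configured
# dora.XP experiments whose cfg provides generate.path, generate.audio and sample_rate.
import torch
from audiocraft.utils.samples.manager import SampleManager, get_samples_for_xps

manager = SampleManager(xp)

# Store one generated clip with its conditioning. The sample id is a sha1 over
# (index, prompt, conditions), so the same inputs always map to the same id.
wav = torch.zeros(1, 32000)  # [channels, samples] placeholder audio
sample = manager.add_sample(wav, epoch=10, index=0,
                            conditions={'description': 'warm jazz piano'})

# Fetch all conditioned samples from the latest epoch of this XP.
conditioned = manager.get_samples(exclude_unconditioned=True)

# Match samples across two experiments by their deterministic (or sorted) ids.
matched = get_samples_for_xps([xp, other_xp], epoch=10)
```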
spaces/AIGC-Audio/AudioGPT/NeuralSeq/utils/os_utils.py
DELETED
@@ -1,20 +0,0 @@
-import os
-import subprocess
-
-
-def link_file(from_file, to_file):
-    subprocess.check_call(
-        f'ln -s "`realpath --relative-to="{os.path.dirname(to_file)}" "{from_file}"`" "{to_file}"', shell=True)
-
-
-def move_file(from_file, to_file):
-    subprocess.check_call(f'mv "{from_file}" "{to_file}"', shell=True)
-
-
-def copy_file(from_file, to_file):
-    subprocess.check_call(f'cp -r "{from_file}" "{to_file}"', shell=True)
-
-
-def remove_file(*fns):
-    for f in fns:
-        subprocess.check_call(f'rm -rf "{f}"', shell=True)
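
The helpers above shell out to POSIX `ln`/`mv`/`cp`/`rm`, so they fail on Windows and on filenames containing double quotes. A portable standard-library sketch with the same intent (an addition for illustration, not part of the original file):

```python
# Portable equivalents of the helpers above, using only the standard library.
import os
import shutil


def link_file(from_file, to_file):
    # Create a relative symlink at to_file, like `ln -s` with `realpath --relative-to`.
    rel = os.path.relpath(from_file, start=os.path.dirname(to_file))
    os.symlink(rel, to_file)


def move_file(from_file, to_file):
    shutil.move(from_file, to_file)


def copy_file(from_file, to_file):
    # `cp -r` accepts files or directories; dispatch on the source type.
    if os.path.isdir(from_file):
        shutil.copytree(from_file, to_file)
    else:
        shutil.copy2(from_file, to_file)


def remove_file(*fns):
    # `rm -rf` semantics: remove files or whole trees, ignoring missing paths.
    for f in fns:
        if os.path.isdir(f):
            shutil.rmtree(f, ignore_errors=True)
        elif os.path.exists(f):
            os.remove(f)
```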
spaces/AIGC-Audio/AudioGPT/text_to_speech/modules/vocoder/parallel_wavegan/stft_loss.py
DELETED
@@ -1,102 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright 2019 Tomoki Hayashi
-#  MIT License (https://opensource.org/licenses/MIT)
-
-"""STFT-based Loss modules."""
-import librosa
-import torch
-
-from text_to_speech.modules.vocoder.parallel_wavegan.losses import LogSTFTMagnitudeLoss, SpectralConvergengeLoss, stft
-
-
-class STFTLoss(torch.nn.Module):
-    """STFT loss module."""
-
-    def __init__(self, fft_size=1024, shift_size=120, win_length=600, window="hann_window",
-                 use_mel_loss=False):
-        """Initialize STFT loss module."""
-        super(STFTLoss, self).__init__()
-        self.fft_size = fft_size
-        self.shift_size = shift_size
-        self.win_length = win_length
-        self.window = getattr(torch, window)(win_length)
-        self.spectral_convergenge_loss = SpectralConvergengeLoss()
-        self.log_stft_magnitude_loss = LogSTFTMagnitudeLoss()
-        self.use_mel_loss = use_mel_loss
-        self.mel_basis = None
-
-    def forward(self, x, y):
-        """Calculate forward propagation.
-
-        Args:
-            x (Tensor): Predicted signal (B, T).
-            y (Tensor): Groundtruth signal (B, T).
-
-        Returns:
-            Tensor: Spectral convergence loss value.
-            Tensor: Log STFT magnitude loss value.
-
-        """
-        if self.window.device != x.device:
-            self.window = self.window.to(x.device)
-        x_mag = stft(x, self.fft_size, self.shift_size, self.win_length, self.window)
-        y_mag = stft(y, self.fft_size, self.shift_size, self.win_length, self.window)
-        if self.use_mel_loss:
-            if self.mel_basis is None:
-                self.mel_basis = torch.from_numpy(librosa.filters.mel(22050, self.fft_size, 80)).cuda().T
-            x_mag = x_mag @ self.mel_basis
-            y_mag = y_mag @ self.mel_basis
-
-        sc_loss = self.spectral_convergenge_loss(x_mag, y_mag)
-        mag_loss = self.log_stft_magnitude_loss(x_mag, y_mag)
-
-        return sc_loss, mag_loss
-
-
-class MultiResolutionSTFTLoss(torch.nn.Module):
-    """Multi resolution STFT loss module."""
-
-    def __init__(self,
-                 fft_sizes=[1024, 2048, 512],
-                 hop_sizes=[120, 240, 50],
-                 win_lengths=[600, 1200, 240],
-                 window="hann_window",
-                 use_mel_loss=False):
-        """Initialize Multi resolution STFT loss module.
-
-        Args:
-            fft_sizes (list): List of FFT sizes.
-            hop_sizes (list): List of hop sizes.
-            win_lengths (list): List of window lengths.
-            window (str): Window function type.
-
-        """
-        super(MultiResolutionSTFTLoss, self).__init__()
-        assert len(fft_sizes) == len(hop_sizes) == len(win_lengths)
-        self.stft_losses = torch.nn.ModuleList()
-        for fs, ss, wl in zip(fft_sizes, hop_sizes, win_lengths):
-            self.stft_losses += [STFTLoss(fs, ss, wl, window, use_mel_loss)]
-
-    def forward(self, x, y):
-        """Calculate forward propagation.
-
-        Args:
-            x (Tensor): Predicted signal (B, T).
-            y (Tensor): Groundtruth signal (B, T).
-
-        Returns:
-            Tensor: Multi resolution spectral convergence loss value.
-            Tensor: Multi resolution log STFT magnitude loss value.
-
-        """
-        sc_loss = 0.0
-        mag_loss = 0.0
-        for f in self.stft_losses:
-            sc_l, mag_l = f(x, y)
-            sc_loss += sc_l
-            mag_loss += mag_l
-        sc_loss /= len(self.stft_losses)
-        mag_loss /= len(self.stft_losses)
-
-        return sc_loss, mag_loss
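
A minimal usage sketch for the multi-resolution loss above, assuming the file's own import path resolves. Note that the `use_mel_loss=True` branch calls `.cuda()` on the mel basis, so that path assumes a GPU; the default path below does not:

```python
# Sketch: the two returned terms are commonly summed into one training loss.
import torch
from text_to_speech.modules.vocoder.parallel_wavegan.stft_loss import MultiResolutionSTFTLoss

criterion = MultiResolutionSTFTLoss()              # three (fft, hop, win) configs
y_hat = torch.randn(4, 16000, requires_grad=True)  # predicted signal (B, T)
y = torch.randn(4, 16000)                          # ground-truth signal (B, T)

sc_loss, mag_loss = criterion(y_hat, y)            # each averaged over resolutions
(sc_loss + mag_loss).backward()
```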
spaces/AchyuthGamer/OpenGPT-Chat-UI/.svelte-kit/types/src/routes/conversation/$types.d.ts
DELETED
@@ -1,8 +0,0 @@
-import type * as Kit from '@sveltejs/kit';
-
-type Expand<T> = T extends infer O ? { [K in keyof O]: O[K] } : never;
-type RouteParams = { }
-type RouteId = '/conversation';
-
-export type RequestHandler = Kit.RequestHandler<RouteParams, RouteId>;
-export type RequestEvent = Kit.RequestEvent<RouteParams, RouteId>;
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/circlemaskimage/Factory.js
DELETED
@@ -1,13 +0,0 @@
-import CircleMaskImage from './CircleMaskImage.js';
-import ObjectFactory from '../ObjectFactory.js';
-import SetValue from '../../../plugins/utils/object/SetValue.js';
-
-ObjectFactory.register('circleMaskImage', function (x, y, key, frame, config) {
-    var gameObject = new CircleMaskImage(this.scene, x, y, key, frame, config);
-    this.scene.add.existing(gameObject);
-    return gameObject;
-});
-
-SetValue(window, 'RexPlugins.UI.CircleMaskImage', CircleMaskImage);
-
-export default CircleMaskImage;
spaces/AiMimicry/sovits-models/vdecoder/__init__.py
DELETED
File without changes
spaces/AlekseyKorshuk/gai-project/app.py
DELETED
@@ -1,13 +0,0 @@
-import gradio as gr
-
-from modules.about import render_about
-from modules.playground import render_playground
-
-with gr.Blocks() as demo:
-    with gr.Tabs():
-        with gr.TabItem("Playground"):
-            render_playground(demo)
-        with gr.TabItem("About"):
-            render_about()
-
-demo.queue(concurrency_count=100).launch()
spaces/AlekseyKorshuk/model-evaluation/models/base.py
DELETED
@@ -1,51 +0,0 @@
-import os
-import requests
-import gradio as gr
-from conversation import Conversation
-
-
-class BaseModel:
-    name: str
-    endpoint: str
-    namespace: str
-    generation_params: dict
-
-    def __init__(self, name, endpoint, namespace, generation_params):
-        self.name = name
-        self.endpoint = endpoint
-        self.namespace = namespace
-        self.generation_params = generation_params
-
-    def generate_response(self, conversation, custom_generation_params=None):
-        prompt = self._get_prompt(conversation)
-        response = self._get_response(prompt, custom_generation_params)
-        return response
-
-    def _get_prompt(self, conversation: Conversation):
-        prompt = "\n".join(
-            [conversation.memory, conversation.prompt]
-        ).strip()
-        for message in conversation.messages:
-            prompt += f"\n{message['from'].strip()}: {message['value'].strip()}"
-        prompt += f"\n{conversation.bot_label}:"
-        return prompt
-
-    def _get_response(self, text, custom_generation_params):
-        api = str(os.environ.get("API_BASE_PATH")).replace("\{\}", "{}")
-        api = api.format(self.endpoint, self.namespace)
-        parameters = self.generation_params
-        if custom_generation_params is not None:
-            parameters.update(custom_generation_params)
-        payload = {'instances': [text], "parameters": parameters}
-        resp = requests.post(api, json=payload, timeout=600)
-        if resp.status_code != 200:
-            raise gr.Error(f"Endpoint returned code: {resp.status_code}. "
-                           f"Solution: "
-                           f"1. Scale-to-Zero enabled, so please wait for some minutes and try again. "
-                           f"2. Probably the response generated by the model is too big, try changing max_new_tokens. "
-                           f"3. If nothing helps — report the problem.")
-        predictions = resp.json()["predictions"]
-        if isinstance(predictions[0], str):
-            return predictions[0].strip()
-        predictions = sorted(predictions[0], key=lambda d: d['score'])
-        return predictions[-1]["text"].strip()
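
A hypothetical usage sketch for the class above. The endpoint template and all values are placeholders; since `_get_prompt` only reads `memory`, `prompt`, `messages` and `bot_label`, a `SimpleNamespace` stands in for the real `Conversation` here:

```python
# Hypothetical values throughout; no real endpoint is referenced.
import os
from types import SimpleNamespace
from models.base import BaseModel

os.environ["API_BASE_PATH"] = "https://example.com/endpoints/{}/namespaces/{}:predict"

model = BaseModel(
    name="demo-bot",
    endpoint="demo-endpoint",
    namespace="default",
    generation_params={"max_new_tokens": 64, "temperature": 0.7},
)

conversation = SimpleNamespace(
    memory="You are a friendly assistant.",
    prompt="",
    messages=[{"from": "User", "value": "Hi!"}],
    bot_label="Bot",
)

# POSTs to the formatted API_BASE_PATH and returns the best-scored prediction text.
reply = model.generate_response(conversation, custom_generation_params={"top_p": 0.9})
```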
spaces/Alfasign/diffusers-gallery/README.md
DELETED
@@ -1,14 +0,0 @@
----
-title: Diffusers Gallery
-emoji: 🖼️
-colorFrom: red
-colorTo: green
-sdk: static
-app_port: 8080
-fullWidth: true
-pinned: false
-license: mit
-duplicated_from: huggingface-projects/diffusers-gallery
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/text_to_image/train_text_to_image_flax.py
DELETED
@@ -1,573 +0,0 @@
-import argparse
-import logging
-import math
-import os
-import random
-from pathlib import Path
-
-import jax
-import jax.numpy as jnp
-import numpy as np
-import optax
-import torch
-import torch.utils.checkpoint
-import transformers
-from datasets import load_dataset
-from flax import jax_utils
-from flax.training import train_state
-from flax.training.common_utils import shard
-from huggingface_hub import create_repo, upload_folder
-from torchvision import transforms
-from tqdm.auto import tqdm
-from transformers import CLIPImageProcessor, CLIPTokenizer, FlaxCLIPTextModel, set_seed
-
-from diffusers import (
-    FlaxAutoencoderKL,
-    FlaxDDPMScheduler,
-    FlaxPNDMScheduler,
-    FlaxStableDiffusionPipeline,
-    FlaxUNet2DConditionModel,
-)
-from diffusers.pipelines.stable_diffusion import FlaxStableDiffusionSafetyChecker
-from diffusers.utils import check_min_version
-
-
-# Will error if the minimal version of diffusers is not installed. Remove at your own risk.
-check_min_version("0.19.0")
-
-logger = logging.getLogger(__name__)
-
-
-def parse_args():
-    parser = argparse.ArgumentParser(description="Simple example of a training script.")
-    parser.add_argument(
-        "--pretrained_model_name_or_path",
-        type=str,
-        default=None,
-        required=True,
-        help="Path to pretrained model or model identifier from huggingface.co/models.",
-    )
-    parser.add_argument(
-        "--revision",
-        type=str,
-        default=None,
-        required=False,
-        help="Revision of pretrained model identifier from huggingface.co/models.",
-    )
-    parser.add_argument(
-        "--dataset_name",
-        type=str,
-        default=None,
-        help=(
-            "The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private,"
-            " dataset). It can also be a path pointing to a local copy of a dataset in your filesystem,"
-            " or to a folder containing files that 🤗 Datasets can understand."
-        ),
-    )
-    parser.add_argument(
-        "--dataset_config_name",
-        type=str,
-        default=None,
-        help="The config of the Dataset, leave as None if there's only one config.",
-    )
-    parser.add_argument(
-        "--train_data_dir",
-        type=str,
-        default=None,
-        help=(
-            "A folder containing the training data. Folder contents must follow the structure described in"
-            " https://huggingface.co/docs/datasets/image_dataset#imagefolder. In particular, a `metadata.jsonl` file"
-            " must exist to provide the captions for the images. Ignored if `dataset_name` is specified."
-        ),
-    )
-    parser.add_argument(
-        "--image_column", type=str, default="image", help="The column of the dataset containing an image."
-    )
-    parser.add_argument(
-        "--caption_column",
-        type=str,
-        default="text",
-        help="The column of the dataset containing a caption or a list of captions.",
-    )
-    parser.add_argument(
-        "--max_train_samples",
-        type=int,
-        default=None,
-        help=(
-            "For debugging purposes or quicker training, truncate the number of training examples to this "
-            "value if set."
-        ),
-    )
-    parser.add_argument(
-        "--output_dir",
-        type=str,
-        default="sd-model-finetuned",
-        help="The output directory where the model predictions and checkpoints will be written.",
-    )
-    parser.add_argument(
-        "--cache_dir",
-        type=str,
-        default=None,
-        help="The directory where the downloaded models and datasets will be stored.",
-    )
-    parser.add_argument("--seed", type=int, default=0, help="A seed for reproducible training.")
-    parser.add_argument(
-        "--resolution",
-        type=int,
-        default=512,
-        help=(
-            "The resolution for input images, all the images in the train/validation dataset will be resized to this"
-            " resolution"
-        ),
-    )
-    parser.add_argument(
-        "--center_crop",
-        default=False,
-        action="store_true",
-        help=(
-            "Whether to center crop the input images to the resolution. If not set, the images will be randomly"
-            " cropped. The images will be resized to the resolution first before cropping."
-        ),
-    )
-    parser.add_argument(
-        "--random_flip",
-        action="store_true",
-        help="whether to randomly flip images horizontally",
-    )
-    parser.add_argument(
-        "--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader."
-    )
-    parser.add_argument("--num_train_epochs", type=int, default=100)
-    parser.add_argument(
-        "--max_train_steps",
-        type=int,
-        default=None,
-        help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
-    )
-    parser.add_argument(
-        "--learning_rate",
-        type=float,
-        default=1e-4,
-        help="Initial learning rate (after the potential warmup period) to use.",
-    )
-    parser.add_argument(
-        "--scale_lr",
-        action="store_true",
-        default=False,
-        help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
-    )
-    parser.add_argument(
-        "--lr_scheduler",
-        type=str,
-        default="constant",
-        help=(
-            'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
-            ' "constant", "constant_with_warmup"]'
-        ),
-    )
-    parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
-    parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
-    parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
-    parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
-    parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
-    parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
-    parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
-    parser.add_argument(
-        "--hub_model_id",
-        type=str,
-        default=None,
-        help="The name of the repository to keep in sync with the local `output_dir`.",
-    )
-    parser.add_argument(
-        "--logging_dir",
-        type=str,
-        default="logs",
-        help=(
-            "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
-            " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
-        ),
-    )
-    parser.add_argument(
-        "--report_to",
-        type=str,
-        default="tensorboard",
-        help=(
-            'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
-            ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
-        ),
-    )
-    parser.add_argument(
-        "--mixed_precision",
-        type=str,
-        default="no",
-        choices=["no", "fp16", "bf16"],
-        help=(
-            "Whether to use mixed precision. Choose"
-            "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
-            "and an Nvidia Ampere GPU."
-        ),
-    )
-    parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
-
-    args = parser.parse_args()
-    env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
-    if env_local_rank != -1 and env_local_rank != args.local_rank:
-        args.local_rank = env_local_rank
-
-    # Sanity checks
-    if args.dataset_name is None and args.train_data_dir is None:
-        raise ValueError("Need either a dataset name or a training folder.")
-
-    return args
-
-
-dataset_name_mapping = {
-    "lambdalabs/pokemon-blip-captions": ("image", "text"),
-}
-
-
-def get_params_to_save(params):
-    return jax.device_get(jax.tree_util.tree_map(lambda x: x[0], params))
-
-
-def main():
-    args = parse_args()
-
-    logging.basicConfig(
-        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
-        datefmt="%m/%d/%Y %H:%M:%S",
-        level=logging.INFO,
-    )
-    # Setup logging, we only want one process per machine to log things on the screen.
-    logger.setLevel(logging.INFO if jax.process_index() == 0 else logging.ERROR)
-    if jax.process_index() == 0:
-        transformers.utils.logging.set_verbosity_info()
-    else:
-        transformers.utils.logging.set_verbosity_error()
-
-    if args.seed is not None:
-        set_seed(args.seed)
-
-    # Handle the repository creation
-    if jax.process_index() == 0:
-        if args.output_dir is not None:
-            os.makedirs(args.output_dir, exist_ok=True)
-
-        if args.push_to_hub:
-            repo_id = create_repo(
-                repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token
-            ).repo_id
-
-    # Get the datasets: you can either provide your own training and evaluation files (see below)
-    # or specify a Dataset from the hub (the dataset will be downloaded automatically from the datasets Hub).
-
-    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
-    # download the dataset.
-    if args.dataset_name is not None:
-        # Downloading and loading a dataset from the hub.
-        dataset = load_dataset(
-            args.dataset_name,
-            args.dataset_config_name,
-            cache_dir=args.cache_dir,
-        )
-    else:
-        data_files = {}
-        if args.train_data_dir is not None:
-            data_files["train"] = os.path.join(args.train_data_dir, "**")
-        dataset = load_dataset(
-            "imagefolder",
-            data_files=data_files,
-            cache_dir=args.cache_dir,
-        )
-        # See more about loading custom images at
-        # https://huggingface.co/docs/datasets/v2.4.0/en/image_load#imagefolder
-
-    # Preprocessing the datasets.
-    # We need to tokenize inputs and targets.
-    column_names = dataset["train"].column_names
-
-    # 6. Get the column names for input/target.
-    dataset_columns = dataset_name_mapping.get(args.dataset_name, None)
-    if args.image_column is None:
-        image_column = dataset_columns[0] if dataset_columns is not None else column_names[0]
-    else:
-        image_column = args.image_column
-        if image_column not in column_names:
-            raise ValueError(
-                f"--image_column' value '{args.image_column}' needs to be one of: {', '.join(column_names)}"
-            )
-    if args.caption_column is None:
-        caption_column = dataset_columns[1] if dataset_columns is not None else column_names[1]
-    else:
-        caption_column = args.caption_column
-        if caption_column not in column_names:
-            raise ValueError(
-                f"--caption_column' value '{args.caption_column}' needs to be one of: {', '.join(column_names)}"
-            )
-
-    # Preprocessing the datasets.
-    # We need to tokenize input captions and transform the images.
-    def tokenize_captions(examples, is_train=True):
-        captions = []
-        for caption in examples[caption_column]:
-            if isinstance(caption, str):
-                captions.append(caption)
-            elif isinstance(caption, (list, np.ndarray)):
-                # take a random caption if there are multiple
-                captions.append(random.choice(caption) if is_train else caption[0])
-            else:
-                raise ValueError(
-                    f"Caption column `{caption_column}` should contain either strings or lists of strings."
-                )
-        inputs = tokenizer(captions, max_length=tokenizer.model_max_length, padding="do_not_pad", truncation=True)
-        input_ids = inputs.input_ids
-        return input_ids
-
-    train_transforms = transforms.Compose(
-        [
-            transforms.Resize(args.resolution, interpolation=transforms.InterpolationMode.BILINEAR),
-            transforms.CenterCrop(args.resolution) if args.center_crop else transforms.RandomCrop(args.resolution),
-            transforms.RandomHorizontalFlip() if args.random_flip else transforms.Lambda(lambda x: x),
-            transforms.ToTensor(),
-            transforms.Normalize([0.5], [0.5]),
-        ]
-    )
-
-    def preprocess_train(examples):
-        images = [image.convert("RGB") for image in examples[image_column]]
-        examples["pixel_values"] = [train_transforms(image) for image in images]
-        examples["input_ids"] = tokenize_captions(examples)
-
-        return examples
-
-    if args.max_train_samples is not None:
-        dataset["train"] = dataset["train"].shuffle(seed=args.seed).select(range(args.max_train_samples))
-    # Set the training transforms
-    train_dataset = dataset["train"].with_transform(preprocess_train)
-
-    def collate_fn(examples):
-        pixel_values = torch.stack([example["pixel_values"] for example in examples])
-        pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float()
-        input_ids = [example["input_ids"] for example in examples]
-
-        padded_tokens = tokenizer.pad(
-            {"input_ids": input_ids}, padding="max_length", max_length=tokenizer.model_max_length, return_tensors="pt"
-        )
-        batch = {
-            "pixel_values": pixel_values,
-            "input_ids": padded_tokens.input_ids,
-        }
-        batch = {k: v.numpy() for k, v in batch.items()}
-
-        return batch
-
-    total_train_batch_size = args.train_batch_size * jax.local_device_count()
-    train_dataloader = torch.utils.data.DataLoader(
-        train_dataset, shuffle=True, collate_fn=collate_fn, batch_size=total_train_batch_size, drop_last=True
-    )
-
-    weight_dtype = jnp.float32
-    if args.mixed_precision == "fp16":
-        weight_dtype = jnp.float16
-    elif args.mixed_precision == "bf16":
-        weight_dtype = jnp.bfloat16
-
-    # Load models and create wrapper for stable diffusion
-    tokenizer = CLIPTokenizer.from_pretrained(
-        args.pretrained_model_name_or_path, revision=args.revision, subfolder="tokenizer"
-    )
-    text_encoder = FlaxCLIPTextModel.from_pretrained(
-        args.pretrained_model_name_or_path, revision=args.revision, subfolder="text_encoder", dtype=weight_dtype
-    )
-    vae, vae_params = FlaxAutoencoderKL.from_pretrained(
-        args.pretrained_model_name_or_path, revision=args.revision, subfolder="vae", dtype=weight_dtype
-    )
-    unet, unet_params = FlaxUNet2DConditionModel.from_pretrained(
-        args.pretrained_model_name_or_path, revision=args.revision, subfolder="unet", dtype=weight_dtype
-    )
-
-    # Optimization
-    if args.scale_lr:
-        args.learning_rate = args.learning_rate * total_train_batch_size
-
-    constant_scheduler = optax.constant_schedule(args.learning_rate)
-
-    adamw = optax.adamw(
-        learning_rate=constant_scheduler,
-        b1=args.adam_beta1,
-        b2=args.adam_beta2,
-        eps=args.adam_epsilon,
-        weight_decay=args.adam_weight_decay,
-    )
-
-    optimizer = optax.chain(
-        optax.clip_by_global_norm(args.max_grad_norm),
-        adamw,
-    )
-
-    state = train_state.TrainState.create(apply_fn=unet.__call__, params=unet_params, tx=optimizer)
-
-    noise_scheduler = FlaxDDPMScheduler(
-        beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000
-    )
-    noise_scheduler_state = noise_scheduler.create_state()
-
-    # Initialize our training
-    rng = jax.random.PRNGKey(args.seed)
-    train_rngs = jax.random.split(rng, jax.local_device_count())
-
-    def train_step(state, text_encoder_params, vae_params, batch, train_rng):
-        dropout_rng, sample_rng, new_train_rng = jax.random.split(train_rng, 3)
-
-        def compute_loss(params):
-            # Convert images to latent space
-            vae_outputs = vae.apply(
-                {"params": vae_params}, batch["pixel_values"], deterministic=True, method=vae.encode
-            )
-            latents = vae_outputs.latent_dist.sample(sample_rng)
-            # (NHWC) -> (NCHW)
-            latents = jnp.transpose(latents, (0, 3, 1, 2))
-            latents = latents * vae.config.scaling_factor
-
-            # Sample noise that we'll add to the latents
-            noise_rng, timestep_rng = jax.random.split(sample_rng)
-            noise = jax.random.normal(noise_rng, latents.shape)
-            # Sample a random timestep for each image
-            bsz = latents.shape[0]
-            timesteps = jax.random.randint(
-                timestep_rng,
-                (bsz,),
-                0,
-                noise_scheduler.config.num_train_timesteps,
-            )
-
-            # Add noise to the latents according to the noise magnitude at each timestep
-            # (this is the forward diffusion process)
-            noisy_latents = noise_scheduler.add_noise(noise_scheduler_state, latents, noise, timesteps)
-
-            # Get the text embedding for conditioning
-            encoder_hidden_states = text_encoder(
-                batch["input_ids"],
-                params=text_encoder_params,
-                train=False,
-            )[0]
-
-            # Predict the noise residual and compute loss
-            model_pred = unet.apply(
-                {"params": params}, noisy_latents, timesteps, encoder_hidden_states, train=True
-            ).sample
-
-            # Get the target for loss depending on the prediction type
-            if noise_scheduler.config.prediction_type == "epsilon":
-                target = noise
-            elif noise_scheduler.config.prediction_type == "v_prediction":
-                target = noise_scheduler.get_velocity(noise_scheduler_state, latents, noise, timesteps)
-            else:
-                raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}")
-
-            loss = (target - model_pred) ** 2
-            loss = loss.mean()
-
-            return loss
-
-        grad_fn = jax.value_and_grad(compute_loss)
-        loss, grad = grad_fn(state.params)
-        grad = jax.lax.pmean(grad, "batch")
-
-        new_state = state.apply_gradients(grads=grad)
-
-        metrics = {"loss": loss}
-        metrics = jax.lax.pmean(metrics, axis_name="batch")
-
-        return new_state, metrics, new_train_rng
-
-    # Create parallel version of the train step
-    p_train_step = jax.pmap(train_step, "batch", donate_argnums=(0,))
-
-    # Replicate the train state on each device
-    state = jax_utils.replicate(state)
-    text_encoder_params = jax_utils.replicate(text_encoder.params)
-    vae_params = jax_utils.replicate(vae_params)
-
-    # Train!
-    num_update_steps_per_epoch = math.ceil(len(train_dataloader))
-
-    # Scheduler and math around the number of training steps.
-    if args.max_train_steps is None:
-        args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
-
-    args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
-
-    logger.info("***** Running training *****")
-    logger.info(f"  Num examples = {len(train_dataset)}")
-    logger.info(f"  Num Epochs = {args.num_train_epochs}")
-    logger.info(f"  Instantaneous batch size per device = {args.train_batch_size}")
-    logger.info(f"  Total train batch size (w. parallel & distributed) = {total_train_batch_size}")
-    logger.info(f"  Total optimization steps = {args.max_train_steps}")
-
-    global_step = 0
-
-    epochs = tqdm(range(args.num_train_epochs), desc="Epoch ... ", position=0)
-    for epoch in epochs:
-        # ======================== Training ================================
-
-        train_metrics = []
-
-        steps_per_epoch = len(train_dataset) // total_train_batch_size
-        train_step_progress_bar = tqdm(total=steps_per_epoch, desc="Training...", position=1, leave=False)
-        # train
-        for batch in train_dataloader:
-            batch = shard(batch)
-            state, train_metric, train_rngs = p_train_step(state, text_encoder_params, vae_params, batch, train_rngs)
-            train_metrics.append(train_metric)
-
-            train_step_progress_bar.update(1)
-
-            global_step += 1
-            if global_step >= args.max_train_steps:
-                break
-
-        train_metric = jax_utils.unreplicate(train_metric)
-
-        train_step_progress_bar.close()
-        epochs.write(f"Epoch... ({epoch + 1}/{args.num_train_epochs} | Loss: {train_metric['loss']})")
-
-    # Create the pipeline using the trained modules and save it.
-    if jax.process_index() == 0:
-        scheduler = FlaxPNDMScheduler(
-            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", skip_prk_steps=True
-        )
-        safety_checker = FlaxStableDiffusionSafetyChecker.from_pretrained(
-            "CompVis/stable-diffusion-safety-checker", from_pt=True
-        )
-        pipeline = FlaxStableDiffusionPipeline(
-            text_encoder=text_encoder,
-            vae=vae,
-            unet=unet,
-            tokenizer=tokenizer,
-            scheduler=scheduler,
-            safety_checker=safety_checker,
-            feature_extractor=CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32"),
-        )
-
-        pipeline.save_pretrained(
-            args.output_dir,
-            params={
-                "text_encoder": get_params_to_save(text_encoder_params),
-                "vae": get_params_to_save(vae_params),
-                "unet": get_params_to_save(state.params),
-                "safety_checker": safety_checker.params,
-            },
-        )
-
-        if args.push_to_hub:
-            upload_folder(
-                repo_id=repo_id,
-                folder_path=args.output_dir,
-                commit_message="End of training",
-                ignore_patterns=["step_*", "epoch_*"],
-            )
-
-
-if __name__ == "__main__":
-    main()
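
The heart of the training script above is `compute_loss`: encode images to latents, add scheduler noise at a random timestep, and regress the UNet output onto that noise. A stripped-down JAX sketch of the same objective with a placeholder model; the shapes and schedule values are illustrative only:

```python
import jax
import jax.numpy as jnp


def toy_model(noisy_latents, timesteps):
    return noisy_latents  # stands in for unet.apply(...) in the real script


def ddpm_epsilon_loss(rng, latents, alphas_cumprod):
    noise_rng, t_rng = jax.random.split(rng)
    noise = jax.random.normal(noise_rng, latents.shape)
    t = jax.random.randint(t_rng, (latents.shape[0],), 0, alphas_cumprod.shape[0])
    a = alphas_cumprod[t][:, None, None, None]
    # Forward diffusion: sqrt(a_bar_t) * x0 + sqrt(1 - a_bar_t) * eps
    noisy = jnp.sqrt(a) * latents + jnp.sqrt(1.0 - a) * noise
    pred = toy_model(noisy, t)
    return jnp.mean((noise - pred) ** 2)  # the "epsilon" prediction target


rng = jax.random.PRNGKey(0)
latents = jnp.zeros((2, 4, 8, 8))                  # (batch, channels, h, w)
alphas_cumprod = jnp.linspace(0.9999, 0.01, 1000)  # illustrative schedule
print(ddpm_epsilon_loss(rng, latents, alphas_cumprod))
```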
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_controlnet.py
DELETED
@@ -1,28 +0,0 @@
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel  # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline  # noqa: F401


deprecate(
    "stable diffusion controlnet",
    "0.22.0",
    "Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.",
    standard_warn=False,
    stacklevel=3,
)
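The deprecation message itself names the replacement, so code that still uses the old path only needs its import updated:

# Replacement import named by the deprecation message above.
from diffusers import StableDiffusionControlNetPipeline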
spaces/Andy1621/uniformer_image_detection/configs/_base_/models/mask_rcnn_r50_fpn.py
DELETED
@@ -1,120 +0,0 @@
# model settings
model = dict(
    type='MaskRCNN',
    pretrained='torchvision://resnet50',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch'),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        num_outs=5),
    rpn_head=dict(
        type='RPNHead',
        in_channels=256,
        feat_channels=256,
        anchor_generator=dict(
            type='AnchorGenerator',
            scales=[8],
            ratios=[0.5, 1.0, 2.0],
            strides=[4, 8, 16, 32, 64]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[1.0, 1.0, 1.0, 1.0]),
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
    roi_head=dict(
        type='StandardRoIHead',
        bbox_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32]),
        bbox_head=dict(
            type='Shared2FCBBoxHead',
            in_channels=256,
            fc_out_channels=1024,
            roi_feat_size=7,
            num_classes=80,
            bbox_coder=dict(
                type='DeltaXYWHBBoxCoder',
                target_means=[0., 0., 0., 0.],
                target_stds=[0.1, 0.1, 0.2, 0.2]),
            reg_class_agnostic=False,
            loss_cls=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
            loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
        mask_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32]),
        mask_head=dict(
            type='FCNMaskHead',
            num_convs=4,
            in_channels=256,
            conv_out_channels=256,
            num_classes=80,
            loss_mask=dict(
                type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))),
    # model training and testing settings
    train_cfg=dict(
        rpn=dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.7,
                neg_iou_thr=0.3,
                min_pos_iou=0.3,
                match_low_quality=True,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=256,
                pos_fraction=0.5,
                neg_pos_ub=-1,
                add_gt_as_proposals=False),
            allowed_border=-1,
            pos_weight=-1,
            debug=False),
        rpn_proposal=dict(
            nms_pre=2000,
            max_per_img=1000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0),
        rcnn=dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.5,
                neg_iou_thr=0.5,
                min_pos_iou=0.5,
                match_low_quality=True,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=512,
                pos_fraction=0.25,
                neg_pos_ub=-1,
                add_gt_as_proposals=True),
            mask_size=28,
            pos_weight=-1,
            debug=False)),
    test_cfg=dict(
        rpn=dict(
            nms_pre=1000,
            max_per_img=1000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0),
        rcnn=dict(
            score_thr=0.05,
            nms=dict(type='nms', iou_threshold=0.5),
            max_per_img=100,
            mask_thr_binary=0.5)))
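Configs like this one are plain Python files consumed through mmcv's Config loader. A minimal sketch of how a downstream script reads it, assuming mmcv 1.x is installed and the path is relative to the repo root:

from mmcv import Config

cfg = Config.fromfile('configs/_base_/models/mask_rcnn_r50_fpn.py')
print(cfg.model.rpn_head.anchor_generator.strides)  # [4, 8, 16, 32, 64]
print(cfg.model.roi_head.bbox_head.num_classes)     # 80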
spaces/Andy1621/uniformer_image_segmentation/configs/ann/ann_r50-d8_512x512_80k_ade20k.py
DELETED
@@ -1,6 +0,0 @@
_base_ = [
    '../_base_/models/ann_r50-d8.py', '../_base_/datasets/ade20k.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
]
model = dict(
    decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150))
spaces/Andy1621/uniformer_image_segmentation/configs/fcn/fcn_d6_r50b-d16_769x769_80k_cityscapes.py
DELETED
@@ -1,2 +0,0 @@
_base_ = './fcn_d6_r50-d16_769x769_80k_cityscapes.py'
model = dict(pretrained='torchvision://resnet50', backbone=dict(type='ResNet'))
spaces/Andy1621/uniformer_image_segmentation/configs/pspnet/pspnet_r18b-d8_512x1024_80k_cityscapes.py
DELETED
@@ -1,9 +0,0 @@
_base_ = './pspnet_r50-d8_512x1024_80k_cityscapes.py'
model = dict(
    pretrained='torchvision://resnet18',
    backbone=dict(type='ResNet', depth=18),
    decode_head=dict(
        in_channels=512,
        channels=128,
    ),
    auxiliary_head=dict(in_channels=256, channels=64))
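All three segmentation configs above rely on the same mechanism: `_base_` pulls in parent config files, and the local `model = dict(...)` is merged into the inherited settings recursively, so only overridden keys need restating. An illustrative sketch of that merge rule (not mmcv's actual implementation):

def merge_cfg(base: dict, override: dict) -> dict:
    """Recursively fold `override` into `base`, dict by dict."""
    merged = dict(base)
    for key, value in override.items():
        if isinstance(value, dict) and isinstance(merged.get(key), dict):
            merged[key] = merge_cfg(merged[key], value)
        else:
            merged[key] = value
    return merged

base = {'backbone': {'type': 'ResNet', 'depth': 50}, 'decode_head': {'in_channels': 2048}}
child = {'backbone': {'depth': 18}, 'decode_head': {'in_channels': 512}}
print(merge_cfg(base, child))
# {'backbone': {'type': 'ResNet', 'depth': 18}, 'decode_head': {'in_channels': 512}}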
spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/guided_diffusion/model-card.md
DELETED
@@ -1,59 +0,0 @@
# Overview

These are diffusion models and noised image classifiers described in the paper [Diffusion Models Beat GANs on Image Synthesis](https://arxiv.org/abs/2105.05233).
Included in this release are the following models:

 * Noisy ImageNet classifiers at resolutions 64x64, 128x128, 256x256, 512x512
 * A class-unconditional ImageNet diffusion model at resolution 256x256
 * Class conditional ImageNet diffusion models at 64x64, 128x128, 256x256, 512x512 resolutions
 * Class-conditional ImageNet upsampling diffusion models: 64x64->256x256, 128x128->512x512
 * Diffusion models trained on three LSUN classes at 256x256 resolution: cat, horse, bedroom

# Datasets

All of the models we are releasing were either trained on the [ILSVRC 2012 subset of ImageNet](http://www.image-net.org/challenges/LSVRC/2012/) or on single classes of [LSUN](https://arxiv.org/abs/1506.03365).
Here, we describe characteristics of these datasets which impact model behavior:

**LSUN**: This dataset was collected in 2015 using a combination of human labeling (from Amazon Mechanical Turk) and automated data labeling.
 * Each of the three classes we consider contains over a million images.
 * The dataset creators found that the label accuracy was roughly 90% across the entire LSUN dataset when measured by trained experts.
 * Images are scraped from the internet, and LSUN cat images in particular often follow a “meme” format.
 * We found that there are occasionally humans in these photos, including faces, especially within the cat class.

**ILSVRC 2012 subset of ImageNet**: This dataset was curated in 2012 and consists of roughly one million images, each belonging to one of 1000 classes.
 * A large portion of the classes in this dataset are animals, plants, and other naturally-occurring objects.
 * Many images contain humans, although usually these humans aren’t reflected by the class label (e.g. the class “Tench, tinca tinca” contains many photos of people holding fish).

# Performance

These models are intended to generate samples consistent with their training distributions.
This has been measured in terms of FID, Precision, and Recall.
These metrics all rely on the representations of a [pre-trained Inception-V3 model](https://arxiv.org/abs/1512.00567),
which was trained on ImageNet, and so is likely to focus more on the ImageNet classes (such as animals) than on other visual features (such as human faces).

Qualitatively, the samples produced by these models often look highly realistic, especially when a diffusion model is combined with a noisy classifier.

# Intended Use

These models are intended to be used for research purposes only.
In particular, they can be used as a baseline for generative modeling research, or as a starting point to build off of for such research.

These models are not intended to be commercially deployed.
Additionally, they are not intended to be used to create propaganda or offensive imagery.

Before releasing these models, we probed their ability to ease the creation of targeted imagery, since doing so could be potentially harmful.
We did this either by fine-tuning our ImageNet models on a target LSUN class, or through classifier guidance with publicly available [CLIP models](https://github.com/openai/CLIP).
 * To probe fine-tuning capabilities, we restricted our compute budget to roughly $100 and tried both standard fine-tuning,
and a diffusion-specific approach where we train a specialized classifier for the LSUN class. The resulting FIDs were significantly worse than publicly available GAN models, indicating that fine-tuning an ImageNet diffusion model does not significantly lower the cost of image generation.
 * To probe guidance with CLIP, we tried two approaches for using pre-trained CLIP models for classifier guidance. Either we fed the noised image to CLIP directly and used its gradients, or we fed the diffusion model's denoised prediction to the CLIP model and differentiated through the whole process. In both cases, we found that it was difficult to recover information from the CLIP model, indicating that these diffusion models are unlikely to make it significantly easier to extract knowledge from CLIP compared to existing GAN models.

# Limitations

These models sometimes produce highly unrealistic outputs, particularly when generating images containing human faces.
This may stem from ImageNet's emphasis on non-human objects.

While classifier guidance can improve sample quality, it reduces diversity, resulting in some modes of the data distribution being underrepresented.
This can potentially amplify existing biases in the training dataset such as gender and racial biases.

Because ImageNet and LSUN contain images from the internet, they include photos of real people, and the model may have memorized some of the information contained in these photos.
However, these images are already publicly available, and existing generative models trained on ImageNet have not demonstrated significant leakage of this information.
spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/guided_diffusion/scripts/image_train.py
DELETED
@@ -1,83 +0,0 @@
"""
Train a diffusion model on images.
"""

import argparse

from guided_diffusion import dist_util, logger
from guided_diffusion.image_datasets import load_data
from guided_diffusion.resample import create_named_schedule_sampler
from guided_diffusion.script_util import (
    model_and_diffusion_defaults,
    create_model_and_diffusion,
    args_to_dict,
    add_dict_to_argparser,
)
from guided_diffusion.train_util import TrainLoop


def main():
    args = create_argparser().parse_args()

    dist_util.setup_dist()
    logger.configure()

    logger.log("creating model and diffusion...")
    model, diffusion = create_model_and_diffusion(
        **args_to_dict(args, model_and_diffusion_defaults().keys())
    )
    model.to(dist_util.dev())
    schedule_sampler = create_named_schedule_sampler(args.schedule_sampler, diffusion)

    logger.log("creating data loader...")
    data = load_data(
        data_dir=args.data_dir,
        batch_size=args.batch_size,
        image_size=args.image_size,
        class_cond=args.class_cond,
    )

    logger.log("training...")
    TrainLoop(
        model=model,
        diffusion=diffusion,
        data=data,
        batch_size=args.batch_size,
        microbatch=args.microbatch,
        lr=args.lr,
        ema_rate=args.ema_rate,
        log_interval=args.log_interval,
        save_interval=args.save_interval,
        resume_checkpoint=args.resume_checkpoint,
        use_fp16=args.use_fp16,
        fp16_scale_growth=args.fp16_scale_growth,
        schedule_sampler=schedule_sampler,
        weight_decay=args.weight_decay,
        lr_anneal_steps=args.lr_anneal_steps,
    ).run_loop()


def create_argparser():
    defaults = dict(
        data_dir="",
        schedule_sampler="uniform",
        lr=1e-4,
        weight_decay=0.0,
        lr_anneal_steps=0,
        batch_size=1,
        microbatch=-1,  # -1 disables microbatches
        ema_rate="0.9999",  # comma-separated list of EMA values
        log_interval=10,
        save_interval=10000,
        resume_checkpoint="",
        use_fp16=False,
        fp16_scale_growth=1e-3,
    )
    defaults.update(model_and_diffusion_defaults())
    parser = argparse.ArgumentParser()
    add_dict_to_argparser(parser, defaults)
    return parser


if __name__ == "__main__":
    main()
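Every key in the defaults dict above becomes a CLI flag via `add_dict_to_argparser`. An illustrative sketch of that pattern (a simplification, not the repo's actual helper in `guided_diffusion/script_util.py`, which also special-cases bools):

import argparse

def add_dict_to_argparser_sketch(parser, defaults):
    # One --flag per defaults key, typed after the default value.
    for key, value in defaults.items():
        parser.add_argument(f"--{key}", default=value,
                            type=type(value) if value is not None else str)

parser = argparse.ArgumentParser()
add_dict_to_argparser_sketch(parser, {"data_dir": "", "lr": 1e-4, "batch_size": 1})
args = parser.parse_args(["--data_dir", "./images", "--batch_size", "4"])
print(args.data_dir, args.lr, args.batch_size)  # ./images 0.0001 4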
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/core/seg/sampler/ohem_pixel_sampler.py
DELETED
@@ -1,76 +0,0 @@
import torch
import torch.nn.functional as F

from ..builder import PIXEL_SAMPLERS
from .base_pixel_sampler import BasePixelSampler


@PIXEL_SAMPLERS.register_module()
class OHEMPixelSampler(BasePixelSampler):
    """Online Hard Example Mining Sampler for segmentation.

    Args:
        context (nn.Module): The context of sampler, subclass of
            :obj:`BaseDecodeHead`.
        thresh (float, optional): The threshold for hard example selection.
            Predictions below it are treated as low confidence. If not
            specified, the hard examples will be the pixels of top
            ``min_kept`` loss. Default: None.
        min_kept (int, optional): The minimum number of predictions to keep.
            Default: 100000.
    """

    def __init__(self, context, thresh=None, min_kept=100000):
        super(OHEMPixelSampler, self).__init__()
        self.context = context
        assert min_kept > 1
        self.thresh = thresh
        self.min_kept = min_kept

    def sample(self, seg_logit, seg_label):
        """Sample pixels that have high loss or with low prediction confidence.

        Args:
            seg_logit (torch.Tensor): segmentation logits, shape (N, C, H, W)
            seg_label (torch.Tensor): segmentation label, shape (N, 1, H, W)

        Returns:
            torch.Tensor: segmentation weight, shape (N, H, W)
        """
        with torch.no_grad():
            assert seg_logit.shape[2:] == seg_label.shape[2:]
            assert seg_label.shape[1] == 1
            seg_label = seg_label.squeeze(1).long()
            batch_kept = self.min_kept * seg_label.size(0)
            valid_mask = seg_label != self.context.ignore_index
            seg_weight = seg_logit.new_zeros(size=seg_label.size())
            valid_seg_weight = seg_weight[valid_mask]
            if self.thresh is not None:
                seg_prob = F.softmax(seg_logit, dim=1)

                tmp_seg_label = seg_label.clone().unsqueeze(1)
                tmp_seg_label[tmp_seg_label == self.context.ignore_index] = 0
                seg_prob = seg_prob.gather(1, tmp_seg_label).squeeze(1)
                sort_prob, sort_indices = seg_prob[valid_mask].sort()

                if sort_prob.numel() > 0:
                    min_threshold = sort_prob[min(batch_kept,
                                                  sort_prob.numel() - 1)]
                else:
                    min_threshold = 0.0
                threshold = max(min_threshold, self.thresh)
                valid_seg_weight[seg_prob[valid_mask] < threshold] = 1.
            else:
                losses = self.context.loss_decode(
                    seg_logit,
                    seg_label,
                    weight=None,
                    ignore_index=self.context.ignore_index,
                    reduction_override='none')
                # faster than topk according to https://github.com/pytorch/pytorch/issues/22812  # noqa
                _, sort_indices = losses[valid_mask].sort(descending=True)
                valid_seg_weight[sort_indices[:batch_kept]] = 1.

            seg_weight[valid_mask] = valid_seg_weight

            return seg_weight
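To see what the thresh-based branch above selects, here is a toy example on four valid pixels (all values are illustrative):

import torch

seg_prob = torch.tensor([0.9, 0.2, 0.6, 0.1])  # P(ground-truth class) per valid pixel
thresh, batch_kept = 0.7, 2

sort_prob, _ = seg_prob.sort()
# Guarantees at least `batch_kept` pixels survive even if thresh is strict.
min_threshold = sort_prob[min(batch_kept, sort_prob.numel() - 1)]  # 0.6
threshold = max(min_threshold.item(), thresh)                      # 0.7

weight = (seg_prob < threshold).float()
print(weight)  # tensor([0., 1., 1., 1.]) -> the three least confident pixels get weight 1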
spaces/Anonymous-sub/Rerender/ControlNet/ldm/modules/attention.py
DELETED
@@ -1,341 +0,0 @@
from inspect import isfunction
import math
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange, repeat
from typing import Optional, Any

from ldm.modules.diffusionmodules.util import checkpoint


try:
    import xformers
    import xformers.ops
    XFORMERS_IS_AVAILBLE = True
except:
    XFORMERS_IS_AVAILBLE = False

# CrossAttn precision handling
import os
_ATTN_PRECISION = os.environ.get("ATTN_PRECISION", "fp32")


def exists(val):
    return val is not None


def uniq(arr):
    return {el: True for el in arr}.keys()


def default(val, d):
    if exists(val):
        return val
    return d() if isfunction(d) else d


def max_neg_value(t):
    return -torch.finfo(t.dtype).max


def init_(tensor):
    dim = tensor.shape[-1]
    std = 1 / math.sqrt(dim)
    tensor.uniform_(-std, std)
    return tensor


# feedforward
class GEGLU(nn.Module):
    def __init__(self, dim_in, dim_out):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def forward(self, x):
        x, gate = self.proj(x).chunk(2, dim=-1)
        return x * F.gelu(gate)


class FeedForward(nn.Module):
    def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = default(dim_out, dim)
        project_in = nn.Sequential(
            nn.Linear(dim, inner_dim),
            nn.GELU()
        ) if not glu else GEGLU(dim, inner_dim)

        self.net = nn.Sequential(
            project_in,
            nn.Dropout(dropout),
            nn.Linear(inner_dim, dim_out)
        )

    def forward(self, x):
        return self.net(x)


def zero_module(module):
    """
    Zero out the parameters of a module and return it.
    """
    for p in module.parameters():
        p.detach().zero_()
    return module


def Normalize(in_channels):
    return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True)


class SpatialSelfAttention(nn.Module):
    def __init__(self, in_channels):
        super().__init__()
        self.in_channels = in_channels

        self.norm = Normalize(in_channels)
        self.q = torch.nn.Conv2d(in_channels,
                                 in_channels,
                                 kernel_size=1,
                                 stride=1,
                                 padding=0)
        self.k = torch.nn.Conv2d(in_channels,
                                 in_channels,
                                 kernel_size=1,
                                 stride=1,
                                 padding=0)
        self.v = torch.nn.Conv2d(in_channels,
                                 in_channels,
                                 kernel_size=1,
                                 stride=1,
                                 padding=0)
        self.proj_out = torch.nn.Conv2d(in_channels,
                                        in_channels,
                                        kernel_size=1,
                                        stride=1,
                                        padding=0)

    def forward(self, x):
        h_ = x
        h_ = self.norm(h_)
        q = self.q(h_)
        k = self.k(h_)
        v = self.v(h_)

        # compute attention
        b, c, h, w = q.shape
        q = rearrange(q, 'b c h w -> b (h w) c')
        k = rearrange(k, 'b c h w -> b c (h w)')
        w_ = torch.einsum('bij,bjk->bik', q, k)

        w_ = w_ * (int(c)**(-0.5))
        w_ = torch.nn.functional.softmax(w_, dim=2)

        # attend to values
        v = rearrange(v, 'b c h w -> b c (h w)')
        w_ = rearrange(w_, 'b i j -> b j i')
        h_ = torch.einsum('bij,bjk->bik', v, w_)
        h_ = rearrange(h_, 'b c (h w) -> b c h w', h=h)
        h_ = self.proj_out(h_)

        return x + h_


class CrossAttention(nn.Module):
    def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.):
        super().__init__()
        inner_dim = dim_head * heads
        context_dim = default(context_dim, query_dim)

        self.scale = dim_head ** -0.5
        self.heads = heads

        self.to_q = nn.Linear(query_dim, inner_dim, bias=False)
        self.to_k = nn.Linear(context_dim, inner_dim, bias=False)
        self.to_v = nn.Linear(context_dim, inner_dim, bias=False)

        self.to_out = nn.Sequential(
            nn.Linear(inner_dim, query_dim),
            nn.Dropout(dropout)
        )

    def forward(self, x, context=None, mask=None):
        h = self.heads

        q = self.to_q(x)
        context = default(context, x)
        k = self.to_k(context)
        v = self.to_v(context)

        q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))

        # force cast to fp32 to avoid overflowing
        if _ATTN_PRECISION == "fp32":
            with torch.autocast(enabled=False, device_type='cuda'):
                q, k = q.float(), k.float()
                sim = einsum('b i d, b j d -> b i j', q, k) * self.scale
        else:
            sim = einsum('b i d, b j d -> b i j', q, k) * self.scale

        del q, k

        if exists(mask):
            mask = rearrange(mask, 'b ... -> b (...)')
            max_neg_value = -torch.finfo(sim.dtype).max
            mask = repeat(mask, 'b j -> (b h) () j', h=h)
            sim.masked_fill_(~mask, max_neg_value)

        # attention, what we cannot get enough of
        sim = sim.softmax(dim=-1)

        out = einsum('b i j, b j d -> b i d', sim, v)
        out = rearrange(out, '(b h) n d -> b n (h d)', h=h)
        return self.to_out(out)


class MemoryEfficientCrossAttention(nn.Module):
    # https://github.com/MatthieuTPHR/diffusers/blob/d80b531ff8060ec1ea982b65a1b8df70f73aa67c/src/diffusers/models/attention.py#L223
    def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.0):
        super().__init__()
        print(f"Setting up {self.__class__.__name__}. Query dim is {query_dim}, context_dim is {context_dim} and using "
              f"{heads} heads.")
        inner_dim = dim_head * heads
        context_dim = default(context_dim, query_dim)

        self.heads = heads
        self.dim_head = dim_head

        self.to_q = nn.Linear(query_dim, inner_dim, bias=False)
        self.to_k = nn.Linear(context_dim, inner_dim, bias=False)
        self.to_v = nn.Linear(context_dim, inner_dim, bias=False)

        self.to_out = nn.Sequential(nn.Linear(inner_dim, query_dim), nn.Dropout(dropout))
        self.attention_op: Optional[Any] = None

    def forward(self, x, context=None, mask=None):
        q = self.to_q(x)
        context = default(context, x)
        k = self.to_k(context)
        v = self.to_v(context)

        b, _, _ = q.shape
        q, k, v = map(
            lambda t: t.unsqueeze(3)
            .reshape(b, t.shape[1], self.heads, self.dim_head)
            .permute(0, 2, 1, 3)
            .reshape(b * self.heads, t.shape[1], self.dim_head)
            .contiguous(),
            (q, k, v),
        )

        # actually compute the attention, what we cannot get enough of
        out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None, op=self.attention_op)

        if exists(mask):
            raise NotImplementedError
        out = (
            out.unsqueeze(0)
            .reshape(b, self.heads, out.shape[1], self.dim_head)
            .permute(0, 2, 1, 3)
            .reshape(b, out.shape[1], self.heads * self.dim_head)
        )
        return self.to_out(out)


class BasicTransformerBlock(nn.Module):
    ATTENTION_MODES = {
        "softmax": CrossAttention,  # vanilla attention
        "softmax-xformers": MemoryEfficientCrossAttention
    }

    def __init__(self, dim, n_heads, d_head, dropout=0., context_dim=None, gated_ff=True, checkpoint=True,
                 disable_self_attn=False):
        super().__init__()
        attn_mode = "softmax-xformers" if XFORMERS_IS_AVAILBLE else "softmax"
        assert attn_mode in self.ATTENTION_MODES
        attn_cls = self.ATTENTION_MODES[attn_mode]
        self.disable_self_attn = disable_self_attn
        self.attn1 = attn_cls(query_dim=dim, heads=n_heads, dim_head=d_head, dropout=dropout,
                              context_dim=context_dim if self.disable_self_attn else None)  # is a self-attention if not self.disable_self_attn
        self.ff = FeedForward(dim, dropout=dropout, glu=gated_ff)
        self.attn2 = attn_cls(query_dim=dim, context_dim=context_dim,
                              heads=n_heads, dim_head=d_head, dropout=dropout)  # is self-attn if context is none
        self.norm1 = nn.LayerNorm(dim)
        self.norm2 = nn.LayerNorm(dim)
        self.norm3 = nn.LayerNorm(dim)
        self.checkpoint = checkpoint

    def forward(self, x, context=None):
        return checkpoint(self._forward, (x, context), self.parameters(), self.checkpoint)

    def _forward(self, x, context=None):
        x = self.attn1(self.norm1(x), context=context if self.disable_self_attn else None) + x
        x = self.attn2(self.norm2(x), context=context) + x
        x = self.ff(self.norm3(x)) + x
        return x


class SpatialTransformer(nn.Module):
    """
    Transformer block for image-like data.
    First, project the input (aka embedding)
    and reshape to b, t, d.
    Then apply standard transformer action.
    Finally, reshape to image
    NEW: use_linear for more efficiency instead of the 1x1 convs
    """
    def __init__(self, in_channels, n_heads, d_head,
                 depth=1, dropout=0., context_dim=None,
                 disable_self_attn=False, use_linear=False,
                 use_checkpoint=True):
        super().__init__()
        if exists(context_dim) and not isinstance(context_dim, list):
            context_dim = [context_dim]
        self.in_channels = in_channels
        inner_dim = n_heads * d_head
        self.norm = Normalize(in_channels)
        if not use_linear:
            self.proj_in = nn.Conv2d(in_channels,
                                     inner_dim,
                                     kernel_size=1,
                                     stride=1,
                                     padding=0)
        else:
            self.proj_in = nn.Linear(in_channels, inner_dim)

        self.transformer_blocks = nn.ModuleList(
            [BasicTransformerBlock(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim[d],
                                   disable_self_attn=disable_self_attn, checkpoint=use_checkpoint)
             for d in range(depth)]
        )
        if not use_linear:
            self.proj_out = zero_module(nn.Conv2d(inner_dim,
                                                  in_channels,
                                                  kernel_size=1,
                                                  stride=1,
                                                  padding=0))
        else:
            self.proj_out = zero_module(nn.Linear(in_channels, inner_dim))
        self.use_linear = use_linear

    def forward(self, x, context=None):
        # note: if no context is given, cross-attention defaults to self-attention
        if not isinstance(context, list):
            context = [context]
        b, c, h, w = x.shape
        x_in = x
        x = self.norm(x)
        if not self.use_linear:
            x = self.proj_in(x)
        x = rearrange(x, 'b c h w -> b (h w) c').contiguous()
        if self.use_linear:
            x = self.proj_in(x)
        for i, block in enumerate(self.transformer_blocks):
            x = block(x, context=context[i])
        if self.use_linear:
            x = self.proj_out(x)
        x = rearrange(x, 'b (h w) c -> b c h w', h=h, w=w).contiguous()
        if not self.use_linear:
            x = self.proj_out(x)
        return x + x_in
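As a quick shape check for the vanilla CrossAttention above, a minimal smoke test (assumes torch and einops are installed, the module-level helpers above are in scope, and all sizes are illustrative):

import torch

attn = CrossAttention(query_dim=64, context_dim=32, heads=4, dim_head=16)
x = torch.randn(2, 100, 64)   # (batch, query tokens, query_dim)
ctx = torch.randn(2, 77, 32)  # (batch, context tokens, context_dim)
out = attn(x, context=ctx)
print(out.shape)              # torch.Size([2, 100, 64])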
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/__init__.py
DELETED
@@ -1,120 +0,0 @@
"""
pip._vendor is for vendoring dependencies of pip to prevent needing pip to
depend on something external.

Files inside of pip._vendor should be considered immutable and should only be
updated to versions from upstream.
"""
from __future__ import absolute_import

import glob
import os.path
import sys

# Downstream redistributors which have debundled our dependencies should also
# patch this value to be true. This will trigger the additional patching
# to cause things like "six" to be available as pip.
DEBUNDLED = False

# By default, look in this directory for a bunch of .whl files which we will
# add to the beginning of sys.path before attempting to import anything. This
# is done to support downstream re-distributors like Debian and Fedora who
# wish to create their own Wheels for our dependencies to aid in debundling.
WHEEL_DIR = os.path.abspath(os.path.dirname(__file__))


# Define a small helper function to alias our vendored modules to the real ones
# if the vendored ones do not exist. The idea for this was taken from
# https://github.com/kennethreitz/requests/pull/2567.
def vendored(modulename):
    vendored_name = "{0}.{1}".format(__name__, modulename)

    try:
        __import__(modulename, globals(), locals(), level=0)
    except ImportError:
        # We can just silently allow import failures to pass here. If we
        # got to this point it means that ``import pip._vendor.whatever``
        # failed and so did ``import whatever``. Since we're importing this
        # upfront in an attempt to alias imports, not erroring here will
        # just mean we get a regular import error whenever pip *actually*
        # tries to import one of these modules to use it, which actually
        # gives us a better error message than we would have otherwise
        # gotten.
        pass
    else:
        sys.modules[vendored_name] = sys.modules[modulename]
        base, head = vendored_name.rsplit(".", 1)
        setattr(sys.modules[base], head, sys.modules[modulename])


# If we're operating in a debundled setup, then we want to go ahead and trigger
# the aliasing of our vendored libraries as well as looking for wheels to add
# to our sys.path. This will cause all of this code to be a no-op typically
# however downstream redistributors can enable it in a consistent way across
# all platforms.
if DEBUNDLED:
    # Actually look inside of WHEEL_DIR to find .whl files and add them to the
    # front of our sys.path.
    sys.path[:] = glob.glob(os.path.join(WHEEL_DIR, "*.whl")) + sys.path

    # Actually alias all of our vendored dependencies.
    vendored("cachecontrol")
    vendored("certifi")
    vendored("colorama")
    vendored("distlib")
    vendored("distro")
    vendored("six")
    vendored("six.moves")
    vendored("six.moves.urllib")
    vendored("six.moves.urllib.parse")
    vendored("packaging")
    vendored("packaging.version")
    vendored("packaging.specifiers")
    vendored("pep517")
    vendored("pkg_resources")
    vendored("platformdirs")
    vendored("progress")
    vendored("requests")
    vendored("requests.exceptions")
    vendored("requests.packages")
    vendored("requests.packages.urllib3")
    vendored("requests.packages.urllib3._collections")
    vendored("requests.packages.urllib3.connection")
    vendored("requests.packages.urllib3.connectionpool")
    vendored("requests.packages.urllib3.contrib")
    vendored("requests.packages.urllib3.contrib.ntlmpool")
    vendored("requests.packages.urllib3.contrib.pyopenssl")
    vendored("requests.packages.urllib3.exceptions")
    vendored("requests.packages.urllib3.fields")
    vendored("requests.packages.urllib3.filepost")
    vendored("requests.packages.urllib3.packages")
    vendored("requests.packages.urllib3.packages.ordered_dict")
    vendored("requests.packages.urllib3.packages.six")
    vendored("requests.packages.urllib3.packages.ssl_match_hostname")
    vendored("requests.packages.urllib3.packages.ssl_match_hostname."
             "_implementation")
    vendored("requests.packages.urllib3.poolmanager")
    vendored("requests.packages.urllib3.request")
    vendored("requests.packages.urllib3.response")
    vendored("requests.packages.urllib3.util")
    vendored("requests.packages.urllib3.util.connection")
    vendored("requests.packages.urllib3.util.request")
    vendored("requests.packages.urllib3.util.response")
    vendored("requests.packages.urllib3.util.retry")
    vendored("requests.packages.urllib3.util.ssl_")
    vendored("requests.packages.urllib3.util.timeout")
    vendored("requests.packages.urllib3.util.url")
    vendored("resolvelib")
    vendored("rich")
    vendored("rich.console")
    vendored("rich.highlighter")
    vendored("rich.logging")
    vendored("rich.markup")
    vendored("rich.progress")
    vendored("rich.segment")
    vendored("rich.style")
    vendored("rich.text")
    vendored("rich.traceback")
    vendored("tenacity")
    vendored("tomli")
    vendored("urllib3")
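The whole mechanism hinges on registering one module object under a second name in sys.modules. A self-contained sketch of that aliasing trick (the `_myvendor` package name is made up for illustration):

import importlib
import json
import sys

# Register the real json module under a vendored-style dotted name.
sys.modules["_myvendor.json"] = json

# The import machinery resolves the alias straight from sys.modules.
assert importlib.import_module("_myvendor.json") is json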
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/box.py
DELETED
@@ -1,517 +0,0 @@
import sys
from typing import TYPE_CHECKING, Iterable, List

if sys.version_info >= (3, 8):
    from typing import Literal
else:
    from pip._vendor.typing_extensions import Literal  # pragma: no cover


from ._loop import loop_last

if TYPE_CHECKING:
    from pip._vendor.rich.console import ConsoleOptions


class Box:
    """Defines characters to render boxes.

    ┌─┬┐ top
    │ ││ head
    ├─┼┤ head_row
    │ ││ mid
    ├─┼┤ row
    ├─┼┤ foot_row
    │ ││ foot
    └─┴┘ bottom

    Args:
        box (str): Characters making up box.
        ascii (bool, optional): True if this box uses ascii characters only. Default is False.
    """

    def __init__(self, box: str, *, ascii: bool = False) -> None:
        self._box = box
        self.ascii = ascii
        line1, line2, line3, line4, line5, line6, line7, line8 = box.splitlines()
        # top
        self.top_left, self.top, self.top_divider, self.top_right = iter(line1)
        # head
        self.head_left, _, self.head_vertical, self.head_right = iter(line2)
        # head_row
        (
            self.head_row_left,
            self.head_row_horizontal,
            self.head_row_cross,
            self.head_row_right,
        ) = iter(line3)

        # mid
        self.mid_left, _, self.mid_vertical, self.mid_right = iter(line4)
        # row
        self.row_left, self.row_horizontal, self.row_cross, self.row_right = iter(line5)
        # foot_row
        (
            self.foot_row_left,
            self.foot_row_horizontal,
            self.foot_row_cross,
            self.foot_row_right,
        ) = iter(line6)
        # foot
        self.foot_left, _, self.foot_vertical, self.foot_right = iter(line7)
        # bottom
        self.bottom_left, self.bottom, self.bottom_divider, self.bottom_right = iter(
            line8
        )

    def __repr__(self) -> str:
        return "Box(...)"

    def __str__(self) -> str:
        return self._box

    def substitute(self, options: "ConsoleOptions", safe: bool = True) -> "Box":
        """Substitute this box for another if it won't render due to platform issues.

        Args:
            options (ConsoleOptions): Console options used in rendering.
            safe (bool, optional): Substitute this for another Box if there are known problems
                displaying on the platform (currently only relevant on Windows). Default is True.

        Returns:
            Box: A different Box or the same Box.
        """
        box = self
        if options.legacy_windows and safe:
            box = LEGACY_WINDOWS_SUBSTITUTIONS.get(box, box)
        if options.ascii_only and not box.ascii:
            box = ASCII
        return box

    def get_plain_headed_box(self) -> "Box":
        """If this box uses special characters for the borders of the header, then
        return the equivalent box that does not.

        Returns:
            Box: The most similar Box that doesn't use header-specific box characters.
                If the current Box already satisfies this criterion, then it's returned.
        """
        return PLAIN_HEADED_SUBSTITUTIONS.get(self, self)

    def get_top(self, widths: Iterable[int]) -> str:
        """Get the top of a simple box.

        Args:
            widths (List[int]): Widths of columns.

        Returns:
            str: A string of box characters.
        """

        parts: List[str] = []
        append = parts.append
        append(self.top_left)
        for last, width in loop_last(widths):
            append(self.top * width)
            if not last:
                append(self.top_divider)
        append(self.top_right)
        return "".join(parts)

    def get_row(
        self,
        widths: Iterable[int],
        level: Literal["head", "row", "foot", "mid"] = "row",
        edge: bool = True,
    ) -> str:
        """Get the top of a simple box.

        Args:
            width (List[int]): Widths of columns.

        Returns:
            str: A string of box characters.
        """
        if level == "head":
            left = self.head_row_left
            horizontal = self.head_row_horizontal
            cross = self.head_row_cross
            right = self.head_row_right
        elif level == "row":
            left = self.row_left
            horizontal = self.row_horizontal
            cross = self.row_cross
            right = self.row_right
        elif level == "mid":
            left = self.mid_left
            horizontal = " "
            cross = self.mid_vertical
            right = self.mid_right
        elif level == "foot":
            left = self.foot_row_left
            horizontal = self.foot_row_horizontal
            cross = self.foot_row_cross
            right = self.foot_row_right
        else:
            raise ValueError("level must be 'head', 'row' or 'foot'")

        parts: List[str] = []
        append = parts.append
        if edge:
            append(left)
        for last, width in loop_last(widths):
            append(horizontal * width)
            if not last:
                append(cross)
        if edge:
            append(right)
        return "".join(parts)

    def get_bottom(self, widths: Iterable[int]) -> str:
        """Get the bottom of a simple box.

        Args:
            widths (List[int]): Widths of columns.

        Returns:
            str: A string of box characters.
        """

        parts: List[str] = []
        append = parts.append
        append(self.bottom_left)
        for last, width in loop_last(widths):
            append(self.bottom * width)
            if not last:
                append(self.bottom_divider)
        append(self.bottom_right)
        return "".join(parts)


ASCII: Box = Box(
    """\
+--+
| ||
|-+|
| ||
|-+|
|-+|
| ||
+--+
""",
    ascii=True,
)

ASCII2: Box = Box(
    """\
+-++
| ||
+-++
| ||
+-++
+-++
| ||
+-++
""",
    ascii=True,
)

ASCII_DOUBLE_HEAD: Box = Box(
    """\
+-++
| ||
+=++
| ||
+-++
+-++
| ||
+-++
""",
    ascii=True,
)

SQUARE: Box = Box(
    """\
┌─┬┐
│ ││
├─┼┤
│ ││
├─┼┤
├─┼┤
│ ││
└─┴┘
"""
)

SQUARE_DOUBLE_HEAD: Box = Box(
    """\
┌─┬┐
│ ││
╞═╪╡
│ ││
├─┼┤
├─┼┤
│ ││
└─┴┘
"""
)

MINIMAL: Box = Box(
    """\
  ╷ 
  │ 
╶─┼╴
  │ 
╶─┼╴
╶─┼╴
  │ 
  ╵ 
"""
)


MINIMAL_HEAVY_HEAD: Box = Box(
    """\
  ╷ 
  │ 
╺━┿╸
  │ 
╶─┼╴
╶─┼╴
  │ 
  ╵ 
"""
)

MINIMAL_DOUBLE_HEAD: Box = Box(
    """\
  ╷ 
  │ 
 ═╪ 
  │ 
 ─┼ 
 ─┼ 
  │ 
  ╵ 
"""
)


SIMPLE: Box = Box(
    """\
    
    
 ── 
    
    
 ── 
    
    
"""
)

SIMPLE_HEAD: Box = Box(
    """\
    
    
 ── 
    
    
    
    
    
"""
)


SIMPLE_HEAVY: Box = Box(
    """\
    
    
 ━━ 
    
    
 ━━ 
    
    
"""
)


HORIZONTALS: Box = Box(
    """\
 ── 
    
 ── 
    
 ── 
 ── 
    
 ── 
"""
)

ROUNDED: Box = Box(
    """\
╭─┬╮
│ ││
├─┼┤
│ ││
├─┼┤
├─┼┤
│ ││
╰─┴╯
"""
)

HEAVY: Box = Box(
    """\
┏━┳┓
┃ ┃┃
┣━╋┫
┃ ┃┃
┣━╋┫
┣━╋┫
┃ ┃┃
┗━┻┛
"""
)

HEAVY_EDGE: Box = Box(
    """\
┏━┯┓
┃ │┃
┠─┼┨
┃ │┃
┠─┼┨
┠─┼┨
┃ │┃
┗━┷┛
"""
)

HEAVY_HEAD: Box = Box(
    """\
┏━┳┓
┃ ┃┃
┡━╇┩
│ ││
├─┼┤
├─┼┤
│ ││
└─┴┘
"""
)

DOUBLE: Box = Box(
    """\
╔═╦╗
║ ║║
╠═╬╣
║ ║║
╠═╬╣
╠═╬╣
║ ║║
╚═╩╝
"""
)

DOUBLE_EDGE: Box = Box(
    """\
╔═╤╗
║ │║
╟─┼╢
║ │║
╟─┼╢
╟─┼╢
║ │║
╚═╧╝
"""
)

MARKDOWN: Box = Box(
    """\
    
| ||
|-||
| ||
|-||
|-||
| ||
    
""",
    ascii=True,
)

# Map Boxes that don't render with raster fonts on to equivalent that do
LEGACY_WINDOWS_SUBSTITUTIONS = {
    ROUNDED: SQUARE,
    MINIMAL_HEAVY_HEAD: MINIMAL,
    SIMPLE_HEAVY: SIMPLE,
    HEAVY: SQUARE,
    HEAVY_EDGE: SQUARE,
    HEAVY_HEAD: SQUARE,
}

# Map headed boxes to their headerless equivalents
PLAIN_HEADED_SUBSTITUTIONS = {
    HEAVY_HEAD: SQUARE,
    SQUARE_DOUBLE_HEAD: SQUARE,
    MINIMAL_DOUBLE_HEAD: MINIMAL,
    MINIMAL_HEAVY_HEAD: MINIMAL,
    ASCII_DOUBLE_HEAD: ASCII2,
}


if __name__ == "__main__":  # pragma: no cover

    from pip._vendor.rich.columns import Columns
    from pip._vendor.rich.panel import Panel

    from . import box as box
    from .console import Console
    from .table import Table
    from .text import Text

    console = Console(record=True)

    BOXES = [
        "ASCII",
        "ASCII2",
        "ASCII_DOUBLE_HEAD",
        "SQUARE",
        "SQUARE_DOUBLE_HEAD",
        "MINIMAL",
        "MINIMAL_HEAVY_HEAD",
        "MINIMAL_DOUBLE_HEAD",
        "SIMPLE",
        "SIMPLE_HEAD",
        "SIMPLE_HEAVY",
        "HORIZONTALS",
        "ROUNDED",
        "HEAVY",
        "HEAVY_EDGE",
        "HEAVY_HEAD",
        "DOUBLE",
        "DOUBLE_EDGE",
        "MARKDOWN",
    ]

    console.print(Panel("[bold green]Box Constants", style="green"), justify="center")
    console.print()

    columns = Columns(expand=True, padding=2)
    for box_name in sorted(BOXES):
        table = Table(
            show_footer=True, style="dim", border_style="not dim", expand=True
        )
        table.add_column("Header 1", "Footer 1")
        table.add_column("Header 2", "Footer 2")
        table.add_row("Cell", "Cell")
        table.add_row("Cell", "Cell")
        table.box = getattr(box, box_name)
        table.title = Text(f"box.{box_name}", style="magenta")
        columns.add_renderable(table)
    console.print(columns)

    # console.save_svg("box.svg")
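The drawing helpers above compose a full frame line by line. A quick illustration using one of the module's own constants (the column widths are arbitrary):

widths = [7, 5]
print(ROUNDED.get_top(widths))               # ╭───────┬─────╮
print(ROUNDED.get_row(widths, level="row"))  # ├───────┼─────┤
print(ROUNDED.get_bottom(widths))            # ╰───────┴─────╯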
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/command/setopt.py
DELETED
@@ -1,149 +0,0 @@
-from distutils.util import convert_path
-from distutils import log
-from distutils.errors import DistutilsOptionError
-import distutils
-import os
-import configparser
-
-from setuptools import Command
-
-__all__ = ['config_file', 'edit_config', 'option_base', 'setopt']
-
-
-def config_file(kind="local"):
-    """Get the filename of the distutils, local, global, or per-user config
-
-    `kind` must be one of "local", "global", or "user"
-    """
-    if kind == 'local':
-        return 'setup.cfg'
-    if kind == 'global':
-        return os.path.join(
-            os.path.dirname(distutils.__file__), 'distutils.cfg'
-        )
-    if kind == 'user':
-        dot = os.name == 'posix' and '.' or ''
-        return os.path.expanduser(convert_path("~/%spydistutils.cfg" % dot))
-    raise ValueError(
-        "config_file() type must be 'local', 'global', or 'user'", kind
-    )
-
-
-def edit_config(filename, settings, dry_run=False):
-    """Edit a configuration file to include `settings`
-
-    `settings` is a dictionary of dictionaries or ``None`` values, keyed by
-    command/section name.  A ``None`` value means to delete the entire section,
-    while a dictionary lists settings to be changed or deleted in that section.
-    A setting of ``None`` means to delete that setting.
-    """
-    log.debug("Reading configuration from %s", filename)
-    opts = configparser.RawConfigParser()
-    opts.optionxform = lambda x: x
-    opts.read([filename])
-    for section, options in settings.items():
-        if options is None:
-            log.info("Deleting section [%s] from %s", section, filename)
-            opts.remove_section(section)
-        else:
-            if not opts.has_section(section):
-                log.debug("Adding new section [%s] to %s", section, filename)
-                opts.add_section(section)
-            for option, value in options.items():
-                if value is None:
-                    log.debug(
-                        "Deleting %s.%s from %s",
-                        section, option, filename
-                    )
-                    opts.remove_option(section, option)
-                    if not opts.options(section):
-                        log.info("Deleting empty [%s] section from %s",
-                                 section, filename)
-                        opts.remove_section(section)
-                else:
-                    log.debug(
-                        "Setting %s.%s to %r in %s",
-                        section, option, value, filename
-                    )
-                    opts.set(section, option, value)
-
-    log.info("Writing %s", filename)
-    if not dry_run:
-        with open(filename, 'w') as f:
-            opts.write(f)
-
-
-class option_base(Command):
-    """Abstract base class for commands that mess with config files"""
-
-    user_options = [
-        ('global-config', 'g',
-         "save options to the site-wide distutils.cfg file"),
-        ('user-config', 'u',
-         "save options to the current user's pydistutils.cfg file"),
-        ('filename=', 'f',
-         "configuration file to use (default=setup.cfg)"),
-    ]
-
-    boolean_options = [
-        'global-config', 'user-config',
-    ]
-
-    def initialize_options(self):
-        self.global_config = None
-        self.user_config = None
-        self.filename = None
-
-    def finalize_options(self):
-        filenames = []
-        if self.global_config:
-            filenames.append(config_file('global'))
-        if self.user_config:
-            filenames.append(config_file('user'))
-        if self.filename is not None:
-            filenames.append(self.filename)
-        if not filenames:
-            filenames.append(config_file('local'))
-        if len(filenames) > 1:
-            raise DistutilsOptionError(
-                "Must specify only one configuration file option",
-                filenames
-            )
-        self.filename, = filenames
-
-
-class setopt(option_base):
-    """Save command-line options to a file"""
-
-    description = "set an option in setup.cfg or another config file"
-
-    user_options = [
-        ('command=', 'c', 'command to set an option for'),
-        ('option=', 'o', 'option to set'),
-        ('set-value=', 's', 'value of the option'),
-        ('remove', 'r', 'remove (unset) the value'),
-    ] + option_base.user_options
-
-    boolean_options = option_base.boolean_options + ['remove']
-
-    def initialize_options(self):
-        option_base.initialize_options(self)
-        self.command = None
-        self.option = None
-        self.set_value = None
-        self.remove = None
-
-    def finalize_options(self):
-        option_base.finalize_options(self)
-        if self.command is None or self.option is None:
-            raise DistutilsOptionError("Must specify --command *and* --option")
-        if self.set_value is None and not self.remove:
-            raise DistutilsOptionError("Must specify --set-value or --remove")
-
-    def run(self):
-        edit_config(
-            self.filename, {
-                self.command: {self.option.replace('-', '_'): self.set_value}
-            },
-            self.dry_run
-        )
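
The deleted `setopt` command above is driven entirely by `edit_config`. For orientation, here is a minimal sketch of calling it directly, assuming the module is importable as `setuptools.command.setopt` and that a `setup.cfg` may be created in the working directory (the section and option names are illustrative only):

```python
# Minimal sketch: drive edit_config() directly instead of via the CLI form
# `python setup.py setopt -c bdist_wheel -o universal -s 1`.
# Assumes setuptools.command.setopt is importable and setup.cfg is writable.
from setuptools.command.setopt import edit_config

edit_config(
    "setup.cfg",
    {
        "bdist_wheel": {"universal": "1"},  # set [bdist_wheel] universal = 1
        "easy_install": None,               # drop the whole section, if present
    },
    dry_run=False,
)
```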
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/docs/notes/changelog.md
DELETED
@@ -1,48 +0,0 @@
-# Change Log and Backward Compatibility
-
-### Releases
-See release logs at
-[https://github.com/facebookresearch/detectron2/releases](https://github.com/facebookresearch/detectron2/releases)
-for new updates.
-
-### Backward Compatibility
-
-Due to the research nature of what the library does, there might be backward incompatible changes.
-But we try to reduce users' disruption in the following ways:
-* APIs listed in the [API documentation](https://detectron2.readthedocs.io/modules/index.html), including
-  function/class names, their arguments, and documented class attributes, are considered *stable* unless
-  otherwise noted in the documentation.
-  They are less likely to be broken, but if needed, will trigger a deprecation warning for a reasonable period
-  before getting broken, and will be documented in release logs.
-* Other functions/classes/attributes are considered internal, and are more likely to change.
-  However, we're aware that some of them may already be used by other projects, and in particular we may
-  use them for convenience among projects under `detectron2/projects`.
-  For such APIs, we may treat them as stable APIs and also apply the above strategies.
-  They may be promoted to stable when we're ready.
-* Projects under "detectron2/projects" or imported with "detectron2.projects" are research projects
-  and are all considered experimental.
-* Classes/functions that contain the word "default" or are explicitly documented to produce
-  "default behavior" may change their behaviors when new features are added.
-
-Despite the possible breakage, if a third-party project would like to keep up with the latest updates
-in detectron2, using it as a library will still be less disruptive than forking, because
-the frequency and scope of API changes will be much smaller than code changes.
-
-To see such changes, search for "incompatible changes" in the [release logs](https://github.com/facebookresearch/detectron2/releases).
-
-### Config Version Change Log
-
-Detectron2's config version has not been changed since open source.
-There is no need for an open source user to worry about this.
-
-* v1: Rename `RPN_HEAD.NAME` to `RPN.HEAD_NAME`.
-* v2: A batch of renames of many configurations before release.
-
-### Silent Regressions in Historical Versions
-
-We list a few silent regressions, since they may silently produce incorrect results and be hard to debug.
-
-* 04/01/2020 - 05/11/2020: Bad accuracy if `TRAIN_ON_PRED_BOXES` is set to True.
-* 03/30/2020 - 04/01/2020: ResNets are not correctly built.
-* 12/19/2019 - 12/26/2019: Using aspect ratio grouping causes a drop in accuracy.
-* - 11/9/2019: Test time augmentation does not predict the last category.
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet2_docs/MODEL_ZOO.md
DELETED
@@ -1,73 +0,0 @@
-# MODEL_ZOO
-
-### Common settings and notes
-
-- Multiscale training is used by default in all models. The results are all reported using single-scale testing.
-- We report runtime on our local workstation with a Titan Xp GPU and a Titan RTX GPU.
-- All models are trained on 8-GPU servers by default. The 1280 models are trained on 24G GPUs. Reducing the batch size with the linear learning rate rule should be fine.
-- All models can be downloaded directly from [Google drive](https://drive.google.com/drive/folders/1eae1cTX8tvIaCeof36sBgxrXEXALYlf-?usp=sharing).
-
-## COCO
-
-### CenterNet
-
-| Model                | val mAP | FPS (Titan Xp / Titan RTX) | links |
-|----------------------|---------|----------------------------|-------|
-| CenterNet-S4_DLA_8x  | 42.5    | 50 / 71 | [config](../configs/CenterNet-S4_DLA_8x.yaml)/[model](https://drive.google.com/file/d/1lNBhVHnZAEBRD66MFaHjm5Ij6Z4KYrJq/view?usp=sharing) |
-| CenterNet-FPN_R50_1x | 40.2    | 20 / 24 | [config](../configs/CenterNet-FPN_R50_1x.yaml)/[model](https://drive.google.com/file/d/1rVG1YTthMXvutC6jr9KoE2DthT5-jhGj/view?usp=sharing) |
-
-#### Note
-
-- `CenterNet-S4_DLA_8x` is a re-implemented version of the original CenterNet (stride 4), with several changes, including
-  - Using top-left-right-bottom box encoding and GIoU loss; adding regression loss to the center 3x3 region.
-  - Adding more positive pixels for the heatmap loss whose regression loss is small and within the center 3x3 region.
-  - Using heavier crop augmentation (EfficientDet-style crop ratio 0.1-2), and removing color augmentations.
-  - Using standard NMS instead of max pooling.
-  - Using a RetinaNet-style optimizer (SGD), learning rate rule (0.01 for each batch size 16), and schedule (8x12 epochs).
-- `CenterNet-FPN_R50_1x` is a (new) FPN version of CenterNet. It includes the changes above, and assigns objects to FPN levels based on a fixed size range. The model is trained with standard short-edge 640-800 multi-scale training for 12 epochs (1x).
-
-### CenterNet2
-
-| Model | val mAP | FPS (Titan Xp / Titan RTX) | links |
-|-------|---------|----------------------------|-------|
-| CenterNet2-F_R50_1x | 41.7 | 22 / 27 | [config](../configs/CenterNet2-F_R50_1x.yaml)/[model](X) |
-| CenterNet2_R50_1x | 42.9 | 18 / 24 | [config](../configs/CenterNet2_R50_1x.yaml)/[model](https://drive.google.com/file/d/1Osu1J_sskt_1FaGdfJKa4vd2N71TWS9W/view?usp=sharing) |
-| CenterNet2_X101-DCN_2x | 49.9 | 6 / 8 | [config](../configs/CenterNet2_X101-DCN_2x.yaml)/[model](https://drive.google.com/file/d/1IHgpUHVJWpvMuFUUetgKWsw27pRNN2oK/view?usp=sharing) |
-| CenterNet2_DLA-BiFPN-P3_4x | 43.8 | 40 / 50 | [config](../configs/CenterNet2_DLA-BiFPN-P3_4x.yaml)/[model](https://drive.google.com/file/d/12GUNlDW9RmOs40UEMSiiUsk5QK_lpGsE/view?usp=sharing) |
-| CenterNet2_DLA-BiFPN-P3_24x | 45.6 | 40 / 50 | [config](../configs/CenterNet2_DLA-BiFPN-P3_24x.yaml)/[model](https://drive.google.com/file/d/15ZES1ySxubDPzKsHPA7pYg8o_Vwmf-Mb/view?usp=sharing) |
-| CenterNet2_R2-101-DCN_896_4x | 51.2 | 9 / 13 | [config](../configs/CenterNet2_R2-101-DCN_896_4x.yaml)/[model](https://drive.google.com/file/d/1S7_GE8ZDQBWuLEfKHkxzeF3KBsxsbABg/view?usp=sharing) |
-| CenterNet2_R2-101-DCN-BiFPN_1280_4x | 52.9 | 6 / 8 | [config](../configs/CenterNet2_R2-101-DCN-BiFPN_1280_4x.yaml)/[model](https://drive.google.com/file/d/14EBHNMagBCNTQjOXcHoZwLYIi2lFIm7F/view?usp=sharing) |
-| CenterNet2_R2-101-DCN-BiFPN_4x+4x_1560_ST | 56.1 | 3 / 5 | [config](../configs/CenterNet2_R2-101-DCN-BiFPN_4x+4x_1560_ST.yaml)/[model](https://drive.google.com/file/d/11ww9VlOi_nhpdsU_vBAecSxBU0dR_JzW/view?usp=sharing) |
-| CenterNet2_DLA-BiFPN-P5_640_24x_ST | 49.2 | 33 / 38 | [config](../configs/CenterNet2_DLA-BiFPN-P5_640_24x_ST.yaml)/[model](https://drive.google.com/file/d/1qsHp2HrM1u8WrtBzF5S0oCoLMz-B40wk/view?usp=sharing) |
-
-#### Note
-
-- `CenterNet2-F_R50_1x` uses Faster R-CNN as the second stage. All other CenterNet2 models use Cascade R-CNN as the second stage.
-- `CenterNet2_DLA-BiFPN-P3_4x` follows the same training setting as [realtime-FCOS](https://github.com/aim-uofa/AdelaiDet/blob/master/configs/FCOS-Detection/README.md).
-- `CenterNet2_DLA-BiFPN-P3_24x` is trained by repeating the `4x` schedule (starting from learning rate 0.01) 6 times.
-- R2 means the [Res2Net](https://github.com/Res2Net/Res2Net-detectron2) backbone. To train Res2Net models, you need to download the ImageNet pre-trained weights [here](https://github.com/Res2Net/Res2Net-detectron2) and place them in `output/r2_101.pkl`.
-- The last 4 models in the table are trained with the EfficientDet-style resize-and-crop augmentation, instead of the default random resizing of the short edge in detectron2. We found this trains faster (per iteration) and gives better performance under a long schedule.
-- `_ST` means [self-training](https://arxiv.org/abs/2006.06882) using pseudo-labels produced by [Scaled-YOLOv4](https://github.com/WongKinYiu/ScaledYOLOv4) on COCO unlabeled images, with a hard score threshold of 0.5. Our processed pseudo-labels can be downloaded [here](https://drive.google.com/file/d/1LMBjtHhLp6dYf6MjwEQmzCLWQLkmWPpw/view?usp=sharing).
-- `CenterNet2_R2-101-DCN-BiFPN_4x+4x_1560_ST` finetunes from `CenterNet2_R2-101-DCN-BiFPN_1280_4x` for an additional `4x` schedule with the self-training data. It is trained at `1280x1280` but tested at `1560x1560`.
-
-## LVIS v1
-
-| Model | val mAP box | links |
-|-------|-------------|-------|
-| LVIS_CenterNet2_R50_1x | 26.5 | [config](../configs/LVIS_CenterNet2_R50_1x.yaml)/[model](https://drive.google.com/file/d/1gT9e-tNw8uzEBaCadQuoOOP2TEYa4kKP/view?usp=sharing) |
-| LVIS_CenterNet2_R50_Fed_1x | 28.3 | [config](../configs/LVIS_CenterNet2_R50_Fed_1x.yaml)/[model](https://drive.google.com/file/d/1a9UjheMCKax0qAKEwPVpq2ZHN6vpqJv8/view?usp=sharing) |
-
-- The models are trained with repeat-factor sampling.
-- `LVIS_CenterNet2_R50_Fed_1x` is CenterNet2 with our federated loss. See Appendix D of our [paper](https://arxiv.org/abs/2103.07461) or our [technical report at the LVIS challenge](https://www.lvisdataset.org/assets/challenge_reports/2020/CenterNet2.pdf) for reference.
-
-## Objects365
-
-| Model | val mAP | links |
-|-------|---------|-------|
-| O365_CenterNet2_R50_1x | 22.6 | [config](../configs/O365_CenterNet2_R50_1x.yaml)/[model](https://drive.google.com/file/d/18fG6xGchAlpNp5sx8RAtwadGkS-gdIBU/view?usp=sharing) |
-
-#### Note
-- The Objects365 dataset can be downloaded [here](https://www.objects365.org/overview.html).
-- The model is trained with class-aware sampling.
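
The "linear learning rate rule" that the common-settings note invokes can be made concrete; a one-function sketch using the 0.01-per-16-images base stated in the CenterNet notes above (the helper name is ours, not from the model zoo):

```python
# Linear LR scaling sketch; the 0.01-per-batch-size-16 base comes from the
# CenterNet note above ("0.01 for each batch size 16").
def scaled_lr(batch_size: int, base_lr: float = 0.01, base_batch: int = 16) -> float:
    """Scale the learning rate linearly with the total batch size."""
    return base_lr * batch_size / base_batch

assert scaled_lr(8) == 0.005  # halving the batch halves the LR
```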
spaces/Ayushnangia/Whispercpp_yt/app.py
DELETED
@@ -1,81 +0,0 @@
-import gradio as gr
-import yt_dlp as ydlp
-from transformers import pipeline
-
-from whispercpp import Whisper
-
-summarizer = pipeline("summarization", model="knkarthick/MEETING_SUMMARY")
-
-
-def download_audio(youtube_url, output_folder='.'):
-    ydl_opts = {
-        'format': 'bestaudio/best',
-        'postprocessors': [{
-            'key': 'FFmpegExtractAudio',
-            'preferredcodec': 'wav',
-            'preferredquality': '192',
-        }],
-        'outtmpl': f'{output_folder}/audio',
-    }
-
-    with ydlp.YoutubeDL(ydl_opts) as ydl:
-        ydl.download([youtube_url])
-
-
-w = Whisper('tiny')
-
-
-def process_general_transcription(transcription):
-    formatted_transcription = []
-
-    for line in transcription:
-        if line.startswith('[') and line.endswith(']'):
-            formatted_transcription.append(f'\n--- {line[1:-1].upper()} ---\n')
-        else:
-            formatted_transcription.append(line)
-
-    return "\n".join(formatted_transcription)
-
-
-def chunk_to_tokens(text, n):
-    tokens = text.split()
-    max_chunk_size = min(len(tokens), 512)
-    # Shrink the chunk size as the summarization percentage n grows.
-    token_size = max(1, int(max_chunk_size * (1 - n / 100)))
-    return [" ".join(tokens[i:i + token_size]) for i in range(0, len(tokens), token_size)]
-
-
-def summarizing(text, n):
-    valid_tok = chunk_to_tokens(text, n)
-    res = ""
-    for chunk in valid_tok:
-        res += summarizer(chunk)[0]['summary_text'] + '\n'
-    return res
-
-
-def transcribe_sum_youtube(youtube_url, n):
-    download_audio(youtube_url)
-    result = w.transcribe("audio.wav")
-    text = w.extract_text(result)
-    res = process_general_transcription(text)
-    return summarizing(res, n)
-
-
-with gr.Blocks() as demo:
-    gr.Markdown(
-        """
-        # CPP Whisperer - YouTube Video Summarizer
-        """)
-    with gr.Row():
-        with gr.Column():
-            inp = gr.Textbox(label="YouTube URL", placeholder="Insert YT URL here")
-            # Fixed: gr.Slider takes minimum/maximum/step, not min_value/max_value/step_size.
-            inp2 = gr.Slider(minimum=0, maximum=100, step=1, label="Summarization Percentage")
-            result_button_transcribe = gr.Button('Transcribe and Summarize')
-
-        with gr.Column():
-            out = gr.Textbox(label="Transcribed and Summarized Text")
-
-    result_button_transcribe.click(transcribe_sum_youtube, inputs=[inp, inp2], outputs=out)
-
-demo.launch()
spaces/AzinZ/vitscn/attentions.py
DELETED
@@ -1,303 +0,0 @@
-import copy
-import math
-import numpy as np
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-import commons
-import modules
-from modules import LayerNorm
-
-
-class Encoder(nn.Module):
-  def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, **kwargs):
-    super().__init__()
-    self.hidden_channels = hidden_channels
-    self.filter_channels = filter_channels
-    self.n_heads = n_heads
-    self.n_layers = n_layers
-    self.kernel_size = kernel_size
-    self.p_dropout = p_dropout
-    self.window_size = window_size
-
-    self.drop = nn.Dropout(p_dropout)
-    self.attn_layers = nn.ModuleList()
-    self.norm_layers_1 = nn.ModuleList()
-    self.ffn_layers = nn.ModuleList()
-    self.norm_layers_2 = nn.ModuleList()
-    for i in range(self.n_layers):
-      self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size))
-      self.norm_layers_1.append(LayerNorm(hidden_channels))
-      self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout))
-      self.norm_layers_2.append(LayerNorm(hidden_channels))
-
-  def forward(self, x, x_mask):
-    attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
-    x = x * x_mask
-    for i in range(self.n_layers):
-      y = self.attn_layers[i](x, x, attn_mask)
-      y = self.drop(y)
-      x = self.norm_layers_1[i](x + y)
-
-      y = self.ffn_layers[i](x, x_mask)
-      y = self.drop(y)
-      x = self.norm_layers_2[i](x + y)
-    x = x * x_mask
-    return x
-
-
-class Decoder(nn.Module):
-  def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs):
-    super().__init__()
-    self.hidden_channels = hidden_channels
-    self.filter_channels = filter_channels
-    self.n_heads = n_heads
-    self.n_layers = n_layers
-    self.kernel_size = kernel_size
-    self.p_dropout = p_dropout
-    self.proximal_bias = proximal_bias
-    self.proximal_init = proximal_init
-
-    self.drop = nn.Dropout(p_dropout)
-    self.self_attn_layers = nn.ModuleList()
-    self.norm_layers_0 = nn.ModuleList()
-    self.encdec_attn_layers = nn.ModuleList()
-    self.norm_layers_1 = nn.ModuleList()
-    self.ffn_layers = nn.ModuleList()
-    self.norm_layers_2 = nn.ModuleList()
-    for i in range(self.n_layers):
-      self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init))
-      self.norm_layers_0.append(LayerNorm(hidden_channels))
-      self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout))
-      self.norm_layers_1.append(LayerNorm(hidden_channels))
-      self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True))
-      self.norm_layers_2.append(LayerNorm(hidden_channels))
-
-  def forward(self, x, x_mask, h, h_mask):
-    """
-    x: decoder input
-    h: encoder output
-    """
-    self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype)
-    encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
-    x = x * x_mask
-    for i in range(self.n_layers):
-      y = self.self_attn_layers[i](x, x, self_attn_mask)
-      y = self.drop(y)
-      x = self.norm_layers_0[i](x + y)
-
-      y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
-      y = self.drop(y)
-      x = self.norm_layers_1[i](x + y)
-
-      y = self.ffn_layers[i](x, x_mask)
-      y = self.drop(y)
-      x = self.norm_layers_2[i](x + y)
-    x = x * x_mask
-    return x
-
-
-class MultiHeadAttention(nn.Module):
-  def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False):
-    super().__init__()
-    assert channels % n_heads == 0
-
-    self.channels = channels
-    self.out_channels = out_channels
-    self.n_heads = n_heads
-    self.p_dropout = p_dropout
-    self.window_size = window_size
-    self.heads_share = heads_share
-    self.block_length = block_length
-    self.proximal_bias = proximal_bias
-    self.proximal_init = proximal_init
-    self.attn = None
-
-    self.k_channels = channels // n_heads
-    self.conv_q = nn.Conv1d(channels, channels, 1)
-    self.conv_k = nn.Conv1d(channels, channels, 1)
-    self.conv_v = nn.Conv1d(channels, channels, 1)
-    self.conv_o = nn.Conv1d(channels, out_channels, 1)
-    self.drop = nn.Dropout(p_dropout)
-
-    if window_size is not None:
-      n_heads_rel = 1 if heads_share else n_heads
-      rel_stddev = self.k_channels**-0.5
-      self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
-      self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
-
-    nn.init.xavier_uniform_(self.conv_q.weight)
-    nn.init.xavier_uniform_(self.conv_k.weight)
-    nn.init.xavier_uniform_(self.conv_v.weight)
-    if proximal_init:
-      with torch.no_grad():
-        self.conv_k.weight.copy_(self.conv_q.weight)
-        self.conv_k.bias.copy_(self.conv_q.bias)
-
-  def forward(self, x, c, attn_mask=None):
-    q = self.conv_q(x)
-    k = self.conv_k(c)
-    v = self.conv_v(c)
-
-    x, self.attn = self.attention(q, k, v, mask=attn_mask)
-
-    x = self.conv_o(x)
-    return x
-
-  def attention(self, query, key, value, mask=None):
-    # reshape [b, d, t] -> [b, n_h, t, d_k]
-    b, d, t_s, t_t = (*key.size(), query.size(2))
-    query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
-    key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
-    value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
-
-    scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
-    if self.window_size is not None:
-      assert t_s == t_t, "Relative attention is only available for self-attention."
-      key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
-      rel_logits = self._matmul_with_relative_keys(query / math.sqrt(self.k_channels), key_relative_embeddings)
-      scores_local = self._relative_position_to_absolute_position(rel_logits)
-      scores = scores + scores_local
-    if self.proximal_bias:
-      assert t_s == t_t, "Proximal bias is only available for self-attention."
-      scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype)
-    if mask is not None:
-      scores = scores.masked_fill(mask == 0, -1e4)
-      if self.block_length is not None:
-        assert t_s == t_t, "Local attention is only available for self-attention."
-        block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length)
-        scores = scores.masked_fill(block_mask == 0, -1e4)
-    p_attn = F.softmax(scores, dim=-1)  # [b, n_h, t_t, t_s]
-    p_attn = self.drop(p_attn)
-    output = torch.matmul(p_attn, value)
-    if self.window_size is not None:
-      relative_weights = self._absolute_position_to_relative_position(p_attn)
-      value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s)
-      output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings)
-    output = output.transpose(2, 3).contiguous().view(b, d, t_t)  # [b, n_h, t_t, d_k] -> [b, d, t_t]
-    return output, p_attn
-
-  def _matmul_with_relative_values(self, x, y):
-    """
-    x: [b, h, l, m]
-    y: [h or 1, m, d]
-    ret: [b, h, l, d]
-    """
-    ret = torch.matmul(x, y.unsqueeze(0))
-    return ret
-
-  def _matmul_with_relative_keys(self, x, y):
-    """
-    x: [b, h, l, d]
-    y: [h or 1, m, d]
-    ret: [b, h, l, m]
-    """
-    ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
-    return ret
-
-  def _get_relative_embeddings(self, relative_embeddings, length):
-    max_relative_position = 2 * self.window_size + 1
-    # Pad first before slice to avoid using cond ops.
-    pad_length = max(length - (self.window_size + 1), 0)
-    slice_start_position = max((self.window_size + 1) - length, 0)
-    slice_end_position = slice_start_position + 2 * length - 1
-    if pad_length > 0:
-      padded_relative_embeddings = F.pad(
-          relative_embeddings,
-          commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]))
-    else:
-      padded_relative_embeddings = relative_embeddings
-    used_relative_embeddings = padded_relative_embeddings[:, slice_start_position:slice_end_position]
-    return used_relative_embeddings
-
-  def _relative_position_to_absolute_position(self, x):
-    """
-    x: [b, h, l, 2*l-1]
-    ret: [b, h, l, l]
-    """
-    batch, heads, length, _ = x.size()
-    # Concat columns of pad to shift from relative to absolute indexing.
-    x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]))
-
-    # Concat extra elements so as to add up to shape (len+1, 2*len-1).
-    x_flat = x.view([batch, heads, length * 2 * length])
-    x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]]))
-
-    # Reshape and slice out the padded elements.
-    x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[:, :, :length, length - 1:]
-    return x_final
-
-  def _absolute_position_to_relative_position(self, x):
-    """
-    x: [b, h, l, l]
-    ret: [b, h, l, 2*l-1]
-    """
-    batch, heads, length, _ = x.size()
-    # pad along column
-    x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]]))
-    x_flat = x.view([batch, heads, length**2 + length * (length - 1)])
-    # add 0's in the beginning that will skew the elements after reshape
-    x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
-    x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
-    return x_final
-
-  def _attention_bias_proximal(self, length):
-    """Bias for self-attention to encourage attention to close positions.
-    Args:
-      length: an integer scalar.
-    Returns:
-      a Tensor with shape [1, 1, length, length]
-    """
-    r = torch.arange(length, dtype=torch.float32)
-    diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
-    return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
-
-
-class FFN(nn.Module):
-  def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False):
-    super().__init__()
-    self.in_channels = in_channels
-    self.out_channels = out_channels
-    self.filter_channels = filter_channels
-    self.kernel_size = kernel_size
-    self.p_dropout = p_dropout
-    self.activation = activation
-    self.causal = causal
-
-    if causal:
-      self.padding = self._causal_padding
-    else:
-      self.padding = self._same_padding
-
-    self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
-    self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
-    self.drop = nn.Dropout(p_dropout)
-
-  def forward(self, x, x_mask):
-    x = self.conv_1(self.padding(x * x_mask))
-    if self.activation == "gelu":
-      x = x * torch.sigmoid(1.702 * x)
-    else:
-      x = torch.relu(x)
-    x = self.drop(x)
-    x = self.conv_2(self.padding(x * x_mask))
-    return x * x_mask
-
-  def _causal_padding(self, x):
-    if self.kernel_size == 1:
-      return x
-    pad_l = self.kernel_size - 1
-    pad_r = 0
-    padding = [[0, 0], [0, 0], [pad_l, pad_r]]
-    x = F.pad(x, commons.convert_pad_shape(padding))
-    return x
-
-  def _same_padding(self, x):
-    if self.kernel_size == 1:
-      return x
-    pad_l = (self.kernel_size - 1) // 2
-    pad_r = self.kernel_size // 2
-    padding = [[0, 0], [0, 0], [pad_l, pad_r]]
-    x = F.pad(x, commons.convert_pad_shape(padding))
-    return x
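
The relative-attention helpers above (`_relative_position_to_absolute_position` and its inverse) are pure pad-and-reshape tricks. A shape-only sketch of the forward conversion, with `commons.convert_pad_shape` stubbed to the same semantics and assuming PyTorch is installed:

```python
# Shape-only sanity check of the relative->absolute position conversion above.
import torch
import torch.nn.functional as F

def convert_pad_shape(pad_shape):
    # Same semantics as commons.convert_pad_shape: reverse the per-dim pairs,
    # then flatten into the last-dim-first flat layout that F.pad expects.
    return [item for sublist in pad_shape[::-1] for item in sublist]

b, h, l = 2, 4, 5
rel = torch.randn(b, h, l, 2 * l - 1)  # relative logits [b, h, l, 2l-1]
x = F.pad(rel, convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]))
x = F.pad(x.view(b, h, l * 2 * l), convert_pad_shape([[0, 0], [0, 0], [0, l - 1]]))
absolute = x.view(b, h, l + 1, 2 * l - 1)[:, :, :l, l - 1:]
print(absolute.shape)  # torch.Size([2, 4, 5, 5])
```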
spaces/Benson/text-generation/Examples/9apps 2018.md
DELETED
@@ -1,97 +0,0 @@
-<br />
-<h1>9apps 2018: A Free Alternative to the Google Play Store</h1>
-<p>If you are looking for a way to download and access thousands of apps for free on your Android device, you may want to check out 9apps. 9apps is a third-party app store that offers a variety of apps, games, wallpapers, ringtones and more. In this article, we will tell you what 9apps is, why you should use it, how to download and install it, and what its pros and cons are. We will also compare it with the Google Play Store, the default app store for Android devices.</p>
-<h2>What is 9apps and why should you use it?</h2>
-<p>9apps is a free app store that was launched in 2013 by Alibaba Group, a Chinese e-commerce giant. It is one of the most popular app stores in India, Indonesia and other emerging markets. According to its official website, it has more than 250 million users and more than 35 million daily downloads.</p>
-<h2>9apps 2018</h2><br /><p><b><b>DOWNLOAD</b> ✵ <a href="https://bltlly.com/2v6LoR">https://bltlly.com/2v6LoR</a></b></p><br /><br />
-<p>One of the main reasons to use 9apps is that it offers many apps that are not available on the Google Play Store. For example, you can find apps that are banned or restricted in your country, such as WhatsApp, TikTok, PUBG Mobile, etc. You can also find apps that are exclusive to 9apps, such as Vidmate, UC Browser, etc.</p>
-<h3>Features of 9apps</h3>
-<p>Some of the features that make 9apps stand out from other app stores are:</p>
-<ul>
-<li>It has a user-friendly interface that is easy to navigate and search.</li>
-<li>It has a small size of about 4 MB, which saves storage space and data usage.</li>
-<li>It has a fast download speed that lets you download apps in seconds.</li>
-<li>It has a smart recommendation system that suggests apps based on your preferences and interests.</li>
-<li>It has a wide range of categories and subcategories covering various genres and topics.</li>
-<li>It has a regular update system that keeps apps up to date and bug-free.</li>
-
-</ul>
-<h3>How to download and install 9apps on your Android device</h3>
-<p>To download and install 9apps on your Android device, you need to follow these steps:</p>
-<ol>
-<li>Go to the official 9apps website () or any other trusted source () and click the download button.</li>
-<li>Once the APK file is downloaded, open it and tap install. You may need to enable the unknown sources option in your settings to allow the installation.</li>
-<li>After the installation is complete, open the app and enjoy unlimited access to thousands of apps for free.</li>
-</ol>
-<h2>Pros and cons of 9apps</h2>
-<p>Like any other app store, 9apps has its own advantages and disadvantages. Here are some of them:</p>
-<h3>Advantages of 9apps</h3>
-<ul>
-<li>It offers a large number of apps that are not available on the Google Play Store.</li>
-<li>It saves storage space and data usage with its small size and fast download speed.</li>
-<li>It provides personalized recommendations based on your preferences and interests.</li>
-<li>It updates apps regularly to ensure their quality and performance.</li>
-<li>It protects your device from viruses and malware with its security system.</li>
-</ul>
-<h3>Disadvantages of 9apps</h3>
-<ul>
-<li>It may not be compatible with some devices or Android versions.</li>
-<li>It may contain some ads or pop-ups that can be annoying or intrusive.</li>
-<li>It may not have the latest versions of some apps or games.</li>
-<li>It may not have the same quality control or verification as the Google Play Store.</li>
-<li>It may pose some risks to your privacy or security if you download apps from untrusted sources.</li>
-</ul>
-<h2>Comparison between 9apps and the Google Play Store</h2>
-<p>Both 9apps and the Google Play Store are popular app stores for Android devices, but they have some similarities and differences. Here are some of them:</p>
-<p></p>
-<h3>Similarities between 9apps and the Google Play Store</h3>
-<ul>
-
-<li>Both have a user-friendly interface that is easy to navigate and search.</li>
-<li>Both have a recommendation system that suggests apps based on your preferences and interests.</li>
-<li>Both have a security system that scans apps for viruses and malware before installing them.</li>
-</ul>
-<h3>Differences between 9apps and the Google Play Store</h3>
-<table>
-<tr><th>9apps</th><th>Google Play Store</th></tr>
-<tr><td>It has a small size of about 4 MB.</td><td>It has a large size of about 20 MB.</td></tr>
-<tr><td>It has a fast download speed that lets you download apps in seconds.</td><td>It has a slow download speed that can take minutes to download apps.</td></tr>
-<tr><td>It offers many apps that are not available on the Google Play Store, such as banned or restricted apps, exclusive apps, etc.</td><td>It does not offer apps that are banned or restricted in your country, such as WhatsApp, TikTok, PUBG Mobile, etc.</td></tr>
-<tr><td>It updates apps regularly to ensure their quality and performance.</td><td>It may not update apps frequently or on time.</td></tr>
-<tr><td>It may not be compatible with some devices or Android versions.</td><td>It is compatible with most devices and Android versions.</td></tr>
-<tr><td>It may contain some ads or pop-ups that can be annoying or intrusive.</td><td>It does not contain any ads or pop-ups.</td></tr>
-<tr><td>It may not have the latest versions of some apps or games.</td><td>It has the latest versions of most apps or games.</td></tr>
-<tr><td>It may not have the same quality control or verification as the Google Play Store.</td><td>It has a strict quality control and verification process for apps.</td></tr>
-<tr><td>It may pose some risks to your privacy or security if you download apps from untrusted sources.</td><td>It has a high level of privacy and security for users and apps.</td></tr>
-</table>
-<h2>Conclusion</h2>
-
-<h3>Frequently asked questions</h3>
-<p>Here are some frequently asked questions about 9apps:</p>
-<ol>
-<li><b>Is 9apps safe to use?</b></li>
-<p>9apps is generally safe to use, as it has a security system that scans apps for viruses and malware before installing them. However, it may pose some risks to your privacy or security if you download apps from untrusted sources or grant them unnecessary permissions. Therefore, you should always check the source, rating, reviews and permissions of apps before downloading them. You should also use an antivirus app on your device to protect it from any potential threats.</p>
-<li><b>Is 9apps legal to use?</b></li>
-<p>9apps is legal to use as long as you do not violate any laws or regulations in your country. However, some of the apps available on 9apps may be banned or restricted in your country, such as WhatsApp, TikTok, PUBG Mobile, etc. Therefore, you should check the legality of apps before downloading them and use them at your own risk.</p>
-<li><b>How can I update 9apps?</b></li>
-<p>9apps has a regular update system that keeps the app store and its apps up to date and bug-free. You can check for updates by opening the app and tapping the menu icon in the top left corner. Then, tap settings and scroll down to find the update option. If an update is available, you can tap it and follow the instructions to install it. Alternatively, you can also download the latest version of 9apps from its official website or any other trusted source.</p>
-<li><b>How can I uninstall 9apps?</b></li>
-<p>If you want to uninstall 9apps from your device, you can follow these steps:</p>
-<ol>
-<li>Go to your device settings and tap apps or applications.</li>
-<li>Find and tap 9apps, then tap uninstall.</li>
-<li>Confirm your action and wait for the process to complete.</li>
-</ol>
-<li><b>What are some alternatives to 9apps?</b></li>
-
-<ul>
-<li>Aptoide: Aptoide is a decentralized app store that lets users create and manage their own app stores. It has more than 1 billion downloads and more than 7 million apps.</li>
-<li>APKPure: APKPure is a simple and lightweight app store that offers pure APK files of apps and games. It has more than 300 million downloads and more than 3 million apps.</li>
-<li>Amazon Appstore: The Amazon Appstore is an app store owned by Amazon, the online retail giant. It has more than 600,000 apps and games, including some exclusive titles.</li>
-</ul>
-<li><b>How can I contact 9apps customer service?</b></li>
-<p>If you have any questions, feedback or complaints about 9apps, you can contact its customer service by sending an email to [email protected]. You can also visit its official website () or its Facebook page () for more information and support.</p>
-</ol>
-: https://www.9apps.com/ : https://www.apkmirror.com/apk/9apps-inc/9apps/ : https://www.aptoide.com/ : https://apkpure.com/ : https://www.amazon.com/mobile-apps/ 64aa2da5cf<br />
-<br />
-<br />
spaces/Benson/text-generation/Examples/Blockman Go Apkmody.md
DELETED
@@ -1,100 +0,0 @@
-
-<h1>Blockman Go Apkmody: The Ultimate Guide for Sandbox Lovers</h1>
-<p>If you are a fan of sandbox games, you may have heard of <strong>blockman go</strong>, a free app that lets you play various block-style minigames, chat with friends and make new ones. Blockman go is a fun and exciting game that offers many features, such as:</p>
-<h2>blockman go apkmody</h2><br /><p><b><b>Download</b> ✔✔✔ <a href="https://bltlly.com/2v6Kw8">https://bltlly.com/2v6Kw8</a></b></p><br /><br />
-<ul>
-<li>Various games: You can choose from different genres of minigames, such as action, adventure, role-playing, strategy, simulation and more. You can join a game with a single tap or create your own room and invite your friends.</li>
-<li>Customizable avatars: You can dress up your avatar with different outfits, accessories, hairstyles and skins. You can also use the dressing system to get recommendations for the best clothes for you.</li>
-<li>Chat system: You can communicate with other players using in-game chat features, private messages and groups. You can also share your funny moments and screenshots with them.</li>
-<li>Gold rewards: You can earn gold by playing minigames and use it to buy decorations and items. You can also get free gold by watching ads or completing tasks.</li>
-<li>VIP system: You can become a VIP player and enjoy many privileges, such as a 20% discount on decorations, daily gifts, more gold, etc.</li>
-</ul>
-<p>But what if you want to enjoy more benefits and features without spending money? That is where <strong>apkmody</strong> comes in. Apkmody is a website that provides modded APK files for Android games and apps. These modded APK files let you access premium features and unlimited resources for free. You can download these files from the website and install them on your device easily.</p>
-
-<p>How can you download and use blockman go apkmody? Here are the steps you need to follow:</p>
-<h2>How to download and install Blockman Go Apkmody on your device?</h2>
-<ol>
-<li>Go to [apkmody.io]( 2 ) in your browser and search for "blockman go".</li>
-<li>Select the latest version of the blockman go mod APK from the list of results.</li>
-<li>Click the "Download" button and wait for the file to download.</li>
-<li>After the download is complete, locate the file on your device and tap it to install it.</li>
-<li>If you see a warning message saying "Install blocked", go to your device settings and enable "Unknown sources" in the security options.</li>
-<li>Once the installation is done, open the app and enjoy blockman go apkmody.</li>
-</ol>
-<h2>How to use Blockman Go Apkmody to enjoy unlimited money and GCubes?</h2>
-<p>Using blockman go apkmody is very easy and simple. You just have to follow these steps:</p>
-<ul>
-<li>Launch the app and log in with your account or create a new one.</li>
-<li>Go to the shop and select the item you want to buy with GCubes.</li>
-<li>Click the "Buy" button and confirm your purchase.</li>
-<li>You will see that your GCubes balance will not decrease, but increase by the amount you spent.</li>
-<li>Enjoy your unlimited money and GCubes and buy whatever you want.</li>
-</ul>
-<p>Note: You can also get free GCubes by completing tasks, watching ads or inviting friends. However, with blockman go apkmody, you no longer need to do that.</p>
-<h2>How to play various minigames with Blockman Go Apkmody and chat with friends?</h2>
-<p>One of the best features of blockman go is that it offers a variety of minigames for you to play and have fun. With blockman go apkmody, you can access all the minigames without limitations. Here are some of the popular minigames you can play:</p>
-<p></p>
-<table>
-<tr>
-<th>Minigame</th>
-<th>Description</th>
-</tr>
-<tr>
-<td>Bed Wars</td>
-
-</tr>
-<tr>
-<td>Sky War</td>
-<td>A survival game where you have to fight other players on floating islands. You can loot chests, craft weapons and use skills to win.</td>
-</tr>
-<tr>
-<td>Build and Shoot</td>
-<td>A creative game where you can build your own map and invite others to join. You can also shoot enemies with different weapons and grenades.</td>
-</tr>
-<tr>
-<td>Egg Wars</td>
-<td>A game similar to Bed Wars, but with eggs instead of beds. You have to protect your egg and break the enemy's egg. You can also upgrade your equipment and team up with others.</td>
-</tr>
-<tr>
-<td>Murder Mystery</td><td>A detective game where you have to find out who the murderer is among the players. You can be a detective, a murderer or an innocent. You have to use clues, weapons and logic to solve the mystery.</td>
-</tr>
-</table>
-<p>To play these minigames, you just have to tap the "Games" icon on the main screen and select the one you want to join. You can also create your own room and invite your friends to play with you. You can chat with other players using the game's chat system or voice chat. You can also send emojis, stickers and gifts to express yourself.</p>
-<h2>How to customize your avatar and show off your style with Blockman Go Apkmody?</h2>
-<p>Another great feature of blockman go is that it lets you customize your avatar with different outfits, accessories, hairstyles and skins. With blockman go apkmody, you can get all the items for free and change your look as often as you want. Here are some of the ways you can customize your avatar:</p>
-<ul>
-<li>Go to the "Avatar" icon on the main screen and select the item you want to change.</li>
-<li>You can choose from different categories, such as clothes, hats, glasses, masks, wings, tails, pets and more.</li>
-<li>You can also use the "Dress Up" feature to get recommendations for the best outfit for you based on your preferences.</li>
-
-<li>You can also use the "Photo Studio" feature to take pictures of your avatar and share them with others.</li>
-</ul>
-<p>With blockman go apkmody, you can show off your style and personality with your avatar. You can also look at other players' avatars and compliment them or make friends with them.</p>
-<h2>How to avoid scams and hackers with Blockman Go Apkmody?</h2>
-<p>While blockman go apkmody is a safe and reliable modded APK file that does not contain any viruses or malware, you still have to be careful when playing online games. There are some scammers and hackers who might try to trick you or harm you in some way. Here are some tips on how to avoid them:</p>
-<ul>
-<li>Do not share your personal information, such as your password, email address, phone number or bank details with anyone online.</li>
-<li>Do not click on any suspicious links or download unknown files from anyone online.</li>
-<li>Do not trust anyone who claims to be a staff member or a moderator of blockman go. They may ask for your account details or offer you free items or GCubes. They are not real and are trying to scam you.</li>
-<li>Do not use any other modded APK files or hacks for blockman go. They may contain viruses or malware that can damage your device or steal your data. They can also get you banned from playing.</li>
-<li>Do not join any unofficial websites or groups that claim to be related to blockman go. They may be phishing sites or fake communities that can trick you or spam you.</li>
-</ul>
-<p>With these tips, you can avoid scams and hackers and enjoy blockman go apkmody safely.</p>
-<h1>Conclusion</h1>
-
-<p>If you are interested in blockman go apkmody, do not hesitate to download it now and try it out. You will not regret it. You will have a blast playing blockman go with unlimited money and GCubes. You will also be able to impress your friends and make new ones with your awesome avatar and skills. Blockman go apkmody is the best way to enjoy blockman go to the fullest.</p>
-<p>So, what are you waiting for? Download blockman go apkmody now and start your adventure in the world of blocks!</p>
-<h2>Frequently asked questions</h2>
-<p>Here are some of the most frequently asked questions about blockman go apkmody:</p>
-<h3>What are some of the popular minigames in blockman go?</h3>
-<p>Some of the popular minigames in blockman go are Bed Wars, Sky Wars, Build and Shoot, Egg Wars and Murder Mystery. You can find more minigames in the game by tapping the "Games" icon on the main screen.</p>
-<h3>What are GCubes and how can I get them for free?</h3>
-<p>GCubes are the premium currency in blockman go that you can use to buy special items, unlock VIP features, join exclusive events and more. You can get them for free by using blockman go apkmody, which gives you unlimited GCubes. You can also get them by completing tasks, watching ads or inviting friends.</p>
-<h3>How can I join a party or create my own in blockman go?</h3>
-<p>You can join a party or create your own by tapping the "Party" icon on the main screen. You can invite your friends or other players to join your party by sending them a code or a link. You can also join other parties by entering their code or link. You can chat with your party members and play minigames together.</p>
-<h3>How can I report a bug or a problem in blockman go?</h3>
-<p>You can report a bug or a problem in blockman go by tapping the "Feedback" icon on the main screen. You can fill out a form with the details of your problem and screenshots and send it to the developer. You can also contact the support team by email at [email protected].</p>
-
-<p>You can contact the developer or the support team of blockman go by email at [email protected]. You can also follow them on their social media accounts, such as Facebook, Twitter, Instagram, YouTube, Discord and Reddit.</p> 64aa2da5cf<br />
-<br />
-<br />
spaces/Benson/text-generation/Examples/Descargar Etiqueta Despus De La Escuela Apk Mod.md
DELETED
@@ -1,75 +0,0 @@
<br />
<h1>Download Tag After School APK Mod: An Exciting Game for Android and iOS</h1>
<p>Are you looking for a game that will keep you on the edge of your seat? Do you want to relive your school days with a touch of horror and mystery? Do you want to control the fate of a shy and timid boy named Shota-Kun? If you answered yes to any of these questions, then you should download Tag After School APK Mod, a game that will take you on a thrilling adventure through a haunted school. </p>
<h2>What is Tag After School? </h2>
<p>Tag After School is a game developed by Genius Studio Japan Inc, a company that specializes in creating anime-style games for mobile devices. The game is based on the classic children's game of tag, but with a twist. You can compete with friends or strangers, and get access to various characters, power-ups, and levels as you try to escape from your opponents. But be careful, because the school is not as innocent as it seems. There are secrets, mysteries, and dangers lurking around every corner. </p>
<h2>Download Tag After School APK Mod</h2><br /><p><b>Download Zip</b> ⇒ <a href="https://bltlly.com/2v6JhB">https://bltlly.com/2v6JhB</a></p><br /><br />
<h3>A game based on the classic game of tag</h3>
<p>The game is set in a high school where the students take part in the game and have to hide from their opponents. You can choose between different modes, such as solo, team, or survival. You can also customize your character's appearance, skills, and equipment. The game is easy to play, but hard to master. You have to use your speed, agility, and strategy to avoid being tagged. </p>
<h3>A game of horror and mystery</h3>
<p>The game is not all fun and games. It also has a dark side. The school is haunted by ghosts, monsters, and traps that will try to stop you from escaping. You have to watch where you go, because you never know what might happen next. The game has a creepy atmosphere, with eerie sound effects and graphics that will make you feel like you are in a horror movie. </p>
<h3>A game with a story-driven narrative</h3>
<h2>How do you play Tag After School? </h2>
<p>The game is simple to play, but hard to win. Here are some tips on how to play Tag After School:</p>
<h3>Choose your character and mode</h3>
<p>You can choose between different characters, each with their own strengths and weaknesses. You can also customize their appearance, skills, and equipment. You can choose between different modes, such as solo, team, or survival. In solo mode, you have to tag as many opponents as possible before time runs out. In team mode, you have to work with your teammates to tag the other team. In survival mode, you have to avoid being tagged by anyone for as long as possible. </p>
<h3>Run, hide, and tag your opponents</h3>
<p>You have to use the joystick on the left side of the screen to move around the map. You can also use the buttons on the right side of the screen to jump, slide, or tag. You have to be quick and smart, because your opponents will try to catch you or hide from you. You can also use the map in the top right corner of the screen to see where your opponents are. </p>
<h3>Use power-ups and obstacles to your advantage</h3>
<p>You can find different power-ups and obstacles on the map that can help or hinder you. For example, you can find a speed boost that will make you run faster, a shield that will protect you from being tagged, or a trap that will slow your opponents down. You can also use objects such as lockers, desks, or doors to hide or block your enemies. However, you have to be careful, because some power-ups and obstacles can backfire or affect everyone on the map. </p>
<h3>Make choices that determine Shota-Kun's fate</h3>
<h2>Why download Tag After School APK Mod? </h2>
<p>Tag After School is a fun and exciting game that will keep you entertained for hours. But if you want to enjoy it even more, you should download Tag After School APK Mod, a modified version of the game that will give you unlimited features and benefits. Here are some reasons why you should download Tag After School APK Mod:</p>
<h3>Enjoy unlimited features and benefits</h3>
<p>With Tag After School APK Mod, you can access all the features and benefits of the game without any restrictions or limitations. You can unlock all the characters, power-ups, levels, and scenarios without spending money or time. You can also get unlimited coins and gems that you can use to buy or upgrade whatever you want. You can also remove ads and enjoy a smooth, uninterrupted gaming experience. </p>
<h3>Experience immersive graphics and sound effects</h3>
<p>Tag After School APK Mod also enhances the game's graphics and sound effects, making it more realistic and immersive. You can enjoy high-quality visuals that will make you feel like you are in a real school. You can also hear realistic sounds that will make you jump or scream. You can also adjust the settings according to your preferences and device specifications. </p>
<h3>Challenge yourself and create your own storylines</h3>
<p>Tag After School APK Mod also lets you customize the game to your liking. You can create your own stories by choosing different options and outcomes. You can also change the game's difficulty level by adjusting the speed, intelligence, and aggressiveness of your opponents. You can also play with your friends online or offline, and see who is the best at tag. </p>
<h2>How to download and install Tag After School APK Mod? </h2>
<p>If you are interested in downloading and installing Tag After School APK Mod, here are some simple steps you need to follow:</p>
<h3>For Android devices</h3>
<ol>
<li>Go to [this link] and download the Tag After School APK Mod file. </li>
<li>Go to your device settings and enable unknown sources. </li>
<li>Go to your file manager and locate the Tag After School APK Mod file. </li>
<li>Tap on it and install it (or sideload it from a computer, as sketched after this list). </li>
<li>Launch the game and enjoy. </li>
</ol>
<h3>For iOS devices</h3>
<ol>
<li>Go to [this link] and download the Tag After School IPA file. </li>
<li>Go to your device settings and trust the Tag After School IPA file. </li>
<li>Go to your file manager and locate the Tag After School IPA file. </li>
<li>Tap on it and install it. </li>
<li>Launch the game and enjoy. </li>
</ol>
<h3>For PC devices</h3>
<ol>
<li>Go to [this link] and download the Tag After School EXE file. </li>
<li>Go to your PC settings and allow the Tag After School EXE file. </li>
<li>Go to your downloads folder and locate the Tag After School EXE file. </li>
<li>Double-click on it and install it. </li>
<li>Launch the game and enjoy. </li>
</ol>
<h2>Conclusion</h2>
<p>Tag After School is a game that will make you feel nostalgic, scared, and excited at the same time. It is a game that combines the fun of tag with the thrill of horror and mystery. It is a game that lets you create your own story and destiny. It is a game that you should download and play right now. </p>
<p>If you want to have the best gaming experience, you should download Tag After School APK Mod, a modified version of the game that gives you unlimited features and benefits. You can unlock all the characters, power-ups, levels, and scenarios without any hassle. You can also enjoy high-quality graphics and sound effects that immerse you in the game. You can also challenge yourself and create your own stories by customizing the game to your preferences. </p>
<p>Tag After School is a game you should not miss. It is a game that will keep you entertained for hours. It is a game that will make you feel different emotions. It is a game that will make you want to play more. So, what are you waiting for? Download Tag After School APK Mod now and have fun! </p>
<h2>FAQ</h2>
<p>Here are some frequently asked questions about Tag After School APK Mod:</p>
<h3>Is Tag After School APK Mod safe to download and install? </h3>
<p>Yes, Tag After School APK Mod is safe to download and install. It does not contain any viruses, malware, or spyware that could harm your device or data. It is also compatible with most devices and operating systems. </p>
<h3>Is Tag After School APK Mod free to download and play? </h3>
<p>Yes, Tag After School APK Mod is free to download and play. You do not need to pay any money or fees to access the game or its features. You can also play the game offline or online at no charge. </p>
<h3>How can I update Tag After School APK Mod? </h3>
<p>You can update Tag After School APK Mod by visiting [this link] and downloading the latest version of the game. You can then install it over the existing version of the game without losing any data or progress. </p>
<h3>How can I contact the developers of Tag After School APK Mod? </h3>
<p>You can contact the developers of Tag After School APK Mod by visiting their official website or social media pages. You can also send them an email or leave a comment on their blog or forum. They are always happy to hear from their fans and customers. </p>
<h3>How can I share my feedback or suggestions about Tag After School APK Mod? </h3>
spaces/Bianca0930/Bianca/README.md
DELETED
@@ -1,12 +0,0 @@
---
title: Bianca
emoji: 🌍
colorFrom: gray
colorTo: blue
sdk: gradio
sdk_version: 3.18.0
app_file: app.py
pinned: false
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Boadiwaa/Recipes/openai/api_resources/embedding.py
DELETED
@@ -1,58 +0,0 @@
import base64
import time

import numpy as np

from openai import util
from openai.api_resources.abstract import DeletableAPIResource, ListableAPIResource
from openai.api_resources.abstract.engine_api_resource import EngineAPIResource
from openai.error import InvalidRequestError, TryAgain


class Embedding(EngineAPIResource, ListableAPIResource, DeletableAPIResource):
    engine_required = False
    OBJECT_NAME = "embeddings"

    @classmethod
    def create(cls, *args, **kwargs):
        """
        Creates a new embedding for the provided input and parameters.

        See https://beta.openai.com/docs/api-reference/embeddings for a list
        of valid parameters.
        """
        start = time.time()
        timeout = kwargs.pop("timeout", None)
        if kwargs.get("model", None) is None and kwargs.get("engine", None) is None:
            raise InvalidRequestError(
                "Must provide an 'engine' or 'model' parameter to create an Embedding.",
                param="engine",
            )

        user_provided_encoding_format = kwargs.get("encoding_format", None)

        # If encoding format was not explicitly specified, we opaquely use base64 for performance
        if not user_provided_encoding_format:
            kwargs["encoding_format"] = "base64"

        while True:
            try:
                response = super().create(*args, **kwargs)

                # If a user specifies base64, we'll just return the encoded string.
                # This is only for the default case.
                if not user_provided_encoding_format:
                    for data in response.data:

                        # If an engine isn't using this optimization, don't do anything
                        if type(data["embedding"]) == str:
                            data["embedding"] = np.frombuffer(
                                base64.b64decode(data["embedding"]), dtype="float32"
                            ).tolist()

                return response
            except TryAgain as e:
                if timeout is not None and time.time() > start + timeout:
                    raise

                util.log_info("Waiting for model to warm up", error=e)
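For reference, a minimal sketch of the round trip the base64 branch above relies on: the API returns float32 bytes encoded as base64, and numpy restores them exactly as in `create`. The vector values here are made up.

```python
import base64

import numpy as np

# A made-up embedding vector, serialized the way the base64 optimization
# does it: float32 bytes -> base64 string.
vector = np.array([0.25, -0.5, 0.125], dtype="float32")
encoded = base64.b64encode(vector.tobytes()).decode("ascii")

# Decoding mirrors the branch inside Embedding.create above.
decoded = np.frombuffer(base64.b64decode(encoded), dtype="float32").tolist()
assert decoded == vector.tolist()
```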
spaces/CNXT/PiX2TXT/README.md
DELETED
@@ -1,12 +0,0 @@
---
title: '#://CNXT ❌ #://PiX2TXT'
emoji: 👁
colorFrom: blue
colorTo: red
sdk: gradio
sdk_version: 3.27.0
app_file: app.py
pinned: false
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/CVH-vn1210/make_hair/app.py
DELETED
@@ -1,158 +0,0 @@
import argparse
import os
import random

import numpy as np
import torch
import torch.backends.cudnn as cudnn
import gradio as gr

import huggingface_hub as hf

from minigpt4.common.config import Config
from minigpt4.common.dist_utils import get_rank
from minigpt4.common.registry import registry
from minigpt4.conversation.conversation import Chat, CONV_VISION

# imports modules for registration
from minigpt4.datasets.builders import *
from minigpt4.models import *
from minigpt4.processors import *
from minigpt4.runners import *
from minigpt4.tasks import *

hf.login(token=os.environ['model_token'])

def parse_args():
    parser = argparse.ArgumentParser(description="Demo")
    parser.add_argument("--cfg-path", type=str, default='eval_configs/minigpt4.yaml', help="path to configuration file.")
    parser.add_argument(
        "--options",
        nargs="+",
        help="override some settings in the used config, the key-value pair "
        "in xxx=yyy format will be merged into config file (deprecate), "
        "change to --cfg-options instead.",
    )
    args = parser.parse_args()
    return args


def setup_seeds(config):
    seed = config.run_cfg.seed + get_rank()

    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)

    cudnn.benchmark = False
    cudnn.deterministic = True

# ========================================
# Model Initialization
# ========================================

SHARED_UI_WARNING = f'''### [NOTE] It is possible that you are waiting in a lengthy queue.

You can duplicate and use it with a paid private GPU.

<a class="duplicate-button" style="display:inline-block" target="_blank" href="https://huggingface.co/spaces/Vision-CAIR/minigpt4?duplicate=true"><img style="margin-top:0;margin-bottom:0" src="https://huggingface.co/datasets/huggingface/badges/raw/main/duplicate-this-space-xl-dark.svg" alt="Duplicate Space"></a>

Alternatively, you can also use the demo on our [project page](https://minigpt-4.github.io).
'''

print('Initializing Chat')
cfg = Config(parse_args())

model_config = cfg.model_cfg
model_cls = registry.get_model_class(model_config.arch)
model = model_cls.from_config(model_config).to('cuda:0')

vis_processor_cfg = cfg.datasets_cfg.cc_align.vis_processor.train
vis_processor = registry.get_processor_class(vis_processor_cfg.name).from_config(vis_processor_cfg)
chat = Chat(model, vis_processor)
print('Initialization Finished')

# ========================================
# Gradio Setting
# ========================================

def gradio_reset(chat_state, img_list):
    if chat_state is not None:
        chat_state.messages = []
    if img_list is not None:
        img_list = []
    return None, gr.update(value=None, interactive=True), gr.update(placeholder='Please upload your image first', interactive=False), gr.update(value="Upload & Start Chat", interactive=True), chat_state, img_list

def upload_img(gr_img, text_input, chat_state):
    if gr_img is None:
        return None, None, gr.update(interactive=True), chat_state, None
    chat_state = CONV_VISION.copy()
    img_list = []
    llm_message = chat.upload_img(gr_img, chat_state, img_list)
    return gr.update(interactive=False), gr.update(interactive=True, placeholder='Type and press Enter'), gr.update(value="Start Chatting", interactive=False), chat_state, img_list

def gradio_ask(user_message, chatbot, chat_state):
    if len(user_message) == 0:
        return gr.update(interactive=True, placeholder='Input should not be empty!'), chatbot, chat_state
    chat.ask(user_message, chat_state)
    chatbot = chatbot + [[user_message, None]]
    return '', chatbot, chat_state


def gradio_answer(chatbot, chat_state, img_list, num_beams, temperature):
    llm_message = chat.answer(conv=chat_state, img_list=img_list, max_new_tokens=300, num_beams=num_beams, temperature=temperature, max_length=2000)[0]
    chatbot[-1][1] = llm_message
    return chatbot, chat_state, img_list

title = """<h1 align="center">Demo of MiniGPT-4</h1>"""
description = """<h3>This is the demo of MiniGPT-4. Upload your images and start chatting!</h3>"""
article = """<div style='display:flex; gap: 0.25rem; '><a href='https://minigpt-4.github.io'><img src='https://img.shields.io/badge/Project-Page-Green'></a><a href='https://github.com/Vision-CAIR/MiniGPT-4'><img src='https://img.shields.io/badge/Github-Code-blue'></a><a href='https://github.com/TsuTikgiau/blip2-llm/blob/release_prepare/MiniGPT_4.pdf'><img src='https://img.shields.io/badge/Paper-PDF-red'></a></div>
"""

#TODO show examples below

with gr.Blocks() as demo:
    gr.Markdown(title)
    gr.Markdown(SHARED_UI_WARNING)
    gr.Markdown(description)
    gr.Markdown(article)

    with gr.Row():
        with gr.Column(scale=0.5):
            image = gr.Image(type="pil")
            upload_button = gr.Button(value="Upload & Start Chat", interactive=True, variant="primary")
            clear = gr.Button("Restart")

            num_beams = gr.Slider(
                minimum=1,
                maximum=5,
                value=1,
                step=1,
                interactive=True,
                label="beam search numbers",
            )

            temperature = gr.Slider(
                minimum=0.1,
                maximum=2.0,
                value=1.0,
                step=0.1,
                interactive=True,
                label="Temperature",
            )


        with gr.Column():
            chat_state = gr.State()
            img_list = gr.State()
            chatbot = gr.Chatbot(label='MiniGPT-4')
            text_input = gr.Textbox(label='User', placeholder='Please upload your image first', interactive=False)

    upload_button.click(upload_img, [image, text_input, chat_state], [image, text_input, upload_button, chat_state, img_list])

    text_input.submit(gradio_ask, [text_input, chatbot, chat_state], [text_input, chatbot, chat_state]).then(
        gradio_answer, [chatbot, chat_state, img_list, num_beams, temperature], [chatbot, chat_state, img_list]
    )
    clear.click(gradio_reset, [chat_state, img_list], [chatbot, image, text_input, upload_button, chat_state, img_list], queue=False)

demo.launch(enable_queue=True)
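The `submit(...).then(...)` wiring above is gradio's chained-event pattern: the first callback records the user's turn, the second fills in the reply. A standalone sketch of the same pattern, with a stub echo function in place of the MiniGPT-4 chat (assuming the gradio 3.x list-of-pairs chatbot format used here):

```python
import gradio as gr

def ask(user_message, history):
    # First step of the chain: append the user's turn with an empty bot slot.
    return "", history + [[user_message, None]]

def answer(history):
    # Second step: fill the pending bot slot (a real app would call the model).
    history[-1][1] = "Echo: " + history[-1][0]
    return history

with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    text_input = gr.Textbox()
    # Mirrors the gradio_ask -> gradio_answer chain above.
    text_input.submit(ask, [text_input, chatbot], [text_input, chatbot]).then(
        answer, chatbot, chatbot
    )

demo.launch()
```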
spaces/CVPR/Dual-Key_Backdoor_Attacks/bottom-up-attention-vqa/tools/detection_features_converter.py
DELETED
@@ -1,161 +0,0 @@
"""
Reads in a tsv file with pre-trained bottom up attention features and
stores it in HDF5 format.  Also store {image_id: feature_idx}
as a pickle file.

Hierarchy of HDF5 file:

{ 'image_features': num_images x num_boxes x 2048 array of features
  'image_bb': num_images x num_boxes x 4 array of bounding boxes }
"""
from __future__ import print_function

import os
import sys
import argparse
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

import base64
import csv
import h5py
import _pickle as cPickle
import numpy as np
import utils
import tqdm

csv.field_size_limit(sys.maxsize)
FIELDNAMES = ['image_id', 'image_w', 'image_h', 'num_boxes', 'boxes', 'features']


def detection_features_converter(dataroot, ver, detector, feature_length, num_fixed_boxes):
    infile = os.path.join(dataroot, ver, "trainval_%s_%i.tsv" % (detector, num_fixed_boxes))

    train_data_file = os.path.join(dataroot, ver, 'train_%s_%i.hdf5' % (detector, num_fixed_boxes))
    val_data_file = os.path.join(dataroot, ver, 'val_%s_%i.hdf5' % (detector, num_fixed_boxes))
    train_indices_file = os.path.join(dataroot, ver, 'train_%s_%i_imgid2idx.pkl' % (detector, num_fixed_boxes))
    val_indices_file = os.path.join(dataroot, ver, 'val_%s_%i_imgid2idx.pkl' % (detector, num_fixed_boxes))
    train_ids_file = os.path.join(dataroot, 'train_ids.pkl')
    val_ids_file = os.path.join(dataroot, 'val_ids.pkl')

    h_train = h5py.File(train_data_file, "w")
    h_val = h5py.File(val_data_file, "w")

    if os.path.exists(train_ids_file) and os.path.exists(val_ids_file):
        train_imgids = cPickle.load(open(train_ids_file, 'rb'))
        val_imgids = cPickle.load(open(val_ids_file, 'rb'))
    else:
        train_imgids = utils.load_imageid(os.path.join(dataroot, 'clean', 'train2014'))
        val_imgids = utils.load_imageid(os.path.join(dataroot, 'clean', 'val2014'))
        cPickle.dump(train_imgids, open(train_ids_file, 'wb'))
        cPickle.dump(val_imgids, open(val_ids_file, 'wb'))

    train_indices = {}
    val_indices = {}

    train_img_features = h_train.create_dataset(
        'image_features', (len(train_imgids), num_fixed_boxes, feature_length), 'f')
    train_img_bb = h_train.create_dataset(
        'image_bb', (len(train_imgids), num_fixed_boxes, 4), 'f')
    train_spatial_img_features = h_train.create_dataset(
        'spatial_features', (len(train_imgids), num_fixed_boxes, 6), 'f')

    val_img_bb = h_val.create_dataset(
        'image_bb', (len(val_imgids), num_fixed_boxes, 4), 'f')
    val_img_features = h_val.create_dataset(
        'image_features', (len(val_imgids), num_fixed_boxes, feature_length), 'f')
    val_spatial_img_features = h_val.create_dataset(
        'spatial_features', (len(val_imgids), num_fixed_boxes, 6), 'f')

    train_counter = 0
    val_counter = 0

    print("reading tsv...")
    with open(infile, "r") as tsv_in_file:
        reader = csv.DictReader(tsv_in_file, delimiter='\t', fieldnames=FIELDNAMES)
        for item in tqdm.tqdm(reader):
            item['num_boxes'] = int(item['num_boxes'])
            image_id = int(item['image_id'])
            image_w = float(item['image_w'])
            image_h = float(item['image_h'])
            bboxes = np.frombuffer(
                base64.b64decode(item['boxes']),
                dtype=np.float32).reshape((item['num_boxes'], -1))
            box_width = bboxes[:, 2] - bboxes[:, 0]
            box_height = bboxes[:, 3] - bboxes[:, 1]
            scaled_width = box_width / image_w
            scaled_height = box_height / image_h
            scaled_x = bboxes[:, 0] / image_w
            scaled_y = bboxes[:, 1] / image_h

            box_width = box_width[..., np.newaxis]
            box_height = box_height[..., np.newaxis]
            scaled_width = scaled_width[..., np.newaxis]
            scaled_height = scaled_height[..., np.newaxis]
            scaled_x = scaled_x[..., np.newaxis]
            scaled_y = scaled_y[..., np.newaxis]

            spatial_features = np.concatenate(
                (scaled_x,
                 scaled_y,
                 scaled_x + scaled_width,
                 scaled_y + scaled_height,
                 scaled_width,
                 scaled_height),
                axis=1)

            if image_id in train_imgids:
                train_imgids.remove(image_id)
                train_indices[image_id] = train_counter
                train_img_bb[train_counter, :, :] = bboxes
                train_img_features[train_counter, :, :] = np.frombuffer(
                    base64.b64decode(item['features']),
                    dtype=np.float32).reshape((item['num_boxes'], -1))
                train_spatial_img_features[train_counter, :, :] = spatial_features
                train_counter += 1
            elif image_id in val_imgids:
                val_imgids.remove(image_id)
                val_indices[image_id] = val_counter
                val_img_bb[val_counter, :, :] = bboxes
                val_img_features[val_counter, :, :] = np.frombuffer(
                    base64.b64decode(item['features']),
                    dtype=np.float32).reshape((item['num_boxes'], -1))
                val_spatial_img_features[val_counter, :, :] = spatial_features
                val_counter += 1
            else:
                assert False, 'Unknown image id: %d' % image_id

    if len(train_imgids) != 0:
        print('Warning: train_image_ids is not empty')

    if len(val_imgids) != 0:
        print('Warning: val_image_ids is not empty')

    cPickle.dump(train_indices, open(train_indices_file, 'wb'))
    cPickle.dump(val_indices, open(val_indices_file, 'wb'))
    h_train.close()
    h_val.close()
    print("done!")


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataroot', type=str, default='../data/')
    parser.add_argument('--ver', type=str, default='clean', help='version of the VQAv2 dataset to process. "clean" for the original data. default: clean')
    parser.add_argument('--detector', type=str, default='R-50')
    parser.add_argument('--feat', type=int, default=1024, help='feature size')
    parser.add_argument('--nb', type=int, default=36)
    args = parser.parse_args()
    detection_features_converter(args.dataroot, args.ver, args.detector, args.feat, args.nb)
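A minimal sketch of reading the converter's output back, assuming the default arguments above were used (dataroot='../data/', ver='clean', detector='R-50', nb=36); the image id is made up:

```python
import _pickle as cPickle

import h5py

# The imgid2idx pickle maps a COCO image id to its row in the HDF5 datasets.
indices = cPickle.load(open('../data/clean/train_R-50_36_imgid2idx.pkl', 'rb'))

with h5py.File('../data/clean/train_R-50_36.hdf5', 'r') as h:
    idx = indices[42]  # 42 is a hypothetical COCO image id
    features = h['image_features'][idx]   # (36, feature_length) region features
    boxes = h['image_bb'][idx]            # (36, 4) raw bounding boxes
    spatials = h['spatial_features'][idx] # (36, 6) normalized box geometry
```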
spaces/CVPR/LIVE/thrust/thrust/system/tbb/detail/count.h
DELETED
@@ -1,23 +0,0 @@
/*
 *  Copyright 2008-2013 NVIDIA Corporation
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 */

#pragma once

#include <thrust/detail/config.h>

// this system inherits count
#include <thrust/system/cpp/detail/count.h>