Commit a4657c0
Parent(s): 228cf21
Update parquet files (step 48 of 397)
This view is limited to 50 files because it contains too many changes; see the raw diff for the complete set of changes.
- spaces/1368565466ki/Satdia/models.py +0 -533
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/4K Video Downloader Pro How to Download Videos in 4K Resolution or Any Other Quality You Want.md +0 -32
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Bosch Kts 200 Keygen Download.md +0 -25
- spaces/1gistliPinn/ChatGPT4/Examples/Ansys 12.1 Free Download.md +0 -6
- spaces/1phancelerku/anime-remove-background/Demon Slayer Episode 2 APK - The Next Chapter of the Epic Action Game for Android.md +0 -109
- spaces/1phancelerku/anime-remove-background/Free Download Red Alert 2 Yuri 39s Revenge For Windows 10 EXCLUSIVE.md +0 -117
- spaces/2ndelement/voicevox/get_cost_candidates.py +0 -91
- spaces/7hao/bingo/src/lib/hooks/use-bing.ts +0 -173
- spaces/AI-ANK/blackmirroroffice/app.py +0 -43
- spaces/AIGC-Audio/AudioGPT/text_to_speech/modules/tts/portaspeech/portaspeech_flow.py +0 -75
- spaces/AISuperheroes/10SL-RealTimeDSDashboard-Live-AIUIUX/README.md +0 -13
- spaces/AIZero2HeroBootcamp/StaticHTML5Playcanvas/style.css +0 -28
- spaces/AJRFan/dreambooth-training/train_dreambooth.py +0 -818
- spaces/Abhilashvj/planogram-compliance/utils/metrics.py +0 -465
- spaces/Adapter/T2I-Adapter/ldm/modules/extra_condition/openpose/util.py +0 -203
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/facebook/Facebook.d.ts +0 -2
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/hiddenedit/HiddenEdit.js +0 -2
- spaces/AlexWang/lama/saicinpainting/training/modules/fake_fakes.py +0 -47
- spaces/Amr453/Transcription/app.py +0 -109
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/latent_diffusion/test_latent_diffusion_uncond.py +0 -116
- spaces/Andy1621/uniformer_image_segmentation/configs/mobilenet_v2/fcn_m-v2-d8_512x1024_80k_cityscapes.py +0 -12
- spaces/AnishKumbhar/ChatBot/text-generation-webui-main/docs/Audio-Notification.md +0 -14
- spaces/AquaSuisei/ChatGPTXE/modules/openai_func.py +0 -65
- spaces/Ariharasudhan/YoloV5/utils/flask_rest_api/restapi.py +0 -48
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/models/scheme.py +0 -31
- spaces/AutoLLM/AutoAgents/README-main.md +0 -103
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/utils/filesystem.py +0 -153
- spaces/Big-Web/MMSD/env/Scripts/activate.bat +0 -34
- spaces/CNXT/TXT2PiX/app.py +0 -3
- spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/dev/packaging/pkg_helpers.bash +0 -52
- spaces/CVPR/LIVE/thrust/thrust/system/cpp/detail/assign_value.h +0 -23
- spaces/ChandraMohanNayal/AutoGPT/autogpt/speech/eleven_labs.py +0 -86
- spaces/CormacMc/projectsub6/README.md +0 -13
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/aiohttp/web_request.py +0 -882
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/charset_normalizer/cli/__init__.py +0 -0
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/tables/G__l_a_t.py +0 -234
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fsspec/gui.py +0 -411
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/components/dataset.py +0 -137
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/huggingface_hub/inference/_generated/_async_client.py +0 -1269
- spaces/DaleChen/AutoGPT/autogpt/agent/__init__.py +0 -4
- spaces/Datasculptor/MusicGen/tests/modules/test_conv.py +0 -203
- spaces/Datatrooper/zero-shot-image-classification/app.py +0 -28
- spaces/Dinoking/Guccio-AI-Designer/models/stylegan/stylegan_tf/dnnlib/tflib/autosummary.py +0 -184
- spaces/DonDoesStuff/sd_xl_base_0.9/style.css +0 -84
- spaces/Duskfallcrew/shindi-realistic-skin-style/README.md +0 -13
- spaces/ECCV2022/bytetrack/tutorials/ctracker/mot_online/matching.py +0 -198
- spaces/EXPOSUREEE/Ai-Image-Enhancer/inference_realesrgan.py +0 -128
- spaces/Epitech/hand-sign-detection/app.py +0 -45
- spaces/EuroPython2022/Leaderboard/app.py +0 -39
- spaces/EuroPython2022/clickbaitonator/fudge/README.md +0 -155
spaces/1368565466ki/Satdia/models.py
DELETED
@@ -1,533 +0,0 @@
-import math
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-import commons
-import modules
-import attentions
-import monotonic_align
-
-from torch.nn import Conv1d, ConvTranspose1d, Conv2d
-from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
-from commons import init_weights, get_padding
-
-
-class StochasticDurationPredictor(nn.Module):
-  def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0):
-    super().__init__()
-    filter_channels = in_channels # it needs to be removed from future version.
-    self.in_channels = in_channels
-    self.filter_channels = filter_channels
-    self.kernel_size = kernel_size
-    self.p_dropout = p_dropout
-    self.n_flows = n_flows
-    self.gin_channels = gin_channels
-
-    self.log_flow = modules.Log()
-    self.flows = nn.ModuleList()
-    self.flows.append(modules.ElementwiseAffine(2))
-    for i in range(n_flows):
-      self.flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3))
-      self.flows.append(modules.Flip())
-
-    self.post_pre = nn.Conv1d(1, filter_channels, 1)
-    self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1)
-    self.post_convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
-    self.post_flows = nn.ModuleList()
-    self.post_flows.append(modules.ElementwiseAffine(2))
-    for i in range(4):
-      self.post_flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3))
-      self.post_flows.append(modules.Flip())
-
-    self.pre = nn.Conv1d(in_channels, filter_channels, 1)
-    self.proj = nn.Conv1d(filter_channels, filter_channels, 1)
-    self.convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
-    if gin_channels != 0:
-      self.cond = nn.Conv1d(gin_channels, filter_channels, 1)
-
-  def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0):
-    x = torch.detach(x)
-    x = self.pre(x)
-    if g is not None:
-      g = torch.detach(g)
-      x = x + self.cond(g)
-    x = self.convs(x, x_mask)
-    x = self.proj(x) * x_mask
-
-    if not reverse:
-      flows = self.flows
-      assert w is not None
-
-      logdet_tot_q = 0
-      h_w = self.post_pre(w)
-      h_w = self.post_convs(h_w, x_mask)
-      h_w = self.post_proj(h_w) * x_mask
-      e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask
-      z_q = e_q
-      for flow in self.post_flows:
-        z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w))
-        logdet_tot_q += logdet_q
-      z_u, z1 = torch.split(z_q, [1, 1], 1)
-      u = torch.sigmoid(z_u) * x_mask
-      z0 = (w - u) * x_mask
-      logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1,2])
-      logq = torch.sum(-0.5 * (math.log(2*math.pi) + (e_q**2)) * x_mask, [1,2]) - logdet_tot_q
-
-      logdet_tot = 0
-      z0, logdet = self.log_flow(z0, x_mask)
-      logdet_tot += logdet
-      z = torch.cat([z0, z1], 1)
-      for flow in flows:
-        z, logdet = flow(z, x_mask, g=x, reverse=reverse)
-        logdet_tot = logdet_tot + logdet
-      nll = torch.sum(0.5 * (math.log(2*math.pi) + (z**2)) * x_mask, [1,2]) - logdet_tot
-      return nll + logq # [b]
-    else:
-      flows = list(reversed(self.flows))
-      flows = flows[:-2] + [flows[-1]] # remove a useless vflow
-      z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale
-      for flow in flows:
-        z = flow(z, x_mask, g=x, reverse=reverse)
-      z0, z1 = torch.split(z, [1, 1], 1)
-      logw = z0
-      return logw
-
-
-class DurationPredictor(nn.Module):
-  def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0):
-    super().__init__()
-
-    self.in_channels = in_channels
-    self.filter_channels = filter_channels
-    self.kernel_size = kernel_size
-    self.p_dropout = p_dropout
-    self.gin_channels = gin_channels
-
-    self.drop = nn.Dropout(p_dropout)
-    self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size//2)
-    self.norm_1 = modules.LayerNorm(filter_channels)
-    self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size//2)
-    self.norm_2 = modules.LayerNorm(filter_channels)
-    self.proj = nn.Conv1d(filter_channels, 1, 1)
-
-    if gin_channels != 0:
-      self.cond = nn.Conv1d(gin_channels, in_channels, 1)
-
-  def forward(self, x, x_mask, g=None):
-    x = torch.detach(x)
-    if g is not None:
-      g = torch.detach(g)
-      x = x + self.cond(g)
-    x = self.conv_1(x * x_mask)
-    x = torch.relu(x)
-    x = self.norm_1(x)
-    x = self.drop(x)
-    x = self.conv_2(x * x_mask)
-    x = torch.relu(x)
-    x = self.norm_2(x)
-    x = self.drop(x)
-    x = self.proj(x * x_mask)
-    return x * x_mask
-
-
-class TextEncoder(nn.Module):
-  def __init__(self,
-      n_vocab,
-      out_channels,
-      hidden_channels,
-      filter_channels,
-      n_heads,
-      n_layers,
-      kernel_size,
-      p_dropout):
-    super().__init__()
-    self.n_vocab = n_vocab
-    self.out_channels = out_channels
-    self.hidden_channels = hidden_channels
-    self.filter_channels = filter_channels
-    self.n_heads = n_heads
-    self.n_layers = n_layers
-    self.kernel_size = kernel_size
-    self.p_dropout = p_dropout
-
-    self.emb = nn.Embedding(n_vocab, hidden_channels)
-    nn.init.normal_(self.emb.weight, 0.0, hidden_channels**-0.5)
-
-    self.encoder = attentions.Encoder(
-      hidden_channels,
-      filter_channels,
-      n_heads,
-      n_layers,
-      kernel_size,
-      p_dropout)
-    self.proj= nn.Conv1d(hidden_channels, out_channels * 2, 1)
-
-  def forward(self, x, x_lengths):
-    x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h]
-    x = torch.transpose(x, 1, -1) # [b, h, t]
-    x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
-
-    x = self.encoder(x * x_mask, x_mask)
-    stats = self.proj(x) * x_mask
-
-    m, logs = torch.split(stats, self.out_channels, dim=1)
-    return x, m, logs, x_mask
-
-
-class ResidualCouplingBlock(nn.Module):
-  def __init__(self,
-      channels,
-      hidden_channels,
-      kernel_size,
-      dilation_rate,
-      n_layers,
-      n_flows=4,
-      gin_channels=0):
-    super().__init__()
-    self.channels = channels
-    self.hidden_channels = hidden_channels
-    self.kernel_size = kernel_size
-    self.dilation_rate = dilation_rate
-    self.n_layers = n_layers
-    self.n_flows = n_flows
-    self.gin_channels = gin_channels
-
-    self.flows = nn.ModuleList()
-    for i in range(n_flows):
-      self.flows.append(modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True))
-      self.flows.append(modules.Flip())
-
-  def forward(self, x, x_mask, g=None, reverse=False):
-    if not reverse:
-      for flow in self.flows:
-        x, _ = flow(x, x_mask, g=g, reverse=reverse)
-    else:
-      for flow in reversed(self.flows):
-        x = flow(x, x_mask, g=g, reverse=reverse)
-    return x
-
-
-class PosteriorEncoder(nn.Module):
-  def __init__(self,
-      in_channels,
-      out_channels,
-      hidden_channels,
-      kernel_size,
-      dilation_rate,
-      n_layers,
-      gin_channels=0):
-    super().__init__()
-    self.in_channels = in_channels
-    self.out_channels = out_channels
-    self.hidden_channels = hidden_channels
-    self.kernel_size = kernel_size
-    self.dilation_rate = dilation_rate
-    self.n_layers = n_layers
-    self.gin_channels = gin_channels
-
-    self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
-    self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels)
-    self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
-
-  def forward(self, x, x_lengths, g=None):
-    x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
-    x = self.pre(x) * x_mask
-    x = self.enc(x, x_mask, g=g)
-    stats = self.proj(x) * x_mask
-    m, logs = torch.split(stats, self.out_channels, dim=1)
-    z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
-    return z, m, logs, x_mask
-
-
-class Generator(torch.nn.Module):
-  def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=0):
-    super(Generator, self).__init__()
-    self.num_kernels = len(resblock_kernel_sizes)
-    self.num_upsamples = len(upsample_rates)
-    self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3)
-    resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2
-
-    self.ups = nn.ModuleList()
-    for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
-      self.ups.append(weight_norm(
-        ConvTranspose1d(upsample_initial_channel//(2**i), upsample_initial_channel//(2**(i+1)),
-                        k, u, padding=(k-u)//2)))
-
-    self.resblocks = nn.ModuleList()
-    for i in range(len(self.ups)):
-      ch = upsample_initial_channel//(2**(i+1))
-      for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)):
-        self.resblocks.append(resblock(ch, k, d))
-
-    self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
-    self.ups.apply(init_weights)
-
-    if gin_channels != 0:
-      self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
-
-  def forward(self, x, g=None):
-    x = self.conv_pre(x)
-    if g is not None:
-      x = x + self.cond(g)
-
-    for i in range(self.num_upsamples):
-      x = F.leaky_relu(x, modules.LRELU_SLOPE)
-      x = self.ups[i](x)
-      xs = None
-      for j in range(self.num_kernels):
-        if xs is None:
-          xs = self.resblocks[i*self.num_kernels+j](x)
-        else:
-          xs += self.resblocks[i*self.num_kernels+j](x)
-      x = xs / self.num_kernels
-    x = F.leaky_relu(x)
-    x = self.conv_post(x)
-    x = torch.tanh(x)
-
-    return x
-
-  def remove_weight_norm(self):
-    print('Removing weight norm...')
-    for l in self.ups:
-      remove_weight_norm(l)
-    for l in self.resblocks:
-      l.remove_weight_norm()
-
-
-class DiscriminatorP(torch.nn.Module):
-  def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
-    super(DiscriminatorP, self).__init__()
-    self.period = period
-    self.use_spectral_norm = use_spectral_norm
-    norm_f = weight_norm if use_spectral_norm == False else spectral_norm
-    self.convs = nn.ModuleList([
-      norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
-      norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
-      norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
-      norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
-      norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))),
-    ])
-    self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
-
-  def forward(self, x):
-    fmap = []
-
-    # 1d to 2d
-    b, c, t = x.shape
-    if t % self.period != 0: # pad first
-      n_pad = self.period - (t % self.period)
-      x = F.pad(x, (0, n_pad), "reflect")
-      t = t + n_pad
-    x = x.view(b, c, t // self.period, self.period)
-
-    for l in self.convs:
-      x = l(x)
-      x = F.leaky_relu(x, modules.LRELU_SLOPE)
-      fmap.append(x)
-    x = self.conv_post(x)
-    fmap.append(x)
-    x = torch.flatten(x, 1, -1)
-
-    return x, fmap
-
-
-class DiscriminatorS(torch.nn.Module):
-  def __init__(self, use_spectral_norm=False):
-    super(DiscriminatorS, self).__init__()
-    norm_f = weight_norm if use_spectral_norm == False else spectral_norm
-    self.convs = nn.ModuleList([
-      norm_f(Conv1d(1, 16, 15, 1, padding=7)),
-      norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
-      norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
-      norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
-      norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
-      norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
-    ])
-    self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
-
-  def forward(self, x):
-    fmap = []
-
-    for l in self.convs:
-      x = l(x)
-      x = F.leaky_relu(x, modules.LRELU_SLOPE)
-      fmap.append(x)
-    x = self.conv_post(x)
-    fmap.append(x)
-    x = torch.flatten(x, 1, -1)
-
-    return x, fmap
-
-
-class MultiPeriodDiscriminator(torch.nn.Module):
-  def __init__(self, use_spectral_norm=False):
-    super(MultiPeriodDiscriminator, self).__init__()
-    periods = [2,3,5,7,11]
-
-    discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
-    discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods]
-    self.discriminators = nn.ModuleList(discs)
-
-  def forward(self, y, y_hat):
-    y_d_rs = []
-    y_d_gs = []
-    fmap_rs = []
-    fmap_gs = []
-    for i, d in enumerate(self.discriminators):
-      y_d_r, fmap_r = d(y)
-      y_d_g, fmap_g = d(y_hat)
-      y_d_rs.append(y_d_r)
-      y_d_gs.append(y_d_g)
-      fmap_rs.append(fmap_r)
-      fmap_gs.append(fmap_g)
-
-    return y_d_rs, y_d_gs, fmap_rs, fmap_gs
-
-
-
-class SynthesizerTrn(nn.Module):
-  """
-  Synthesizer for Training
-  """
-
-  def __init__(self,
-    n_vocab,
-    spec_channels,
-    segment_size,
-    inter_channels,
-    hidden_channels,
-    filter_channels,
-    n_heads,
-    n_layers,
-    kernel_size,
-    p_dropout,
-    resblock,
-    resblock_kernel_sizes,
-    resblock_dilation_sizes,
-    upsample_rates,
-    upsample_initial_channel,
-    upsample_kernel_sizes,
-    n_speakers=0,
-    gin_channels=0,
-    use_sdp=True,
-    **kwargs):
-
-    super().__init__()
-    self.n_vocab = n_vocab
-    self.spec_channels = spec_channels
-    self.inter_channels = inter_channels
-    self.hidden_channels = hidden_channels
-    self.filter_channels = filter_channels
-    self.n_heads = n_heads
-    self.n_layers = n_layers
-    self.kernel_size = kernel_size
-    self.p_dropout = p_dropout
-    self.resblock = resblock
-    self.resblock_kernel_sizes = resblock_kernel_sizes
-    self.resblock_dilation_sizes = resblock_dilation_sizes
-    self.upsample_rates = upsample_rates
-    self.upsample_initial_channel = upsample_initial_channel
-    self.upsample_kernel_sizes = upsample_kernel_sizes
-    self.segment_size = segment_size
-    self.n_speakers = n_speakers
-    self.gin_channels = gin_channels
-
-    self.use_sdp = use_sdp
-
-    self.enc_p = TextEncoder(n_vocab,
-        inter_channels,
-        hidden_channels,
-        filter_channels,
-        n_heads,
-        n_layers,
-        kernel_size,
-        p_dropout)
-    self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels)
-    self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels)
-    self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels)
-
-    if use_sdp:
-      self.dp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels)
-    else:
-      self.dp = DurationPredictor(hidden_channels, 256, 3, 0.5, gin_channels=gin_channels)
-
-    if n_speakers > 1:
-      self.emb_g = nn.Embedding(n_speakers, gin_channels)
-
-  def forward(self, x, x_lengths, y, y_lengths, sid=None):
-
-    x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths)
-    if self.n_speakers > 0:
-      g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]
-    else:
-      g = None
-
-    z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
-    z_p = self.flow(z, y_mask, g=g)
-
-    with torch.no_grad():
-      # negative cross-entropy
-      s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t]
-      neg_cent1 = torch.sum(-0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True) # [b, 1, t_s]
-      neg_cent2 = torch.matmul(-0.5 * (z_p ** 2).transpose(1, 2), s_p_sq_r) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]
-      neg_cent3 = torch.matmul(z_p.transpose(1, 2), (m_p * s_p_sq_r)) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]
-      neg_cent4 = torch.sum(-0.5 * (m_p ** 2) * s_p_sq_r, [1], keepdim=True) # [b, 1, t_s]
-      neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4
-
-      attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
-      attn = monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach()
-
-    w = attn.sum(2)
-    if self.use_sdp:
-      l_length = self.dp(x, x_mask, w, g=g)
-      l_length = l_length / torch.sum(x_mask)
-    else:
-      logw_ = torch.log(w + 1e-6) * x_mask
-      logw = self.dp(x, x_mask, g=g)
-      l_length = torch.sum((logw - logw_)**2, [1,2]) / torch.sum(x_mask) # for averaging
-
-    # expand prior
-    m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2)
-    logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2)
-
-    z_slice, ids_slice = commons.rand_slice_segments(z, y_lengths, self.segment_size)
-    o = self.dec(z_slice, g=g)
-    return o, l_length, attn, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
-
-  def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None):
-    x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths)
-    if self.n_speakers > 0:
-      g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]
-    else:
-      g = None
-
-    if self.use_sdp:
-      logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w)
-    else:
-      logw = self.dp(x, x_mask, g=g)
-    w = torch.exp(logw) * x_mask * length_scale
-    w_ceil = torch.ceil(w)
-    y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()
-    y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype)
-    attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
-    attn = commons.generate_path(w_ceil, attn_mask)
-
-    m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t']
-    logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t']
-
-    z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale
-    z = self.flow(z_p, y_mask, g=g, reverse=True)
-    o = self.dec((z * y_mask)[:,:,:max_len], g=g)
-    return o, attn, y_mask, (z, z_p, m_p, logs_p)
-
-  def voice_conversion(self, y, y_lengths, sid_src, sid_tgt):
-    assert self.n_speakers > 0, "n_speakers have to be larger than 0."
-    g_src = self.emb_g(sid_src).unsqueeze(-1)
-    g_tgt = self.emb_g(sid_tgt).unsqueeze(-1)
-    z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g_src)
-    z_p = self.flow(z, y_mask, g=g_src)
-    z_hat = self.flow(z_p, y_mask, g=g_tgt, reverse=True)
-    o_hat = self.dec(z_hat * y_mask, g=g_tgt)
-    return o_hat, y_mask, (z, z_p, z_hat)
-
spaces/1acneusushi/gradio-2dmoleculeeditor/data/4K Video Downloader Pro How to Download Videos in 4K Resolution or Any Other Quality You Want.md
DELETED
@@ -1,32 +0,0 @@
-<br />
-<h1>4K Video Downloader Pro: How to Download High-Quality Videos for Free</h1>
-<p>If you are looking for a way to download high-quality videos from YouTube and other online platforms, you might be interested in 4K Video Downloader Pro. This is a software that allows you to download videos in 4K resolution or any other quality you want. You can also download audio, subtitles, playlists, channels, and more. In this article, we will show you how to use 4K Video Downloader Pro and what are the benefits of using it.</p>
-<h2>What is 4K Video Downloader Pro?</h2>
-<p>4K Video Downloader Pro is a software that lets you download videos from various online platforms, such as YouTube, Vimeo, Facebook, TikTok, Instagram, and more. You can choose the quality and format of the video you want to download, from 4K to 8K, MP4 to MKV, 3D to 360°. You can also download audio only, subtitles, annotations, thumbnails, and metadata. You can also download entire playlists and channels with one click.</p>
-<h2>4k video downloader pro</h2><br /><p><b><b>Download</b> ⚡ <a href="https://byltly.com/2uKwSb">https://byltly.com/2uKwSb</a></b></p><br /><br />
-<p>4K Video Downloader Pro is cross-platform and works on Windows, macOS, and Linux. It has a user-friendly interface and a simple workflow. You just need to copy the link of the video you want to download and paste it into the software. Then you can choose the settings you want and start the download. You can also use the smart mode feature to apply your preferred settings to all downloads.</p>
-<h2>How to use 4K Video Downloader Pro?</h2>
-<p>To use 4K Video Downloader Pro, you need to buy the software from the official website <a href="https://www.4kdownload.com/products/videodownloader/">https://www.4kdownload.com/products/videodownloader/</a>. You can choose between a personal license or a business license depending on your needs. You can also try the free version of the software with some limitations.</p>
-<p>Once you have the software, you can follow these steps to download videos:</p>
-<ol>
-<li>Launch 4K Video Downloader Pro and click on the paste link button.</li>
-<li>Copy the link of the video you want to download from your browser and paste it into the software.</li>
-<li>Select the quality and format of the video you want to download. You can also choose to download subtitles, audio only, or extra files.</li>
-<li>Click on the download button and wait for the process to finish.</li>
-<li>Enjoy your downloaded video on your device or player of choice.</li>
-</ol>
-<h2>What are the benefits of using 4K Video Downloader Pro?</h2>
-<p>Using 4K Video Downloader Pro has some advantages that make it worth buying. Here are some of them:</p>
-<ul>
-<li>You can download high-quality videos in 4K resolution or any other quality you want.</li>
-<li>You can download videos from various online platforms, not just YouTube.</li>
-<li>You can download audio only, subtitles, playlists, channels, and more.</li>
-<li>You can use smart mode to apply your preferred settings to all downloads.</li>
-<li>You can use proxy servers to bypass geo-restrictions and access blocked videos.</li>
-<li>You can enjoy fast and reliable downloads without ads or interruptions.</li>
-<li>You can support the developers of the software and get updates and support.</li>
-</ul>
-<h2>Conclusion</h2>
-<p>4K Video Downloader Pro is a software that allows you to download high-quality videos from YouTube and other online platforms for free. It has a user-friendly interface and a simple workflow. It also offers various features and options that make it a versatile and powerful tool. If you are interested in using 4K Video Downloader Pro, you can buy it from the official website and start downloading your favorite videos.</p> ddb901b051<br />
-<br />
-<br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Bosch Kts 200 Keygen Download.md
DELETED
@@ -1,25 +0,0 @@
-<br />
-<h1>Bosch KTS 200: A Compact and Reliable System Tester for ECU Diagnosis</h1>
-<p>If you are looking for a fast and easy way to diagnose the control units of various vehicles, you might want to consider the Bosch KTS 200. This handheld device is designed to offer reliable vehicle identification, complete ECU diagnosis, instant test sequence with easy-to-understand guidance, full test depth and comprehensive diagnosis functions, high market coverage, and regular software updates.</p>
-<h2>bosch kts 200 keygen download</h2><br /><p><b><b>Download</b> • <a href="https://byltly.com/2uKzyv">https://byltly.com/2uKzyv</a></b></p><br /><br />
-<p>The Bosch KTS 200 uses the ESI[Tronic] software, which provides access to a vast database of vehicle models and systems. You can easily connect the device to the vehicle's OBD socket and perform a quick scan or a detailed diagnosis. The device has a 3.5 inch colour display that shows clear and intuitive menus and graphics. You can also print or save the diagnosis results for further analysis or documentation.</p>
-<p>The Bosch KTS 200 is lightweight and compact, making it ideal for mobile use or small workshops. It has a robust and ergonomic design that can withstand harsh conditions. It also has a rechargeable battery that allows you to use it without external power supply. The device comes with a USB cable, an OBD adapter cable, a carrying case, and an instruction manual.</p>
-<p>With the Bosch KTS 200, you can perform ECU diagnosis on various systems such as engine, ABS, airbag, transmission, immobilizer, climate control, instrument cluster, and more. You can also read and erase fault codes, view live data and freeze frames, perform actuator tests and adaptations, reset service intervals, and calibrate sensors.</p>
-<p>The Bosch KTS 200 is compatible with most European, Asian, and American vehicles from 1996 onwards. It supports various protocols such as ISO 9141-2, ISO 14230 (KWP), ISO 15765 (CAN), SAE J1850 (PWM/VPW), and more. It also supports various languages such as English, German, French, Spanish, Italian, Portuguese, Turkish, Polish, and more.</p>
-<p>If you want to get the most out of your Bosch KTS 200, you can also subscribe to the ESI[Tronic] online service, which provides you with regular software updates, technical information, wiring diagrams, troubleshooting guides, service bulletins, and more.</p>
-<p>The Bosch KTS 200 is a powerful and versatile system tester that can help you diagnose and repair various vehicle systems with ease and accuracy. It is a great entry-level device that offers maximum performance at an affordable price. To order yours today or to find out more about its features and benefits, visit <a href="http://www.adesystems.co.uk/garage-equipment/kts-diagnostic-equipment/kts-dcu/25/kts-200">www.adesystems.co.uk</a> or call us at 01234 567890.</p>
-
-<h2>How to Use the Bosch KTS 200 for ECU Diagnosis</h2>
-<p>Using the Bosch KTS 200 for ECU diagnosis is simple and straightforward. Here are the steps you need to follow:</p>
-<ol>
-<li>Turn on the device and select the language and the vehicle type.</li>
-<li>Connect the device to the vehicle's OBD socket using the OBD adapter cable.</li>
-<li>Wait for the device to identify the vehicle and display the available systems.</li>
-<li>Select the system you want to diagnose and press OK.</li>
-<li>Select the diagnosis function you want to perform, such as fault memory, live data, actuator test, adaptation, or service reset.</li>
-<li>Follow the on-screen instructions and guidance to complete the diagnosis.</li>
-<li>View, print, or save the diagnosis results as needed.</li>
-</ol>
-<p>The Bosch KTS 200 also has a help function that provides you with useful information and tips on how to use the device and perform various diagnosis functions. You can access the help function by pressing the F1 key at any time.</p> cec2833e83<br />
-<br />
-<br />
spaces/1gistliPinn/ChatGPT4/Examples/Ansys 12.1 Free Download.md
DELETED
@@ -1,6 +0,0 @@
-<h2>Ansys 12.1 Free Download</h2><br /><p><b><b>Download Zip</b> ☆☆☆ <a href="https://imgfil.com/2uy0mg">https://imgfil.com/2uy0mg</a></b></p><br /><br />
-
-Authorama.com features a nice selection of free books written in HTML and XHTML, which basically means that they ... Tutorial 18 (Using the VOF Model): Updated for ANSYS FLUENT 12.1 ... ANSYS FLUENT 14.0 Tutorial Guide | | download. 1fdad05405<br />
-<br />
-<br />
-<p></p>
spaces/1phancelerku/anime-remove-background/Demon Slayer Episode 2 APK - The Next Chapter of the Epic Action Game for Android.md
DELETED
@@ -1,109 +0,0 @@
-<br />
-<h1>Demon Slayer Game APK: How to Download and Play the Best Anime Games on Your Android Device</h1>
-<h2>Introduction</h2>
-<p>If you are a fan of anime and manga, you have probably heard of Demon Slayer, one of the most popular and acclaimed series in recent years. Demon Slayer follows the story of Tanjiro Kamado, a young boy who becomes a demon slayer after his family is killed by demons and his sister Nezuko is turned into one. Along with his friends and allies, Tanjiro embarks on a quest to find a way to cure Nezuko and defeat the powerful demon lord Muzan Kibutsuji.</p>
-<p>But did you know that you can also enjoy the thrilling adventures of Demon Slayer on your Android device? That's right, there are several Demon Slayer games that you can download and play on your smartphone or tablet, thanks to the APK files that are available online. APK files are application packages that allow you to install apps that are not available on the official Google Play Store. In this article, we will show you how to download and play two of the best Demon Slayer games on your Android device: The Hinokami Chronicles and Rage of Demon King.</p>
-<h2>demon slayer game apk</h2><br /><p><b><b>DOWNLOAD</b> ☆☆☆ <a href="https://jinyurl.com/2uNP9N">https://jinyurl.com/2uNP9N</a></b></p><br /><br />
-<h2>Demon Slayer Game APK: The Hinokami Chronicles</h2>
-<h3>What is The Hinokami Chronicles?</h3>
-<p>The Hinokami Chronicles is the official game of the Demon Slayer anime, developed by CyberConnect2 and published by Sega. It is a spectacular arena fighter that lets you relive the memorable moments of the anime, from the "Tanjiro Kamado, Unwavering Resolve Arc" to the "Mugen Train Arc". You can also play as your favorite characters from the series, such as Tanjiro, Nezuko, Zenitsu, Inosuke, Giyu, Shinobu, and more.</p>
-<h3>How to download and install The Hinokami Chronicles APK?</h3>
-<p>The Hinokami Chronicles is available for Nintendo Switch, PlayStation 4, PlayStation 5, Xbox One, Xbox Series X/S, and Steam. However, if you want to play it on your Android device, you will need to download and install the APK file from a reliable source. Here are the steps to do so:</p>
-<ol>
-<li>Go to <a href="(^1^)">https://demonslayer-hinokami.sega.com/</a> and click on "Buy Now".</li>
-<li>Select "Steam" as your platform and click on "Buy Now" again.</li>
-<li>You will be redirected to the Steam page of the game. Click on "Add to Cart" and complete your purchase.</li>
-<li>After you have bought the game, go to <a href="(^2^)">https://www.apkcombo.com/search/demon-slayer-the-hinokami-chronicles/</a> and download the latest version of the APK file.</li>
-<li>Once you have downloaded the APK file, go to your device's settings and enable "Unknown Sources" under security options.</li>
-<li>Locate the APK file on your device's storage and tap on it to install it.</li>
-<li>Launch the game and enjoy!</li>
-</ol>
-<h3>What are the features and gameplay of The Hinokami Chronicles?</h3>
-<p>The Hinokami Chronicles is a game that will immerse you in the world of Demon Slayer with its stunning graphics, original voice cast, and faithful adaptation of the anime's story. You can play in two modes: Solo Mode and Versus Mode. In Solo Mode, you can experience the story of the anime from different perspectives, such as Tanjiro, Nezuko, and other demon slayers. You can also unlock new characters, costumes, and skills as you progress. In Versus Mode, you can battle against other players online or offline, using your favorite characters and their unique abilities. You can also customize your own avatar and fight in various stages inspired by the anime.</p>
-<p>The gameplay of The Hinokami Chronicles is fast-paced and exciting, as you can use various combos, special moves, and ultimate attacks to defeat your enemies. You can also activate the "Boost Mode" to unleash your full potential and turn the tide of the battle. The game also features a dynamic camera system that follows the action from different angles, making you feel like you are watching the anime.</p>
-<h2>Demon Slayer Game APK: Rage of Demon King</h2>
-<h3>What is Rage of Demon King?</h3>
-<p>Rage of Demon King is another game based on the Demon Slayer anime, developed by NetEase Games and published by Aniplex. It is a role-playing game that lets you create your own character and join the Demon Slayer Corps. You can also interact with the characters from the anime, such as Tanjiro, Nezuko, Zenitsu, Inosuke, and more.</p>
-<h3>How to download and install Rage of Demon King APK?</h3>
-<p>Rage of Demon King is available for iOS and Android devices. However, if you want to play it on your Android device, you will need to download and install the APK file from a reliable source. Here are the steps to do so:</p>
-<ol>
-<li>Go to <a href="">https://www.tap.io/app/214168</a> and click on "Download APK".</li>
-<li>You will be redirected to a page where you can choose a mirror site to download the APK file.</li>
-<li>Once you have downloaded the APK file, go to your device's settings and enable "Unknown Sources" under security options.</li>
-<li>Locate the APK file on your device's storage and tap on it to install it.</li>
-<li>Launch the game and enjoy!</li>
-</ol>
-<h3>What are the features and gameplay of Rage of Demon King?</h3>
-<p>Rage of Demon King is a game that will let you explore the world of Demon Slayer with its rich graphics, immersive sound effects, and original voice cast. You can play in three modes: Story Mode, Adventure Mode, and Battle Mode. In Story Mode, you can follow the plot of the anime and participate in various events and missions. You can also collect items, upgrade your equipment, and learn new skills. In Adventure Mode, you can explore different regions and encounter various demons and enemies. You can also team up with other players and cooperate in challenging quests. In Battle Mode, you can compete against other players in real-time battles, using your skills and strategies.</p>
-<p>The gameplay of Rage of Demon King is engaging and fun, as you can customize your character's appearance, personality, and fighting style. You can also choose from different classes, such as swordsman, archer, mage, or healer. You can also summon different companions to assist you in combat, such as Nezuko, Chuntaro, or Yushiro. The game also features a social system that allows you to chat with other players, join guilds, and make friends.</p>
-<p>demon slayer game apk download<br />
-demon slayer game apk mod<br />
-demon slayer game apk offline<br />
-demon slayer game apk free<br />
-demon slayer game apk android<br />
-demon slayer game apk latest version<br />
-demon slayer game apk obb<br />
-demon slayer game apk for pc<br />
-demon slayer game apk hack<br />
-demon slayer game apk unlimited money<br />
-demon slayer game apk english<br />
-demon slayer game apk full<br />
-demon slayer game apk online<br />
-demon slayer game apk data<br />
-demon slayer game apk 2023<br />
-demon slayer game apk update<br />
-demon slayer game apk no verification<br />
-demon slayer game apk revdl<br />
-demon slayer game apk rexdl<br />
-demon slayer game apk pure<br />
-demon slayer game apk uptodown<br />
-demon slayer game apk mirror<br />
-demon slayer game apk mob.org<br />
-demon slayer game apk highly compressed<br />
-demon slayer game apk 1.0.5<br />
-demon slayer game apk the hinokami chronicles<br />
-demon slayer game apk rage of demon king<br />
-demon slayer game apk infinity train<br />
-demon slayer game apk kimetsu no yaiba<br />
-demon slayer game apk tanjiro kamado<br />
-demon slayer game apk nezuko kamado<br />
-demon slayer game apk zenitsu agatsuma<br />
-demon slayer game apk inosuke hashibira<br />
-demon slayer game apk giyu tomioka<br />
-demon slayer game apk shinobu kocho<br />
-demon slayer game apk kyojuro rengoku<br />
-demon slayer game apk muzan kibutsuji<br />
-demon slayer game apk akaza<br />
-demon slayer game apk enmu<br />
-demon slayer game apk rui<br />
-demon slayer game apk sabito and makomo<br />
-demon slayer game apk sakonji urokodaki <br />
-demon slayer game apk kanao tsuyuri <br />
-demon slayer game apk tengen uzui <br />
-demon slayer game apk mitsuri kanroji <br />
-demon slayer game apk muichiro tokito <br />
-demon slayer game apk gyutaro <br />
-demon slayer game apk daki <br />
-demon slayer game apk hantengu <br />
-demon slayer game apk gyokko</p>
-<h2>Conclusion</h2>
-<h3>Summary of the main points</h3>
-<p>In this article, we have shown you how to download and play two of the best Demon Slayer games on your Android device: The Hinokami Chronicles and Rage of Demon King. These games are based on the popular anime series that follows the adventures of Tanjiro Kamado and his friends as they fight against demons and try to save his sister Nezuko. These games are both entertaining and faithful to the anime's story, characters, and style.</p>
-<h3>Call to action and final thoughts</h3>
-<p>If you are a fan of Demon Slayer or anime in general, you should definitely try these games on your Android device. They will provide you with hours of fun and excitement as you relive the epic moments of the anime or create your own stories. You can download these games for free using the APK files that we have provided in this article. Just follow the simple steps that we have explained and enjoy!</p>
-<p>Thank you for reading this article. We hope that you have found it useful and informative. If you have any questions or feedback about these games or this article, please feel free to leave a comment below. We would love to hear from you!</p>
-<h2>Frequently Asked Questions</h2>
-<ol>
-<li><b>What is an APK file and how do I use it?</b></li>
-<li>An APK file is an application package that contains all the files and data needed to install and run an app on your Android device. You can use an APK file to install apps that are not available on the official Google Play Store, such as the Demon Slayer games that we have discussed in this article. To use an APK file, you need to download it from a trusted source, enable "Unknown Sources" on your device's settings, and tap on the file to install it.</li>
-<li><b>Are these Demon Slayer games safe and legal to download and play?</b></li>
-<li>Yes, these Demon Slayer games are safe and legal to download and play, as long as you get them from the official websites or reliable sources that we have provided in this article. These games are licensed by the creators of the anime and manga, and they do not contain any viruses or malware that could harm your device or data. However, you should always be careful when downloading any APK files from unknown or unverified sources, as they may contain harmful or illegal content.</li>
-<li><b>Do I need an internet connection to play these Demon Slayer games?</b></li>
-<li>Yes, you need an internet connection to play these Demon Slayer games, as they require online authentication and verification to run. You also need an internet connection to access some of the features and modes of these games, such as online multiplayer, updates, events, and more. However, you can also play some parts of these games offline, such as the story mode or the solo mode.</li>
-<li><b>Can I play these Demon Slayer games on other devices besides Android?</b></li>
-<li>Yes, you can play these Demon Slayer games on other devices besides Android, depending on the game. The Hinokami Chronicles is available for Nintendo Switch, PlayStation 4, PlayStation 5, Xbox One, Xbox Series X/S, and Steam. Rage of Demon King is available for iOS and Android devices. However, if you want to play these games on your Android device, you will need to download and install the APK files that we have provided in this article.</li>
-<li><b>What are some other Demon Slayer games that I can play on my Android device?</b></li>
-<li>Some other Demon Slayer games that you can play on your Android device are Demon Slayer: Kimetsu no Yaiba - Keppuu Kengeki Royale, a battle royale game that lets you fight as a demon slayer or a demon; Demon Slayer: Kimetsu no Yaiba - Puzzle of Memories, a puzzle game that lets you collect and match characters from the anime; and Demon Slayer: Kimetsu no Yaiba - The Card Game, a card game that lets you build your own deck and battle against other players.</li>
-</ol></p> 197e85843d<br />
-<br />
-<br />
spaces/1phancelerku/anime-remove-background/Free Download Red Alert 2 Yuri 39s Revenge For Windows 10 EXCLUSIVE.md
DELETED
@@ -1,117 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>How to Download and Play Red Alert 2 Yuri's Revenge for Windows 10</h1>
|
3 |
-
<p>Red Alert 2 Yuri's Revenge is one of the most popular and beloved real-time strategy games of all time. Released in 2001 as an expansion pack to Red Alert 2, it adds a new faction, new units, new maps, new modes, and a new storyline to the original game. If you are a fan of classic RTS games, you might be wondering how you can download and play Red Alert 2 Yuri's Revenge for free on your Windows 10 PC. In this article, we will show you how to do that in a few simple steps.</p>
|
4 |
-
<h2>free download red alert 2 yuri 39;s revenge for windows 10</h2><br /><p><b><b>Download</b> ===> <a href="https://jinyurl.com/2uNTJ0">https://jinyurl.com/2uNTJ0</a></b></p><br /><br />
|
5 |
-
<h2>What is Red Alert 2 Yuri's Revenge?</h2>
|
6 |
-
<h3>A brief introduction to the game and its plot</h3>
|
7 |
-
<p>Red Alert 2 Yuri's Revenge is a real-time strategy game set in an alternate history where the Soviet Union and the Allies are locked in a global war. The game follows the events of Red Alert 2, where the Allies have defeated the Soviets with the help of a time machine. However, Yuri, the former head of the Soviet Psychic Corps, has secretly built his own army of mind-controlled soldiers and machines, and plans to use his psychic dominators to enslave the world. The player can choose to play as either the Allies, the Soviets, or Yuri's faction, each with their own unique units, buildings, technologies, and abilities.</p>
|
8 |
-
<h3>The main features and gameplay modes of the game</h3>
|
9 |
-
<p>Red Alert 2 Yuri's Revenge offers a variety of features and gameplay modes for players to enjoy. Some of them are:</p>
|
10 |
-
<ul>
|
11 |
-
<li>A single-player campaign mode with seven missions for each faction, featuring cinematic cutscenes and voice acting.</li>
|
12 |
-
<li>A skirmish mode where players can customize their own battles against AI opponents or other players on LAN or online.</li>
|
13 |
-
<li>A multiplayer mode where players can join or host online games with up to eight players on various maps and modes.</li>
|
14 |
-
<li>A map editor where players can create their own maps and scenarios.</li>
|
15 |
-
<li>A modding community where players can download and install fan-made mods that add new content or change the gameplay.</li>
|
16 |
-
</ul>
|
17 |
-
<h2>Why play Red Alert 2 Yuri's Revenge on Windows 10?</h2>
|
18 |
-
<h3>The benefits of playing on a modern operating system</h3>
|
19 |
-
<p>Playing Red Alert 2 Yuri's Revenge on Windows 10 has several advantages over playing it on older operating systems. Some of them are:</p>
|
20 |
-
<p></p>
|
21 |
-
<ul>
|
22 |
-
<li>Better performance and stability. Windows 10 is more optimized and compatible with modern hardware and software than older versions of Windows. This means that you can run the game faster and smoother, and avoid crashes and errors.</li>
|
23 |
-
<li>Better graphics and sound quality. Windows 10 supports higher resolutions and refresh rates, as well as better audio drivers and codecs than older versions of Windows. This means that you can enjoy the game with sharper visuals and clearer sounds.</li>
|
24 |
-
<li>Better security and privacy. Windows 10 has more advanced and updated security features and settings than older versions of Windows. This means that you can protect your PC and your data from viruses, malware, hackers, and other threats.</li>
|
25 |
-
</ul>
|
26 |
-
<h3>The challenges and solutions of running an old game on Windows 10</h3>
|
27 |
-
<p>Playing Red Alert 2 Yuri's Revenge on Windows 10 also has some challenges and drawbacks that need to be addressed. Some of them are:</p>
|
28 |
-
<ul>
|
29 |
-
<li>Compatibility issues. Red Alert 2 Yuri's Revenge was designed for Windows 95/98/ME/2000/XP, which means that it may not run properly or at all on Windows 10. This can cause problems such as black screens, missing textures, distorted colors, lagging, freezing, or crashing.</li>
|
30 |
-
<li>Solution: To fix these issues, you need to run the game in compatibility mode for Windows XP Service Pack 3. To do this, right-click on the game's executable file or shortcut, select Properties, go to the Compatibility tab, check the box that says "Run this program in compatibility mode for:", and choose Windows XP Service Pack 3 from the drop-down menu. Click Apply and OK to save the changes.</li>
|
31 |
-
<li>CD-ROM requirement. Red Alert 2 Yuri's Revenge requires the original CD-ROM of the game to be inserted in the CD drive in order to play. This can be inconvenient or impossible for some users who do not have a CD drive or have lost or damaged their CD.</li>
|
32 |
-
<li>Solution: To bypass this requirement, you need to download and install a no-CD patch for the game. A no-CD patch is a modified version of the game's executable file that allows you to play without the CD. You can find various no-CD patches for Red Alert 2 Yuri's Revenge online, such as [this one]. Make sure to download the patch that matches your game version and language. To install the patch, simply copy and paste the patched executable file into your game folder, replacing the original one.</li>
|
33 |
-
</ul>
|
34 |
-
<h2>How to download Red Alert 2 Yuri's Revenge for free?</h2>
|
35 |
-
<h3>The legal and ethical issues of downloading an old game for free</h3>
|
36 |
-
<p>Before you download Red Alert 2 Yuri's Revenge for free, you should be aware of the legal and ethical implications of doing so. Red Alert 2 Yuri's Revenge is a copyrighted product of Electronic Arts (EA), which means that downloading it for free without their permission is technically illegal and could result in legal action or penalties. Moreover, downloading it for free could also be considered unethical and unfair to the developers and publishers who invested their time, money, and effort into creating and distributing the game.</p>
|
37 |
-
<p>However, there are some arguments that could justify downloading an old game for free, such as:</p>
|
38 |
-
<ul>
|
39 |
-
<li>The game is no longer available for purchase or download from official sources, making it impossible or difficult for users to obtain it legally.</li>
|
40 |
-
<li>The game is considered abandonware, meaning that it has been abandoned by its developers and publishers who no longer support or care about it.</li>
|
41 |
-
<li>The game is part of the cultural heritage of gaming and should be preserved and accessible for future generations.</li>
|
42 |
-
<li>The game is downloaded for personal use only and not for commercial or malicious purposes.</li>
|
43 |
-
</ul>
|
44 |
-
<p>Ultimately, downloading Red Alert 2 Yuri's Revenge for free is a personal choice that depends on your own moral values and judgment. We do not condone or encourage piracy, but we also do not judge or criticize those who choose to do so.</p>
|
45 |
-
<h3>The best sources and methods of downloading the game safely and securely</h3>
|
46 |
-
<p>If you decide to download Red Alert 2 Yuri's Revenge for free, you should be careful about where and how you do it. There are many websites and platforms that offer free downloads of old games, but not all of them are trustworthy or reliable. Some of them may contain viruses, malware, spyware, adware, or other unwanted or harmful programs that could damage your PC or compromise your data. Some of them may also provide incomplete, corrupted, or fake files that could ruin your gaming experience.</p>
|
47 |
-
<p>To avoid these risks, you should follow these tips when downloading Red Alert 2 Yuri's Revenge for free:</p>
|
48 |
-
<ul>
|
49 |
-
<li>Use a reputable and verified website or platform that has positive reviews and feedback from other users. Some examples are [Old-Games.com], [MyAbandonware.com], [Abandonia.com], and [GOG.com].</li>
|
50 |
-
<li>Use a reliable and updated antivirus or anti-malware software to scan the downloaded files before opening or installing them. Some examples are [Avast], [Malwarebytes], [Norton], and [Kaspersky].</li>
|
51 |
-
<li>Use a secure and fast internet connection to download the files without interruptions or errors. Avoid using public or unsecured Wi-Fi networks that could expose your data to hackers or snoopers.</li>
|
52 |
-
<li>Use a VPN (virtual private network) service to hide your IP address and encrypt your online traffic. This can help you bypass geo-restrictions, avoid ISP throttling, and protect your privacy and anonymity. Some examples are [ExpressVPN], [NordVPN], [Surfshark], and [CyberGhost].</li>
|
53 |
-
</ul>
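<p>To complement the antivirus scan mentioned above, you can also confirm that the archive arrived intact by comparing its checksum with the one published on the download page, when the site provides one. The snippet below is a small sketch in Python; the filename and the expected SHA-256 value are placeholders.</p>
<pre><code>
# Minimal sketch: verify a downloaded archive against a published SHA-256 checksum.
# Both the filename and EXPECTED_SHA256 are placeholders for illustration.
import hashlib

EXPECTED_SHA256 = "paste-the-hash-published-by-the-download-site-here"

def sha256_of(path: str) -> str:
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1024 * 1024), b""):  # read in 1 MiB chunks
            digest.update(chunk)
    return digest.hexdigest()

actual = sha256_of("ra2_yuris_revenge.zip")
print("OK" if actual == EXPECTED_SHA256 else "Checksum mismatch: " + actual)
</code></pre>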
|
54 |
-
<h2>How to install and play Red Alert 2 Yuri's Revenge on Windows 10?</h2>
|
55 |
-
<h3>The system requirements and compatibility issues of the game</h3>
|
56 |
-
<p>Before you install and play Red Alert 2 Yuri's Revenge on Windows 10, you should make sure that your PC meets the minimum system requirements of the game. According to the official website of EA, these are:</p>
|
57 |
-
<table>
|
58 |
-
<tr><th>Component</th><th>Minimum Requirement</th></tr>
|
59 |
-
<tr><td>Operating System</td><td>Windows 95/98/ME/2000/XP</td></tr>
|
60 |
-
<tr><td>Processor</td><td>Pentium II 266 MHz or higher</td></tr>
|
61 |
-
<tr><td>Memory</td><td>64 MB RAM</td></tr>
|
62 |
-
<tr><td>Hard Disk Space</td><td>200 MB free space</td></tr>
|
63 |
-
<tr><td>Video Card</td><td>2 MB PCI or AGP video card with Direct3D support</td></tr>
|
64 |
-
<tr><td>Sound Card</td><td>DirectX 8.0 compatible sound card</td></tr>
|
65 |
-
<tr><td>CD-ROM Drive</td><td>4x speed or faster CD-ROM drive</td></tr>
|
66 |
-
<tr><td>Input Device</td><td>Keyboard and mouse</td></tr>
|
67 |
-
<tr><td>Internet Connection</td><td>56 Kbps modem or faster for online play</td></tr>
|
68 |
-
</table>
|
69 |
-
<p>As you can see, these requirements are very low by today's standards, which means that most modern PCs should be able to run the game without any problems. However, as we mentioned earlier, there may be some compatibility issues that prevent the game from running properly or at all on Windows 10. To fix these issues, you need to follow the steps and tips that we discussed in the previous section.</p>
|
70 |
-
<h3>The steps and tips of installing and launching the game</h3>
|
71 |
-
<p>To install and play Red Alert 2 Yuri's Revenge on Windows 10, you need to follow these steps:</p>
|
72 |
-
<ol>
|
73 |
-
<li>Download Red Alert 2 Yuri's Revenge from one of the sources that we recommended in the previous section. Make sure to download the full version of the game, not a demo or a trial.</li>
|
74 |
-
<li>Extract the downloaded files using a file archiver program such as [WinRAR] or [7-Zip]. You should get a folder containing the game files.</li>
|
75 |
-
<li>Run the setup.exe file inside the folder to start the installation process. Follow the on-screen instructions to complete the installation.</li>
|
76 |
-
<li>If you downloaded a no-CD patch for the game, copy and paste it into your game folder, replacing the original executable file.</li>
|
77 |
-
<li>If you want to apply any patches or mods to the game, download and install them according to their instructions.</li>
|
78 |
-
<li>Right-click on the game's executable file or shortcut, select Properties, go to the Compatibility tab, check the box that says "Run this program in compatibility mode for:", and choose Windows XP Service Pack 3 from the drop-down menu. Click Apply and OK to save the changes.</li>
|
79 |
-
<li>Double-click on the game's executable file or shortcut to launch the game. Enjoy!</li>
|
80 |
-
</ol>
|
81 |
-
<h3>The recommended patches and mods to enhance the game experience</h3>
|
82 |
-
<p>If you want to improve your gaming experience with Red Alert 2 Yuri's Revenge, you can try some of these patches and mods that add new features, fix bugs, balance gameplay, or change graphics:</p>
|
83 |
-
<ul>
|
84 |
-
<li>[CnCNet] is a patch that enables online multiplayer for Red Alert 2 Yuri's Revenge and other classic C&C games. It also adds new maps, modes, options, and enhancements to the game.</li>
|
85 |
-
<li>[Ares] is a mod that expands the modding capabilities of Red Alert 2 Yuri's Revenge. It allows modders to create new units, buildings, weapons, effects, and features to the game.</li>
|
86 |
-
<li>[Mental Omega] is a mod that adds a new storyline, new factions, new units, new missions, new music, and new gameplay elements to Red Alert 2 Yuri's Revenge. It is considered one of the best and most comprehensive mods for the game.</li>
|
87 |
-
<li>[Yuri's Revenge: CnCD2K] is a mod that combines Red Alert 2 Yuri's Revenge with Dune 2000, another classic RTS game. It adds new factions, units, buildings, maps, and modes from Dune 2000 to the game.</li>
|
88 |
-
<li>[Red Resurrection] is a mod that revamps and rebalances Red Alert 2 Yuri's Revenge. It adds new units, buildings, technologies, maps, modes, and features to the game.</li>
|
89 |
-
</ul>
|
90 |
-
<h2>Conclusion</h2>
|
91 |
-
<p>Red Alert 2 Yuri's Revenge is a classic RTS game that deserves to be played and enjoyed by old and new fans alike. If you want to download and play it for free on your Windows 10 PC, you can follow the steps and tips that we have provided in this article. We hope that this article has been helpful and informative for you. Now go ahead and command your army to victory!</p>
|
92 |
-
<h2>FAQs</h2>
|
93 |
-
<h3>Q1: Is Red Alert 2 Yuri's Revenge a standalone game or an expansion pack?</h3>
|
94 |
-
<p>A1: Red Alert 2 Yuri's Revenge is an expansion pack to Red Alert 2, which means that you need to have the original game installed on your PC in order to play it. However, some sources may provide a complete version of the game that includes both Red Alert 2 and Yuri's Revenge in one package.</p>
|
95 |
-
<h3>Q2: How many factions and units are there in Red Alert 2 Yuri's Revenge?</h3>
|
96 |
-
<p>A2: Red Alert 2 Yuri's Revenge has three factions: the Allies, the Soviets, and Yuri's faction. Each faction has its own unique units, buildings, technologies, and abilities. There are over 100 units in total in the game, including infantry, vehicles, aircraft, naval vessels, and special units.</p>
|
97 |
-
<h3>Q3: How can I play Red Alert 2 Yuri's Revenge online with other players?</h3>
|
98 |
-
<p>A3: You can play Red Alert 2 Yuri's Revenge online with other players by using a patch such as [CnCNet], which enables online multiplayer for the game. You can join or host online games with up to eight players on various maps and modes. You can also chat with other players and join clans and tournaments.</p>
|
99 |
-
<h3>Q4: What are some of the best strategies and tips for playing Red Alert 2 Yuri's Revenge?</h3>
|
100 |
-
<p>A4: Some of the best strategies and tips for playing Red Alert 2 Yuri's Revenge are:</p>
|
101 |
-
<ul>
|
102 |
-
<li>Know your faction's strengths and weaknesses. Each faction has its own advantages and disadvantages in terms of units, buildings, technologies, and abilities. For example, the Allies have superior air power and mobility, but lack heavy armor and firepower. The Soviets have powerful tanks and artillery, but are slow and vulnerable to air attacks. Yuri's faction has mind control and psychic weapons, but its units are expensive and fragile.</li>
|
103 |
-
<li>Know your enemy's faction and tactics. You should always scout your enemy's base and units to see what they are planning and how they are playing. You should also adapt your strategy according to your enemy's faction and tactics. For example, if your enemy is using a lot of infantry, you should use anti-infantry units or weapons. If your enemy is using a lot of air units, you should use anti-air units or weapons.</li>
|
104 |
-
<li>Manage your resources and economy. You should always collect as much ore as possible to fund your army and base. You should also build refineries near ore fields to speed up ore collection. You should also build power plants to supply energy to your buildings and units. You should also build barracks, war factories, airfields, naval yards, tech buildings, and defense structures to produce and protect your army and base.</li>
|
105 |
-
<li>Use your special abilities wisely. Each faction has its own special abilities that can turn the tide of battle. For example, the Allies have the Chronosphere that can teleport units across the map, the Soviets have the Iron Curtain that can make units invulnerable for a short time, and Yuri's faction has the Psychic Dominator that can unleash a devastating blast on a target area. You should use these abilities at the right time and place to surprise, weaken, or destroy your enemy.</li>
|
106 |
-
<li>Use your superweapons sparingly. Each faction has its own superweapons that can cause massive damage to the enemy base or units. For example, the Allies have the Weather Control Device that can create a lightning storm, the Soviets have the Nuclear Missile Silo that can launch a nuclear missile, and Yuri's faction has the Genetic Mutator that can mutate enemy units into brutes. You should use these superweapons only when necessary or when you have a clear advantage, as they have long cooldown times and can be countered by enemy defenses or abilities.</li>
|
107 |
-
</ul>
|
108 |
-
<h3>Q5: Where can I find more information and resources about Red Alert 2 Yuri's Revenge?</h3>
|
109 |
-
<p>A5: If you want to learn more about Red Alert 2 Yuri's Revenge, you can visit some of these websites and platforms that provide information and resources about the game:</p>
|
110 |
-
<ul>
|
111 |
-
<li>[The official website of EA] is where you can find the official news, updates, and support for the game.</li>
|
112 |
-
<li>[The C&C Wiki] is where you can find detailed information and trivia about the game's factions, units, buildings, missions, characters, and lore.</li>
|
113 |
-
<li>[The C&C Community] is where you can find forums, guides, tutorials, videos, streams, and blogs about the game and other C&C games.</li>
|
114 |
-
<li>[The ModDB] is where you can find hundreds of fan-made mods that add new content or change the gameplay of the game.</li>
|
115 |
-
</ul>
spaces/2ndelement/voicevox/get_cost_candidates.py
DELETED
@@ -1,91 +0,0 @@
|
|
1 |
-
"""
|
2 |
-
voicevox_engine/part_of_speech_data.pyのcost_candidatesを計算するプログラムです。
|
3 |
-
引数のnaist_jdic_pathには、open_jtalkのsrc/mecab-naist-jdic/naist-jdic.csvを指定してください。
|
4 |
-
|
5 |
-
実行例:
|
6 |
-
python get_cost_candidates.py --naist_jdic_path=/path/to/naist-jdic.csv \
|
7 |
-
--pos=名詞 \
|
8 |
-
--pos_detail_1=固有名詞 \
|
9 |
-
--pos_detail_2=一般 \
|
10 |
-
--pos_detail_3=*
|
11 |
-
|
12 |
-
cost_candidatesの値の詳細は以下の通りです。
|
13 |
-
- 1番目の値はnaist_jdic内の同一品詞の最小コストから1を引いたもの、11番目の値は最大コストに1を足したものです。
|
14 |
-
- 2番目の値はnaist_jdic内の同一品詞のコストの下位1%、10番目の値は99%の値です。
|
15 |
-
- 6番目の値はnaist_jdic内の同一品詞のコストの最頻値です。
|
16 |
-
- 2番目から6番目、6番目から10番目までの値は一定割合で増加するようになっています。
|
17 |
-
"""
|
18 |
-
|
19 |
-
import argparse
|
20 |
-
import statistics
|
21 |
-
from pathlib import Path
|
22 |
-
from typing import List
|
23 |
-
|
24 |
-
import numpy as np
|
25 |
-
|
26 |
-
|
27 |
-
def get_candidates(
|
28 |
-
naist_jdic_path: Path,
|
29 |
-
pos: str,
|
30 |
-
pos_detail_1: str,
|
31 |
-
pos_detail_2: str,
|
32 |
-
pos_detail_3: str,
|
33 |
-
) -> List[int]:
|
34 |
-
costs = []
|
35 |
-
with naist_jdic_path.open(encoding="utf-8") as f:
|
36 |
-
for line in f:
|
37 |
-
(
|
38 |
-
_,
|
39 |
-
_,
|
40 |
-
_,
|
41 |
-
_cost,
|
42 |
-
_pos,
|
43 |
-
_pos_detail_1,
|
44 |
-
_pos_detail_2,
|
45 |
-
_pos_detail_3,
|
46 |
-
_,
|
47 |
-
_,
|
48 |
-
_,
|
49 |
-
_,
|
50 |
-
_,
|
51 |
-
_,
|
52 |
-
_,
|
53 |
-
) = line.split(",")
|
54 |
-
if (_pos, _pos_detail_1, _pos_detail_2, _pos_detail_3) == (
|
55 |
-
pos,
|
56 |
-
pos_detail_1,
|
57 |
-
pos_detail_2,
|
58 |
-
pos_detail_3,
|
59 |
-
):
|
60 |
-
costs.append(int(_cost))
|
61 |
-
assert len(costs) > 0
|
62 |
-
cost_min = min(costs) - 1
|
63 |
-
cost_1per = np.quantile(costs, 0.01).astype(np.int64)
|
64 |
-
cost_mode = statistics.mode(costs)
|
65 |
-
cost_99per = np.quantile(costs, 0.99).astype(np.int64)
|
66 |
-
cost_max = max(costs) + 1
|
67 |
-
return (
|
68 |
-
[cost_min]
|
69 |
-
+ [int(cost_1per + (cost_mode - cost_1per) * i / 4) for i in range(5)]
|
70 |
-
+ [int(cost_mode + (cost_99per - cost_mode) * i / 4) for i in range(1, 5)]
|
71 |
-
+ [cost_max]
|
72 |
-
)
|
73 |
-
|
74 |
-
|
75 |
-
if __name__ == "__main__":
|
76 |
-
parser = argparse.ArgumentParser()
|
77 |
-
parser.add_argument("--naist_jdic_path", type=Path)
|
78 |
-
parser.add_argument("--pos", type=str)
|
79 |
-
parser.add_argument("--pos_detail_1", type=str)
|
80 |
-
parser.add_argument("--pos_detail_2", type=str)
|
81 |
-
parser.add_argument("--pos_detail_3", type=str)
|
82 |
-
args = parser.parse_args()
|
83 |
-
print(
|
84 |
-
get_candidates(
|
85 |
-
naist_jdic_path=args.naist_jdic_path,
|
86 |
-
pos=args.pos,
|
87 |
-
pos_detail_1=args.pos_detail_1,
|
88 |
-
pos_detail_2=args.pos_detail_2,
|
89 |
-
pos_detail_3=args.pos_detail_3,
|
90 |
-
)
|
91 |
-
)
|
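# Illustration (not from the original file): a toy run of the interpolation described
# in the docstring above, using made-up costs instead of naist-jdic.csv.
import statistics
import numpy as np

toy_costs = [3000, 3200, 3200, 3400, 3600, 4000, 4800, 6000]
c_min = min(toy_costs) - 1                  # 2999
c_1per = int(np.quantile(toy_costs, 0.01))  # 3014
c_mode = statistics.mode(toy_costs)         # 3200
c_99per = int(np.quantile(toy_costs, 0.99)) # 5916
c_max = max(toy_costs) + 1                  # 6001
candidates = (
    [c_min]
    + [int(c_1per + (c_mode - c_1per) * i / 4) for i in range(5)]      # 1% -> mode
    + [int(c_mode + (c_99per - c_mode) * i / 4) for i in range(1, 5)]  # mode -> 99%
    + [c_max]
)
print(len(candidates))  # 11 values, as described in the docstring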
spaces/7hao/bingo/src/lib/hooks/use-bing.ts
DELETED
@@ -1,173 +0,0 @@
|
|
1 |
-
'use client'
|
2 |
-
|
3 |
-
import { useState, useCallback, useEffect, useMemo } from 'react'
|
4 |
-
import { useAtom, useAtomValue } from 'jotai'
|
5 |
-
import { chatFamily, bingConversationStyleAtom, GreetMessages, hashAtom, voiceAtom } from '@/state'
|
6 |
-
import { setConversationMessages } from './chat-history'
|
7 |
-
import { ChatMessageModel, BotId, FileItem } from '@/lib/bots/bing/types'
|
8 |
-
import { nanoid } from '../utils'
|
9 |
-
import { TTS } from '../bots/bing/tts'
|
10 |
-
|
11 |
-
export function useBing(botId: BotId = 'bing') {
|
12 |
-
const chatAtom = useMemo(() => chatFamily({ botId, page: 'singleton' }), [botId])
|
13 |
-
const [enableTTS] = useAtom(voiceAtom)
|
14 |
-
const speaker = useMemo(() => new TTS(), [])
|
15 |
-
const [hash, setHash] = useAtom(hashAtom)
|
16 |
-
const bingConversationStyle = useAtomValue(bingConversationStyleAtom)
|
17 |
-
const [chatState, setChatState] = useAtom(chatAtom)
|
18 |
-
const [input, setInput] = useState('')
|
19 |
-
const [attachmentList, setAttachmentList] = useState<FileItem[]>([])
|
20 |
-
|
21 |
-
const updateMessage = useCallback(
|
22 |
-
(messageId: string, updater: (message: ChatMessageModel) => void) => {
|
23 |
-
setChatState((draft) => {
|
24 |
-
const message = draft.messages.find((m) => m.id === messageId)
|
25 |
-
if (message) {
|
26 |
-
updater(message)
|
27 |
-
}
|
28 |
-
})
|
29 |
-
},
|
30 |
-
[setChatState],
|
31 |
-
)
|
32 |
-
|
33 |
-
const sendMessage = useCallback(
|
34 |
-
async (input: string, options = {}) => {
|
35 |
-
const botMessageId = nanoid()
|
36 |
-
const imageUrl = attachmentList?.[0]?.status === 'loaded' ? attachmentList[0].url : undefined
|
37 |
-
setChatState((draft) => {
|
38 |
-
const text = imageUrl ? `${input}\n\n` : input
|
39 |
-
draft.messages.push({ id: nanoid(), text, author: 'user' }, { id: botMessageId, text: '', author: 'bot' })
|
40 |
-
setAttachmentList([])
|
41 |
-
})
|
42 |
-
const abortController = new AbortController()
|
43 |
-
setChatState((draft) => {
|
44 |
-
draft.generatingMessageId = botMessageId
|
45 |
-
draft.abortController = abortController
|
46 |
-
})
|
47 |
-
speaker.reset()
|
48 |
-
await chatState.bot.sendMessage({
|
49 |
-
prompt: input,
|
50 |
-
imageUrl: /\?bcid=([^&]+)/.test(imageUrl ?? '') ? `https://www.bing.com/images/blob?bcid=${RegExp.$1}` : imageUrl,
|
51 |
-
options: {
|
52 |
-
...options,
|
53 |
-
bingConversationStyle,
|
54 |
-
},
|
55 |
-
signal: abortController.signal,
|
56 |
-
onEvent(event) {
|
57 |
-
if (event.type === 'UPDATE_ANSWER') {
|
58 |
-
updateMessage(botMessageId, (message) => {
|
59 |
-
if (event.data.text.length > message.text.length) {
|
60 |
-
message.text = event.data.text
|
61 |
-
}
|
62 |
-
|
63 |
-
if (event.data.spokenText && enableTTS) {
|
64 |
-
speaker.speak(event.data.spokenText)
|
65 |
-
}
|
66 |
-
|
67 |
-
message.throttling = event.data.throttling || message.throttling
|
68 |
-
message.sourceAttributions = event.data.sourceAttributions || message.sourceAttributions
|
69 |
-
message.suggestedResponses = event.data.suggestedResponses || message.suggestedResponses
|
70 |
-
})
|
71 |
-
} else if (event.type === 'ERROR') {
|
72 |
-
updateMessage(botMessageId, (message) => {
|
73 |
-
message.error = event.error
|
74 |
-
})
|
75 |
-
setChatState((draft) => {
|
76 |
-
draft.abortController = undefined
|
77 |
-
draft.generatingMessageId = ''
|
78 |
-
})
|
79 |
-
} else if (event.type === 'DONE') {
|
80 |
-
setChatState((draft) => {
|
81 |
-
draft.abortController = undefined
|
82 |
-
draft.generatingMessageId = ''
|
83 |
-
})
|
84 |
-
}
|
85 |
-
},
|
86 |
-
})
|
87 |
-
},
|
88 |
-
[botId, attachmentList, chatState.bot, setChatState, updateMessage],
|
89 |
-
)
|
90 |
-
|
91 |
-
const uploadImage = useCallback(async (imgUrl: string) => {
|
92 |
-
setAttachmentList([{ url: imgUrl, status: 'loading' }])
|
93 |
-
const response = await chatState.bot.uploadImage(imgUrl, bingConversationStyle)
|
94 |
-
if (response?.blobId) {
|
95 |
-
setAttachmentList([{ url: `/api/blob?bcid=${response.blobId}`, status: 'loaded' }])
|
96 |
-
} else {
|
97 |
-
setAttachmentList([{ url: imgUrl, status: 'error' }])
|
98 |
-
}
|
99 |
-
}, [chatState.bot])
|
100 |
-
|
101 |
-
const resetConversation = useCallback(() => {
|
102 |
-
chatState.bot.resetConversation()
|
103 |
-
speaker.abort()
|
104 |
-
setChatState((draft) => {
|
105 |
-
draft.abortController = undefined
|
106 |
-
draft.generatingMessageId = ''
|
107 |
-
draft.messages = [{ author: 'bot', text: GreetMessages[Math.floor(GreetMessages.length * Math.random())], id: nanoid() }]
|
108 |
-
draft.conversationId = nanoid()
|
109 |
-
})
|
110 |
-
}, [chatState.bot, setChatState])
|
111 |
-
|
112 |
-
const stopGenerating = useCallback(() => {
|
113 |
-
chatState.abortController?.abort()
|
114 |
-
if (chatState.generatingMessageId) {
|
115 |
-
updateMessage(chatState.generatingMessageId, (message) => {
|
116 |
-
if (!message.text && !message.error) {
|
117 |
-
message.text = 'Cancelled'
|
118 |
-
}
|
119 |
-
})
|
120 |
-
}
|
121 |
-
setChatState((draft) => {
|
122 |
-
draft.generatingMessageId = ''
|
123 |
-
})
|
124 |
-
}, [chatState.abortController, chatState.generatingMessageId, setChatState, updateMessage])
|
125 |
-
|
126 |
-
useEffect(() => {
|
127 |
-
if (chatState.messages.length) {
|
128 |
-
setConversationMessages(botId, chatState.conversationId, chatState.messages)
|
129 |
-
}
|
130 |
-
}, [botId, chatState.conversationId, chatState.messages])
|
131 |
-
|
132 |
-
useEffect(() => {
|
133 |
-
if (hash === 'reset') {
|
134 |
-
resetConversation()
|
135 |
-
setHash('')
|
136 |
-
}
|
137 |
-
}, [hash, setHash])
|
138 |
-
|
139 |
-
const chat = useMemo(
|
140 |
-
() => ({
|
141 |
-
botId,
|
142 |
-
bot: chatState.bot,
|
143 |
-
isSpeaking: speaker.isSpeaking,
|
144 |
-
messages: chatState.messages,
|
145 |
-
sendMessage,
|
146 |
-
setInput,
|
147 |
-
input,
|
148 |
-
resetConversation,
|
149 |
-
generating: !!chatState.generatingMessageId,
|
150 |
-
stopGenerating,
|
151 |
-
uploadImage,
|
152 |
-
setAttachmentList,
|
153 |
-
attachmentList,
|
154 |
-
}),
|
155 |
-
[
|
156 |
-
botId,
|
157 |
-
bingConversationStyle,
|
158 |
-
chatState.bot,
|
159 |
-
chatState.generatingMessageId,
|
160 |
-
chatState.messages,
|
161 |
-
speaker.isSpeaking,
|
162 |
-
setInput,
|
163 |
-
input,
|
164 |
-
setAttachmentList,
|
165 |
-
attachmentList,
|
166 |
-
resetConversation,
|
167 |
-
sendMessage,
|
168 |
-
stopGenerating,
|
169 |
-
],
|
170 |
-
)
|
171 |
-
|
172 |
-
return chat
|
173 |
-
}
|
spaces/AI-ANK/blackmirroroffice/app.py
DELETED
@@ -1,43 +0,0 @@
|
|
1 |
-
import gradio as gr
|
2 |
-
import requests
|
3 |
-
|
4 |
-
# Dictionary of actors, their corresponding video URLs, and image URLs
|
5 |
-
ACTOR_VIDEOS = {
|
6 |
-
"Original": {
|
7 |
-
"video_url": "https://cdn.jsdelivr.net/gh/AI-ANK/bmjoanisawful@main/videos/office.mp4",
|
8 |
-
"image_url": "https://cdn.jsdelivr.net/gh/AI-ANK/bmjoanisawful@main/images/ms.jpg",
|
9 |
-
},
|
10 |
-
"John Cena": {
|
11 |
-
"video_url": "https://cdn.jsdelivr.net/gh/AI-ANK/bmjoanisawful@main/videos/1jcf.mp4",
|
12 |
-
"image_url": "https://cdn.jsdelivr.net/gh/AI-ANK/bmjoanisawful@main/images/1jc.jpg",
|
13 |
-
},
|
14 |
-
"Joaquin Phoenix": {
|
15 |
-
"video_url": "https://cdn.jsdelivr.net/gh/AI-ANK/bmjoanisawful@main/videos/1jpf.mp4",
|
16 |
-
"image_url": "https://cdn.jsdelivr.net/gh/AI-ANK/bmjoanisawful@main/images/1jp.jpg",
|
17 |
-
},
|
18 |
-
"Mr Beast": {
|
19 |
-
"video_url": "https://cdn.jsdelivr.net/gh/AI-ANK/bmjoanisawful@main/videos/1mrbf.mp4",
|
20 |
-
"image_url": "https://cdn.jsdelivr.net/gh/AI-ANK/bmjoanisawful@main/images/1mrb.jpg",
|
21 |
-
},
|
22 |
-
"Bob Odenkirk": {
|
23 |
-
"video_url": "https://cdn.jsdelivr.net/gh/AI-ANK/bmjoanisawful@main/videos/1sgf.mp4",
|
24 |
-
"image_url": "https://cdn.jsdelivr.net/gh/AI-ANK/bmjoanisawful@main/images/1sg.jpg",
|
25 |
-
},
|
26 |
-
}
|
27 |
-
|
28 |
-
# Function to change video based on actor selection
|
29 |
-
def change_video(actor_name):
|
30 |
-
video_url = ACTOR_VIDEOS[actor_name]["video_url"]
|
31 |
-
return f'<video width="100%" controls autoplay><source src="{video_url}" type="video/mp4"></video>'
|
32 |
-
|
33 |
-
# Create Gradio Interface
|
34 |
-
iface = gr.Interface(
|
35 |
-
fn=change_video,
|
36 |
-
inputs=gr.Radio(choices=list(ACTOR_VIDEOS.keys()), label="Choose Your Actor"),
|
37 |
-
outputs=gr.HTML(label="Your Video"),
|
38 |
-
live=True,
|
39 |
-
title="Black Mirror Meets The Office: Michael Scott Is Awful",
|
40 |
-
description="Choose an actor below and watch them step into the shoes of Michael Scott"
|
41 |
-
)
|
42 |
-
|
43 |
-
iface.launch()
|
spaces/AIGC-Audio/AudioGPT/text_to_speech/modules/tts/portaspeech/portaspeech_flow.py
DELETED
@@ -1,75 +0,0 @@
|
|
1 |
-
import torch
|
2 |
-
import torch.distributions as dist
|
3 |
-
from torch import nn
|
4 |
-
from text_to_speech.modules.commons.normalizing_flow.glow_modules import Glow
|
5 |
-
from text_to_speech.modules.tts.portaspeech.portaspeech import PortaSpeech
|
6 |
-
|
7 |
-
|
8 |
-
class PortaSpeechFlow(PortaSpeech):
|
9 |
-
def __init__(self, ph_dict_size, word_dict_size, hparams, out_dims=None):
|
10 |
-
super().__init__(ph_dict_size, word_dict_size, hparams, out_dims)
|
11 |
-
cond_hs = 80
|
12 |
-
if hparams.get('use_txt_cond', True):
|
13 |
-
cond_hs = cond_hs + hparams['hidden_size']
|
14 |
-
if hparams.get('use_latent_cond', False):
|
15 |
-
cond_hs = cond_hs + hparams['latent_size']
|
16 |
-
if hparams['use_cond_proj']:
|
17 |
-
self.g_proj = nn.Conv1d(cond_hs, 160, 5, padding=2)
|
18 |
-
cond_hs = 160
|
19 |
-
self.post_flow = Glow(
|
20 |
-
80, hparams['post_glow_hidden'], hparams['post_glow_kernel_size'], 1,
|
21 |
-
hparams['post_glow_n_blocks'], hparams['post_glow_n_block_layers'],
|
22 |
-
n_split=4, n_sqz=2,
|
23 |
-
gin_channels=cond_hs,
|
24 |
-
share_cond_layers=hparams['post_share_cond_layers'],
|
25 |
-
share_wn_layers=hparams['share_wn_layers'],
|
26 |
-
sigmoid_scale=hparams['sigmoid_scale']
|
27 |
-
)
|
28 |
-
self.prior_dist = dist.Normal(0, 1)
|
29 |
-
|
30 |
-
def forward(self, txt_tokens, word_tokens, ph2word, word_len, mel2word=None, mel2ph=None,
|
31 |
-
spk_embed=None, spk_id=None, pitch=None, infer=False, tgt_mels=None,
|
32 |
-
forward_post_glow=True, two_stage=True, global_step=None, **kwargs):
|
33 |
-
is_training = self.training
|
34 |
-
train_fvae = not (forward_post_glow and two_stage)
|
35 |
-
if not train_fvae:
|
36 |
-
self.eval()
|
37 |
-
with torch.set_grad_enabled(mode=train_fvae):
|
38 |
-
ret = super(PortaSpeechFlow, self).forward(
|
39 |
-
txt_tokens, word_tokens, ph2word, word_len, mel2word, mel2ph,
|
40 |
-
spk_embed, spk_id, pitch, infer, tgt_mels, global_step, **kwargs)
|
41 |
-
if (forward_post_glow or not two_stage) and self.hparams['use_post_flow']:
|
42 |
-
self.run_post_glow(tgt_mels, infer, is_training, ret)
|
43 |
-
return ret
|
44 |
-
|
45 |
-
def run_post_glow(self, tgt_mels, infer, is_training, ret):
|
46 |
-
x_recon = ret['mel_out'].transpose(1, 2)
|
47 |
-
g = x_recon
|
48 |
-
B, _, T = g.shape
|
49 |
-
if self.hparams.get('use_txt_cond', True):
|
50 |
-
g = torch.cat([g, ret['decoder_inp'].transpose(1, 2)], 1)
|
51 |
-
if self.hparams.get('use_latent_cond', False):
|
52 |
-
g_z = ret['z_p'][:, :, :, None].repeat(1, 1, 1, 4).reshape(B, -1, T)
|
53 |
-
g = torch.cat([g, g_z], 1)
|
54 |
-
if self.hparams['use_cond_proj']:
|
55 |
-
g = self.g_proj(g)
|
56 |
-
prior_dist = self.prior_dist
|
57 |
-
if not infer:
|
58 |
-
if is_training:
|
59 |
-
self.post_flow.train()
|
60 |
-
nonpadding = ret['nonpadding'].transpose(1, 2)
|
61 |
-
y_lengths = nonpadding.sum(-1)
|
62 |
-
if self.hparams['detach_postflow_input']:
|
63 |
-
g = g.detach()
|
64 |
-
tgt_mels = tgt_mels.transpose(1, 2)
|
65 |
-
z_postflow, ldj = self.post_flow(tgt_mels, nonpadding, g=g)
|
66 |
-
ldj = ldj / y_lengths / 80
|
67 |
-
ret['z_pf'], ret['ldj_pf'] = z_postflow, ldj
|
68 |
-
ret['postflow'] = -prior_dist.log_prob(z_postflow).mean() - ldj.mean()
|
69 |
-
if torch.isnan(ret['postflow']):
|
70 |
-
ret['postflow'] = None
|
71 |
-
else:
|
72 |
-
nonpadding = torch.ones_like(x_recon[:, :1, :])
|
73 |
-
z_post = torch.randn(x_recon.shape).to(g.device) * self.hparams['noise_scale']
|
74 |
-
x_recon, _ = self.post_flow(z_post, nonpadding, g, reverse=True)
|
75 |
-
ret['mel_out'] = x_recon.transpose(1, 2)
|
|
|
spaces/AISuperheroes/10SL-RealTimeDSDashboard-Live-AIUIUX/README.md
DELETED
@@ -1,13 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: 10SL RealTimeDSDashboard Live AIUIUX
|
3 |
-
emoji: ⏩
|
4 |
-
colorFrom: indigo
|
5 |
-
colorTo: yellow
|
6 |
-
sdk: streamlit
|
7 |
-
sdk_version: 1.10.0
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
license: mit
|
11 |
-
---
|
12 |
-
|
13 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
spaces/AIZero2HeroBootcamp/StaticHTML5Playcanvas/style.css
DELETED
@@ -1,28 +0,0 @@
|
|
1 |
-
body {
|
2 |
-
padding: 2rem;
|
3 |
-
font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif;
|
4 |
-
}
|
5 |
-
|
6 |
-
h1 {
|
7 |
-
font-size: 16px;
|
8 |
-
margin-top: 0;
|
9 |
-
}
|
10 |
-
|
11 |
-
p {
|
12 |
-
color: rgb(107, 114, 128);
|
13 |
-
font-size: 15px;
|
14 |
-
margin-bottom: 10px;
|
15 |
-
margin-top: 5px;
|
16 |
-
}
|
17 |
-
|
18 |
-
.card {
|
19 |
-
max-width: 620px;
|
20 |
-
margin: 0 auto;
|
21 |
-
padding: 16px;
|
22 |
-
border: 1px solid lightgray;
|
23 |
-
border-radius: 16px;
|
24 |
-
}
|
25 |
-
|
26 |
-
.card p:last-child {
|
27 |
-
margin-bottom: 0;
|
28 |
-
}
|
spaces/AJRFan/dreambooth-training/train_dreambooth.py
DELETED
@@ -1,818 +0,0 @@
|
|
1 |
-
import argparse
|
2 |
-
import itertools
|
3 |
-
import math
|
4 |
-
import os
|
5 |
-
from pathlib import Path
|
6 |
-
from typing import Optional
|
7 |
-
import subprocess
|
8 |
-
import sys
|
9 |
-
|
10 |
-
import torch
|
11 |
-
import torch.nn.functional as F
|
12 |
-
import torch.utils.checkpoint
|
13 |
-
from torch.utils.data import Dataset
|
14 |
-
|
15 |
-
from accelerate import Accelerator
|
16 |
-
from accelerate.logging import get_logger
|
17 |
-
from accelerate.utils import set_seed
|
18 |
-
from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel
|
19 |
-
from diffusers.optimization import get_scheduler
|
20 |
-
from huggingface_hub import HfFolder, Repository, whoami
|
21 |
-
from PIL import Image
|
22 |
-
from torchvision import transforms
|
23 |
-
from tqdm.auto import tqdm
|
24 |
-
from transformers import CLIPTextModel, CLIPTokenizer
|
25 |
-
|
26 |
-
|
27 |
-
logger = get_logger(__name__)
|
28 |
-
|
29 |
-
|
30 |
-
def parse_args():
|
31 |
-
parser = argparse.ArgumentParser(description="Simple example of a training script.")
|
32 |
-
parser.add_argument(
|
33 |
-
"--pretrained_model_name_or_path",
|
34 |
-
type=str,
|
35 |
-
default=None,
|
36 |
-
#required=True,
|
37 |
-
help="Path to pretrained model or model identifier from huggingface.co/models.",
|
38 |
-
)
|
39 |
-
parser.add_argument(
|
40 |
-
"--tokenizer_name",
|
41 |
-
type=str,
|
42 |
-
default=None,
|
43 |
-
help="Pretrained tokenizer name or path if not the same as model_name",
|
44 |
-
)
|
45 |
-
parser.add_argument(
|
46 |
-
"--instance_data_dir",
|
47 |
-
type=str,
|
48 |
-
default=None,
|
49 |
-
#required=True,
|
50 |
-
help="A folder containing the training data of instance images.",
|
51 |
-
)
|
52 |
-
parser.add_argument(
|
53 |
-
"--class_data_dir",
|
54 |
-
type=str,
|
55 |
-
default=None,
|
56 |
-
required=False,
|
57 |
-
help="A folder containing the training data of class images.",
|
58 |
-
)
|
59 |
-
parser.add_argument(
|
60 |
-
"--instance_prompt",
|
61 |
-
type=str,
|
62 |
-
default=None,
|
63 |
-
help="The prompt with identifier specifying the instance",
|
64 |
-
)
|
65 |
-
parser.add_argument(
|
66 |
-
"--class_prompt",
|
67 |
-
type=str,
|
68 |
-
default="",
|
69 |
-
help="The prompt to specify images in the same class as provided instance images.",
|
70 |
-
)
|
71 |
-
parser.add_argument(
|
72 |
-
"--with_prior_preservation",
|
73 |
-
default=False,
|
74 |
-
action="store_true",
|
75 |
-
help="Flag to add prior preservation loss.",
|
76 |
-
)
|
77 |
-
parser.add_argument("--prior_loss_weight", type=float, default=1.0, help="The weight of prior preservation loss.")
|
78 |
-
parser.add_argument(
|
79 |
-
"--num_class_images",
|
80 |
-
type=int,
|
81 |
-
default=100,
|
82 |
-
help=(
|
83 |
-
"Minimal class images for prior preservation loss. If not have enough images, additional images will be"
|
84 |
-
" sampled with class_prompt."
|
85 |
-
),
|
86 |
-
)
|
87 |
-
parser.add_argument(
|
88 |
-
"--output_dir",
|
89 |
-
type=str,
|
90 |
-
default="",
|
91 |
-
help="The output directory where the model predictions and checkpoints will be written.",
|
92 |
-
)
|
93 |
-
parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
|
94 |
-
parser.add_argument(
|
95 |
-
"--resolution",
|
96 |
-
type=int,
|
97 |
-
default=512,
|
98 |
-
help=(
|
99 |
-
"The resolution for input images, all the images in the train/validation dataset will be resized to this"
|
100 |
-
" resolution"
|
101 |
-
),
|
102 |
-
)
|
103 |
-
parser.add_argument(
|
104 |
-
"--center_crop", action="store_true", help="Whether to center crop images before resizing to resolution"
|
105 |
-
)
|
106 |
-
parser.add_argument("--train_text_encoder", action="store_true", help="Whether to train the text encoder")
|
107 |
-
parser.add_argument(
|
108 |
-
"--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader."
|
109 |
-
)
|
110 |
-
parser.add_argument(
|
111 |
-
"--sample_batch_size", type=int, default=4, help="Batch size (per device) for sampling images."
|
112 |
-
)
|
113 |
-
parser.add_argument("--num_train_epochs", type=int, default=1)
|
114 |
-
parser.add_argument(
|
115 |
-
"--max_train_steps",
|
116 |
-
type=int,
|
117 |
-
default=None,
|
118 |
-
help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
|
119 |
-
)
|
120 |
-
parser.add_argument(
|
121 |
-
"--gradient_accumulation_steps",
|
122 |
-
type=int,
|
123 |
-
default=1,
|
124 |
-
help="Number of updates steps to accumulate before performing a backward/update pass.",
|
125 |
-
)
|
126 |
-
parser.add_argument(
|
127 |
-
"--gradient_checkpointing",
|
128 |
-
action="store_true",
|
129 |
-
help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
|
130 |
-
)
|
131 |
-
parser.add_argument(
|
132 |
-
"--learning_rate",
|
133 |
-
type=float,
|
134 |
-
default=5e-6,
|
135 |
-
help="Initial learning rate (after the potential warmup period) to use.",
|
136 |
-
)
|
137 |
-
parser.add_argument(
|
138 |
-
"--scale_lr",
|
139 |
-
action="store_true",
|
140 |
-
default=False,
|
141 |
-
help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
|
142 |
-
)
|
143 |
-
parser.add_argument(
|
144 |
-
"--lr_scheduler",
|
145 |
-
type=str,
|
146 |
-
default="constant",
|
147 |
-
help=(
|
148 |
-
'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
|
149 |
-
' "constant", "constant_with_warmup"]'
|
150 |
-
),
|
151 |
-
)
|
152 |
-
parser.add_argument(
|
153 |
-
"--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
|
154 |
-
)
|
155 |
-
parser.add_argument(
|
156 |
-
"--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes."
|
157 |
-
)
|
158 |
-
parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
|
159 |
-
parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
|
160 |
-
parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
|
161 |
-
parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
|
162 |
-
parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
|
163 |
-
parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
|
164 |
-
parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
|
165 |
-
parser.add_argument(
|
166 |
-
"--hub_model_id",
|
167 |
-
type=str,
|
168 |
-
default=None,
|
169 |
-
help="The name of the repository to keep in sync with the local `output_dir`.",
|
170 |
-
)
|
171 |
-
parser.add_argument(
|
172 |
-
"--logging_dir",
|
173 |
-
type=str,
|
174 |
-
default="logs",
|
175 |
-
help=(
|
176 |
-
"[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
|
177 |
-
" *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
|
178 |
-
),
|
179 |
-
)
|
180 |
-
parser.add_argument(
|
181 |
-
"--mixed_precision",
|
182 |
-
type=str,
|
183 |
-
default="no",
|
184 |
-
choices=["no", "fp16", "bf16"],
|
185 |
-
help=(
|
186 |
-
"Whether to use mixed precision. Choose"
|
187 |
-
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
|
188 |
-
"and an Nvidia Ampere GPU."
|
189 |
-
),
|
190 |
-
)
|
191 |
-
|
192 |
-
parser.add_argument(
|
193 |
-
"--save_n_steps",
|
194 |
-
type=int,
|
195 |
-
default=1,
|
196 |
-
help=("Save the model every n global_steps"),
|
197 |
-
)
|
198 |
-
|
199 |
-
|
200 |
-
parser.add_argument(
|
201 |
-
"--save_starting_step",
|
202 |
-
type=int,
|
203 |
-
default=1,
|
204 |
-
help=("The step from which it starts saving intermediary checkpoints"),
|
205 |
-
)
|
206 |
-
|
207 |
-
parser.add_argument(
|
208 |
-
"--stop_text_encoder_training",
|
209 |
-
type=int,
|
210 |
-
default=1000000,
|
211 |
-
help=("The step at which the text_encoder is no longer trained"),
|
212 |
-
)
|
213 |
-
|
214 |
-
|
215 |
-
parser.add_argument(
|
216 |
-
"--image_captions_filename",
|
217 |
-
action="store_true",
|
218 |
-
help="Get captions from filename",
|
219 |
-
)
|
220 |
-
|
221 |
-
|
222 |
-
parser.add_argument(
|
223 |
-
"--dump_only_text_encoder",
|
224 |
-
action="store_true",
|
225 |
-
default=False,
|
226 |
-
help="Dump only text encoder",
|
227 |
-
)
|
228 |
-
|
229 |
-
parser.add_argument(
|
230 |
-
"--train_only_unet",
|
231 |
-
action="store_true",
|
232 |
-
default=False,
|
233 |
-
help="Train only the unet",
|
234 |
-
)
|
235 |
-
|
236 |
-
parser.add_argument(
|
237 |
-
"--Session_dir",
|
238 |
-
type=str,
|
239 |
-
default="",
|
240 |
-
help="Current session directory",
|
241 |
-
)
|
242 |
-
|
243 |
-
|
244 |
-
|
245 |
-
|
246 |
-
parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
|
247 |
-
|
248 |
-
args = parser.parse_args()
|
249 |
-
env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
|
250 |
-
if env_local_rank != -1 and env_local_rank != args.local_rank:
|
251 |
-
args.local_rank = env_local_rank
|
252 |
-
|
253 |
-
#if args.instance_data_dir is None:
|
254 |
-
# raise ValueError("You must specify a train data directory.")
|
255 |
-
|
256 |
-
#if args.with_prior_preservation:
|
257 |
-
# if args.class_data_dir is None:
|
258 |
-
# raise ValueError("You must specify a data directory for class images.")
|
259 |
-
# if args.class_prompt is None:
|
260 |
-
# raise ValueError("You must specify prompt for class images.")
|
261 |
-
|
262 |
-
return args
|
263 |
-
|
264 |
-
|
265 |
-
class DreamBoothDataset(Dataset):
|
266 |
-
"""
|
267 |
-
A dataset to prepare the instance and class images with the prompts for fine-tuning the model.
|
268 |
-
It pre-processes the images and the tokenizes prompts.
|
269 |
-
"""
|
270 |
-
|
271 |
-
def __init__(
|
272 |
-
self,
|
273 |
-
instance_data_root,
|
274 |
-
instance_prompt,
|
275 |
-
tokenizer,
|
276 |
-
args,
|
277 |
-
class_data_root=None,
|
278 |
-
class_prompt=None,
|
279 |
-
size=512,
|
280 |
-
center_crop=False,
|
281 |
-
):
|
282 |
-
self.size = size
|
283 |
-
self.center_crop = center_crop
|
284 |
-
self.tokenizer = tokenizer
|
285 |
-
self.image_captions_filename = None
|
286 |
-
|
287 |
-
self.instance_data_root = Path(instance_data_root)
|
288 |
-
if not self.instance_data_root.exists():
|
289 |
-
raise ValueError("Instance images root doesn't exists.")
|
290 |
-
|
291 |
-
self.instance_images_path = list(Path(instance_data_root).iterdir())
|
292 |
-
self.num_instance_images = len(self.instance_images_path)
|
293 |
-
self.instance_prompt = instance_prompt
|
294 |
-
self._length = self.num_instance_images
|
295 |
-
|
296 |
-
if args.image_captions_filename:
|
297 |
-
self.image_captions_filename = True
|
298 |
-
|
299 |
-
if class_data_root is not None:
|
300 |
-
self.class_data_root = Path(class_data_root)
|
301 |
-
self.class_data_root.mkdir(parents=True, exist_ok=True)
|
302 |
-
self.class_images_path = list(self.class_data_root.iterdir())
|
303 |
-
self.num_class_images = len(self.class_images_path)
|
304 |
-
self._length = max(self.num_class_images, self.num_instance_images)
|
305 |
-
self.class_prompt = class_prompt
|
306 |
-
else:
|
307 |
-
self.class_data_root = None
|
308 |
-
|
309 |
-
self.image_transforms = transforms.Compose(
|
310 |
-
[
|
311 |
-
transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR),
|
312 |
-
transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size),
|
313 |
-
transforms.ToTensor(),
|
314 |
-
transforms.Normalize([0.5], [0.5]),
|
315 |
-
]
|
316 |
-
)
|
317 |
-
|
318 |
-
def __len__(self):
|
319 |
-
return self._length
|
320 |
-
|
321 |
-
def __getitem__(self, index):
|
322 |
-
example = {}
|
323 |
-
path = self.instance_images_path[index % self.num_instance_images]
|
324 |
-
instance_image = Image.open(path)
|
325 |
-
if not instance_image.mode == "RGB":
|
326 |
-
instance_image = instance_image.convert("RGB")
|
327 |
-
|
328 |
-
instance_prompt = self.instance_prompt
|
329 |
-
|
330 |
-
if self.image_captions_filename:
|
331 |
-
filename = Path(path).stem
|
332 |
-
pt=''.join([i for i in filename if not i.isdigit()])
|
333 |
-
pt=pt.replace("_"," ")
|
334 |
-
pt=pt.replace("(","")
|
335 |
-
pt=pt.replace(")","")
|
336 |
-
instance_prompt = pt
|
337 |
-
sys.stdout.write("\x1b[0;32m" + instance_prompt + "\x1b[0m")
|
338 |
-
sys.stdout.flush()
|
339 |
-
|
340 |
-
|
341 |
-
example["instance_images"] = self.image_transforms(instance_image)
|
342 |
-
example["instance_prompt_ids"] = self.tokenizer(
|
343 |
-
instance_prompt,
|
344 |
-
padding="do_not_pad",
|
345 |
-
truncation=True,
|
346 |
-
max_length=self.tokenizer.model_max_length,
|
347 |
-
).input_ids
|
348 |
-
|
349 |
-
if self.class_data_root:
|
350 |
-
class_image = Image.open(self.class_images_path[index % self.num_class_images])
|
351 |
-
if not class_image.mode == "RGB":
|
352 |
-
class_image = class_image.convert("RGB")
|
353 |
-
example["class_images"] = self.image_transforms(class_image)
|
354 |
-
example["class_prompt_ids"] = self.tokenizer(
|
355 |
-
self.class_prompt,
|
356 |
-
padding="do_not_pad",
|
357 |
-
truncation=True,
|
358 |
-
max_length=self.tokenizer.model_max_length,
|
359 |
-
).input_ids
|
360 |
-
|
361 |
-
return example
|
362 |
-
|
363 |
-
|
364 |
-
|
365 |
-
class PromptDataset(Dataset):
|
366 |
-
"A simple dataset to prepare the prompts to generate class images on multiple GPUs."
|
367 |
-
|
368 |
-
def __init__(self, prompt, num_samples):
|
369 |
-
self.prompt = prompt
|
370 |
-
self.num_samples = num_samples
|
371 |
-
|
372 |
-
def __len__(self):
|
373 |
-
return self.num_samples
|
374 |
-
|
375 |
-
def __getitem__(self, index):
|
376 |
-
example = {}
|
377 |
-
example["prompt"] = self.prompt
|
378 |
-
example["index"] = index
|
379 |
-
return example
|
380 |
-
|
381 |
-
|
382 |
-
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
|
383 |
-
if token is None:
|
384 |
-
token = HfFolder.get_token()
|
385 |
-
if organization is None:
|
386 |
-
username = whoami(token)["name"]
|
387 |
-
return f"{username}/{model_id}"
|
388 |
-
else:
|
389 |
-
return f"{organization}/{model_id}"
|
390 |
-
|
391 |
-
def merge_two_dicts(starting_dict: dict, updater_dict: dict) -> dict:
|
392 |
-
"""
|
393 |
-
Starts from base starting dict and then adds the remaining key values from updater replacing the values from
|
394 |
-
the first starting/base dict with the second updater dict.
|
395 |
-
|
396 |
-
For later: how does d = {**d1, **d2} replace collision?
|
397 |
-
|
398 |
-
:param starting_dict:
|
399 |
-
:param updater_dict:
|
400 |
-
:return:
|
401 |
-
"""
|
402 |
-
new_dict: dict = starting_dict.copy() # start with keys and values of starting_dict
|
403 |
-
new_dict.update(updater_dict) # modifies starting_dict with keys and values of updater_dict
|
404 |
-
return new_dict
|
405 |
-
|
406 |
-
def merge_args(args1: argparse.Namespace, args2: argparse.Namespace) -> argparse.Namespace:
|
407 |
-
"""
|
408 |
-
|
409 |
-
ref: https://stackoverflow.com/questions/56136549/how-can-i-merge-two-argparse-namespaces-in-python-2-x
|
410 |
-
:param args1:
|
411 |
-
:param args2:
|
412 |
-
:return:
|
413 |
-
"""
|
414 |
-
# - the merged args
|
415 |
-
# The vars() function returns the __dict__ attribute to values of the given object e.g {field:value}.
|
416 |
-
merged_key_values_for_namespace: dict = merge_two_dicts(vars(args1), vars(args2))
|
417 |
-
args = argparse.Namespace(**merged_key_values_for_namespace)
|
418 |
-
return args
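# Illustrative usage (not part of the original script): on a key collision the
# second namespace wins, which answers the "how does {**d1, **d2} replace
# collision?" question in merge_two_dicts above. For example:
#   a = argparse.Namespace(learning_rate=5e-6, max_train_steps=100)
#   b = argparse.Namespace(max_train_steps=800)
#   merge_args(a, b)  # -> Namespace(learning_rate=5e-06, max_train_steps=800)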
|
419 |
-
|
420 |
-
def run_training(args_imported):
|
421 |
-
args_default = parse_args()
|
422 |
-
args = merge_args(args_default, args_imported)
|
423 |
-
print(args)
|
424 |
-
logging_dir = Path(args.output_dir, args.logging_dir)
|
425 |
-
i=args.save_starting_step
|
426 |
-
accelerator = Accelerator(
|
427 |
-
gradient_accumulation_steps=args.gradient_accumulation_steps,
|
428 |
-
mixed_precision=args.mixed_precision,
|
429 |
-
log_with="tensorboard",
|
430 |
-
logging_dir=logging_dir,
|
431 |
-
)
|
432 |
-
|
433 |
-
# Currently, it's not possible to do gradient accumulation when training two models with accelerate.accumulate
|
434 |
-
# This will be enabled soon in accelerate. For now, we don't allow gradient accumulation when training two models.
|
435 |
-
# TODO (patil-suraj): Remove this check when gradient accumulation with two models is enabled in accelerate.
|
436 |
-
if args.train_text_encoder and args.gradient_accumulation_steps > 1 and accelerator.num_processes > 1:
|
437 |
-
raise ValueError(
|
438 |
-
"Gradient accumulation is not supported when training the text encoder in distributed training. "
|
439 |
-
"Please set gradient_accumulation_steps to 1. This feature will be supported in the future."
|
440 |
-
)
|
441 |
-
|
442 |
-
if args.seed is not None:
|
443 |
-
set_seed(args.seed)
|
444 |
-
|
445 |
-
if args.with_prior_preservation:
|
446 |
-
class_images_dir = Path(args.class_data_dir)
|
447 |
-
if not class_images_dir.exists():
|
448 |
-
class_images_dir.mkdir(parents=True)
|
449 |
-
cur_class_images = len(list(class_images_dir.iterdir()))
|
450 |
-
|
451 |
-
if cur_class_images < args.num_class_images:
|
452 |
-
torch_dtype = torch.float16 if accelerator.device.type == "cuda" else torch.float32
|
453 |
-
pipeline = StableDiffusionPipeline.from_pretrained(
|
454 |
-
args.pretrained_model_name_or_path, torch_dtype=torch_dtype
|
455 |
-
)
|
456 |
-
pipeline.set_progress_bar_config(disable=True)
|
457 |
-
|
458 |
-
num_new_images = args.num_class_images - cur_class_images
|
459 |
-
logger.info(f"Number of class images to sample: {num_new_images}.")
|
460 |
-
|
461 |
-
sample_dataset = PromptDataset(args.class_prompt, num_new_images)
|
462 |
-
sample_dataloader = torch.utils.data.DataLoader(sample_dataset, batch_size=args.sample_batch_size)
|
463 |
-
|
464 |
-
sample_dataloader = accelerator.prepare(sample_dataloader)
|
465 |
-
pipeline.to(accelerator.device)
|
466 |
-
|
467 |
-
for example in tqdm(
|
468 |
-
sample_dataloader, desc="Generating class images", disable=not accelerator.is_local_main_process
|
469 |
-
):
|
470 |
-
with torch.autocast("cuda"):
|
471 |
-
images = pipeline(example["prompt"]).images
|
472 |
-
|
473 |
-
for i, image in enumerate(images):
|
474 |
-
image.save(class_images_dir / f"{example['index'][i] + cur_class_images}.jpg")
|
475 |
-
|
476 |
-
del pipeline
|
477 |
-
if torch.cuda.is_available():
|
478 |
-
torch.cuda.empty_cache()
|
479 |
-
|
480 |
-
# Handle the repository creation
|
481 |
-
if accelerator.is_main_process:
|
482 |
-
if args.push_to_hub:
|
483 |
-
if args.hub_model_id is None:
|
484 |
-
repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token)
|
485 |
-
else:
|
486 |
-
repo_name = args.hub_model_id
|
487 |
-
repo = Repository(args.output_dir, clone_from=repo_name)
|
488 |
-
|
489 |
-
with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore:
|
490 |
-
if "step_*" not in gitignore:
|
491 |
-
gitignore.write("step_*\n")
|
492 |
-
if "epoch_*" not in gitignore:
|
493 |
-
gitignore.write("epoch_*\n")
|
494 |
-
elif args.output_dir is not None:
|
495 |
-
os.makedirs(args.output_dir, exist_ok=True)
|
496 |
-
|
497 |
-
# Load the tokenizer
|
498 |
-
if args.tokenizer_name:
|
499 |
-
tokenizer = CLIPTokenizer.from_pretrained(args.tokenizer_name)
|
500 |
-
elif args.pretrained_model_name_or_path:
|
501 |
-
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
|
502 |
-
|
503 |
-
# Load models and create wrapper for stable diffusion
|
504 |
-
if args.train_only_unet:
|
505 |
-
if os.path.exists(str(args.output_dir+"/text_encoder_trained")):
|
506 |
-
text_encoder = CLIPTextModel.from_pretrained(args.output_dir, subfolder="text_encoder_trained")
|
507 |
-
elif os.path.exists(str(args.output_dir+"/text_encoder")):
|
508 |
-
text_encoder = CLIPTextModel.from_pretrained(args.output_dir, subfolder="text_encoder")
|
509 |
-
else:
|
510 |
-
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
|
511 |
-
else:
|
512 |
-
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
|
513 |
-
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
|
514 |
-
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
|
515 |
-
|
516 |
-
vae.requires_grad_(False)
|
517 |
-
if not args.train_text_encoder:
|
518 |
-
text_encoder.requires_grad_(False)
|
519 |
-
|
520 |
-
if args.gradient_checkpointing:
|
521 |
-
unet.enable_gradient_checkpointing()
|
522 |
-
if args.train_text_encoder:
|
523 |
-
text_encoder.gradient_checkpointing_enable()
|
524 |
-
|
525 |
-
if args.scale_lr:
|
526 |
-
args.learning_rate = (
|
527 |
-
args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes
|
528 |
-
)
|
529 |
-
|
530 |
-
# Use 8-bit Adam for lower memory usage or to fine-tune the model in 16GB GPUs
|
531 |
-
if args.use_8bit_adam:
|
532 |
-
try:
|
533 |
-
import bitsandbytes as bnb
|
534 |
-
except ImportError:
|
535 |
-
raise ImportError(
|
536 |
-
"To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`."
|
537 |
-
)
|
538 |
-
|
539 |
-
optimizer_class = bnb.optim.AdamW8bit
|
540 |
-
else:
|
541 |
-
optimizer_class = torch.optim.AdamW
|
542 |
-
|
543 |
-
params_to_optimize = (
|
544 |
-
itertools.chain(unet.parameters(), text_encoder.parameters()) if args.train_text_encoder else unet.parameters()
|
545 |
-
)
|
546 |
-
optimizer = optimizer_class(
|
547 |
-
params_to_optimize,
|
548 |
-
lr=args.learning_rate,
|
549 |
-
betas=(args.adam_beta1, args.adam_beta2),
|
550 |
-
weight_decay=args.adam_weight_decay,
|
551 |
-
eps=args.adam_epsilon,
|
552 |
-
)
|
553 |
-
|
554 |
-
noise_scheduler = DDPMScheduler(
|
555 |
-
beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000
|
556 |
-
)
|
557 |
-
|
558 |
-
train_dataset = DreamBoothDataset(
|
559 |
-
instance_data_root=args.instance_data_dir,
|
560 |
-
instance_prompt=args.instance_prompt,
|
561 |
-
class_data_root=args.class_data_dir if args.with_prior_preservation else None,
|
562 |
-
class_prompt=args.class_prompt,
|
563 |
-
tokenizer=tokenizer,
|
564 |
-
size=args.resolution,
|
565 |
-
center_crop=args.center_crop,
|
566 |
-
args=args,
|
567 |
-
)
|
568 |
-
|
569 |
-
def collate_fn(examples):
|
570 |
-
input_ids = [example["instance_prompt_ids"] for example in examples]
|
571 |
-
pixel_values = [example["instance_images"] for example in examples]
|
572 |
-
|
573 |
-
# Concat class and instance examples for prior preservation.
|
574 |
-
# We do this to avoid doing two forward passes.
|
575 |
-
if args.with_prior_preservation:
|
576 |
-
input_ids += [example["class_prompt_ids"] for example in examples]
|
577 |
-
pixel_values += [example["class_images"] for example in examples]
|
578 |
-
|
579 |
-
pixel_values = torch.stack(pixel_values)
|
580 |
-
pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float()
|
581 |
-
|
582 |
-
input_ids = tokenizer.pad({"input_ids": input_ids}, padding=True, return_tensors="pt").input_ids
|
583 |
-
|
584 |
-
batch = {
|
585 |
-
"input_ids": input_ids,
|
586 |
-
"pixel_values": pixel_values,
|
587 |
-
}
|
588 |
-
return batch
|
589 |
-
|
590 |
-
train_dataloader = torch.utils.data.DataLoader(
|
591 |
-
train_dataset, batch_size=args.train_batch_size, shuffle=True, collate_fn=collate_fn
|
592 |
-
)
|
593 |
-
|
594 |
-
# Scheduler and math around the number of training steps.
|
595 |
-
overrode_max_train_steps = False
|
596 |
-
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
|
597 |
-
if args.max_train_steps is None:
|
598 |
-
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
|
599 |
-
overrode_max_train_steps = True
|
600 |
-
|
601 |
-
lr_scheduler = get_scheduler(
|
602 |
-
args.lr_scheduler,
|
603 |
-
optimizer=optimizer,
|
604 |
-
num_warmup_steps=args.lr_warmup_steps * args.gradient_accumulation_steps,
|
605 |
-
num_training_steps=args.max_train_steps * args.gradient_accumulation_steps,
|
606 |
-
)
|
607 |
-
|
608 |
-
if args.train_text_encoder:
|
609 |
-
unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
|
610 |
-
unet, text_encoder, optimizer, train_dataloader, lr_scheduler
|
611 |
-
)
|
612 |
-
else:
|
613 |
-
unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
|
614 |
-
unet, optimizer, train_dataloader, lr_scheduler
|
615 |
-
)
|
616 |
-
|
617 |
-
weight_dtype = torch.float32
|
618 |
-
if args.mixed_precision == "fp16":
|
619 |
-
weight_dtype = torch.float16
|
620 |
-
elif args.mixed_precision == "bf16":
|
621 |
-
weight_dtype = torch.bfloat16
|
622 |
-
|
623 |
-
# Move text_encode and vae to gpu.
|
624 |
-
# For mixed precision training we cast the text_encoder and vae weights to half-precision
|
625 |
-
# as these models are only used for inference, keeping weights in full precision is not required.
|
626 |
-
vae.to(accelerator.device, dtype=weight_dtype)
|
627 |
-
if not args.train_text_encoder:
|
628 |
-
text_encoder.to(accelerator.device, dtype=weight_dtype)
|
629 |
-
|
630 |
-
# We need to recalculate our total training steps as the size of the training dataloader may have changed.
|
631 |
-
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
|
632 |
-
if overrode_max_train_steps:
|
633 |
-
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
|
634 |
-
# Afterwards we recalculate our number of training epochs
|
635 |
-
args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
|
636 |
-
|
637 |
-
# We need to initialize the trackers we use, and also store our configuration.
|
638 |
-
# The trackers initialize automatically on the main process.
|
639 |
-
if accelerator.is_main_process:
|
640 |
-
accelerator.init_trackers("dreambooth", config=vars(args))
|
641 |
-
|
642 |
-
def bar(prg):
|
643 |
-
br='|'+'█' * prg + ' ' * (25-prg)+'|'
|
644 |
-
return br
|
645 |
-
|
646 |
-
# Train!
|
647 |
-
total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
|
648 |
-
|
649 |
-
logger.info("***** Running training *****")
|
650 |
-
logger.info(f" Num examples = {len(train_dataset)}")
|
651 |
-
logger.info(f" Num batches each epoch = {len(train_dataloader)}")
|
652 |
-
logger.info(f" Num Epochs = {args.num_train_epochs}")
|
653 |
-
logger.info(f" Instantaneous batch size per device = {args.train_batch_size}")
|
654 |
-
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
|
655 |
-
logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
|
656 |
-
logger.info(f" Total optimization steps = {args.max_train_steps}")
|
657 |
-
# Only show the progress bar once on each machine.
|
658 |
-
progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)
|
659 |
-
global_step = 0
|
660 |
-
|
661 |
-
for epoch in range(args.num_train_epochs):
|
662 |
-
unet.train()
|
663 |
-
if args.train_text_encoder:
|
664 |
-
text_encoder.train()
|
665 |
-
for step, batch in enumerate(train_dataloader):
|
666 |
-
with accelerator.accumulate(unet):
|
667 |
-
# Convert images to latent space
|
668 |
-
latents = vae.encode(batch["pixel_values"].to(dtype=weight_dtype)).latent_dist.sample()
|
669 |
-
latents = latents * 0.18215
|
670 |
-
|
671 |
-
# Sample noise that we'll add to the latents
|
672 |
-
noise = torch.randn_like(latents)
|
673 |
-
bsz = latents.shape[0]
|
674 |
-
# Sample a random timestep for each image
|
675 |
-
timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device)
|
676 |
-
timesteps = timesteps.long()
|
677 |
-
|
678 |
-
# Add noise to the latents according to the noise magnitude at each timestep
|
679 |
-
# (this is the forward diffusion process)
|
680 |
-
noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)
|
681 |
-
|
682 |
-
# Get the text embedding for conditioning
|
683 |
-
encoder_hidden_states = text_encoder(batch["input_ids"])[0]
|
684 |
-
|
685 |
-
# Predict the noise residual
|
686 |
-
noise_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample
|
687 |
-
|
688 |
-
if args.with_prior_preservation:
|
689 |
-
# Chunk the noise and noise_pred into two parts and compute the loss on each part separately.
|
690 |
-
noise_pred, noise_pred_prior = torch.chunk(noise_pred, 2, dim=0)
|
691 |
-
noise, noise_prior = torch.chunk(noise, 2, dim=0)
|
692 |
-
|
693 |
-
# Compute instance loss
|
694 |
-
loss = F.mse_loss(noise_pred.float(), noise.float(), reduction="none").mean([1, 2, 3]).mean()
|
695 |
-
|
696 |
-
# Compute prior loss
|
697 |
-
prior_loss = F.mse_loss(noise_pred_prior.float(), noise_prior.float(), reduction="mean")
|
698 |
-
|
699 |
-
# Add the prior loss to the instance loss.
|
700 |
-
loss = loss + args.prior_loss_weight * prior_loss
|
701 |
-
else:
|
702 |
-
loss = F.mse_loss(noise_pred.float(), noise.float(), reduction="mean")
|
703 |
-
|
704 |
-
accelerator.backward(loss)
|
705 |
-
if accelerator.sync_gradients:
|
706 |
-
params_to_clip = (
|
707 |
-
itertools.chain(unet.parameters(), text_encoder.parameters())
|
708 |
-
if args.train_text_encoder
|
709 |
-
else unet.parameters()
|
710 |
-
)
|
711 |
-
accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)
|
712 |
-
optimizer.step()
|
713 |
-
lr_scheduler.step()
|
714 |
-
optimizer.zero_grad()
|
715 |
-
|
716 |
-
# Checks if the accelerator has performed an optimization step behind the scenes
|
717 |
-
if accelerator.sync_gradients:
|
718 |
-
progress_bar.update(1)
|
719 |
-
global_step += 1
|
720 |
-
|
721 |
-
fll=round((global_step*100)/args.max_train_steps)
|
722 |
-
fll=round(fll/4)
|
723 |
-
pr=bar(fll)
|
724 |
-
|
725 |
-
logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]}
|
726 |
-
progress_bar.set_postfix(**logs)
|
727 |
-
progress_bar.set_description_str("Progress:"+pr)
|
728 |
-
accelerator.log(logs, step=global_step)
|
729 |
-
|
730 |
-
if global_step >= args.max_train_steps:
|
731 |
-
break
|
732 |
-
|
733 |
-
if args.train_text_encoder and global_step == args.stop_text_encoder_training and global_step >= 30:
|
734 |
-
if accelerator.is_main_process:
|
735 |
-
print(" [0;32m" +" Freezing the text_encoder ..."+" [0m")
|
736 |
-
frz_dir=args.output_dir + "/text_encoder_frozen"
|
737 |
-
if os.path.exists(frz_dir):
|
738 |
-
subprocess.call('rm -r '+ frz_dir, shell=True)
|
739 |
-
os.mkdir(frz_dir)
|
740 |
-
pipeline = StableDiffusionPipeline.from_pretrained(
|
741 |
-
args.pretrained_model_name_or_path,
|
742 |
-
unet=accelerator.unwrap_model(unet),
|
743 |
-
text_encoder=accelerator.unwrap_model(text_encoder),
|
744 |
-
)
|
745 |
-
pipeline.text_encoder.save_pretrained(frz_dir)
|
746 |
-
|
747 |
-
if args.save_n_steps >= 200:
|
748 |
-
if global_step < args.max_train_steps-100 and global_step+1==i:
|
749 |
-
ckpt_name = "_step_" + str(global_step+1)
|
750 |
-
save_dir = Path(args.output_dir+ckpt_name)
|
751 |
-
save_dir=str(save_dir)
|
752 |
-
save_dir=save_dir.replace(" ", "_")
|
753 |
-
if not os.path.exists(save_dir):
|
754 |
-
os.mkdir(save_dir)
|
755 |
-
inst=save_dir[16:]
|
756 |
-
inst=inst.replace(" ", "_")
|
757 |
-
print(" [1;32mSAVING CHECKPOINT: "+args.Session_dir+"/"+inst+".ckpt")
|
758 |
-
# Create the pipeline using the trained modules and save it.
|
759 |
-
if accelerator.is_main_process:
|
760 |
-
pipeline = StableDiffusionPipeline.from_pretrained(
|
761 |
-
args.pretrained_model_name_or_path,
|
762 |
-
unet=accelerator.unwrap_model(unet),
|
763 |
-
text_encoder=accelerator.unwrap_model(text_encoder),
|
764 |
-
)
|
765 |
-
pipeline.save_pretrained(save_dir)
|
766 |
-
frz_dir=args.output_dir + "/text_encoder_frozen"
|
767 |
-
if args.train_text_encoder and os.path.exists(frz_dir):
|
768 |
-
subprocess.call('rm -r '+save_dir+'/text_encoder/*.*', shell=True)
|
769 |
-
subprocess.call('cp -f '+frz_dir +'/*.* '+ save_dir+'/text_encoder', shell=True)
|
770 |
-
chkpth=args.Session_dir+"/"+inst+".ckpt"
|
771 |
-
subprocess.call('python /content/diffusers/scripts/convert_diffusers_to_original_stable_diffusion.py --model_path ' + save_dir + ' --checkpoint_path ' + chkpth + ' --half', shell=True)
|
772 |
-
i=i+args.save_n_steps
|
773 |
-
|
774 |
-
accelerator.wait_for_everyone()
|
775 |
-
|
776 |
-
# Create the pipeline using the trained modules and save it.
|
777 |
-
if accelerator.is_main_process:
|
778 |
-
if args.dump_only_text_encoder:
|
779 |
-
txt_dir=args.output_dir + "/text_encoder_trained"
|
780 |
-
if not os.path.exists(txt_dir):
|
781 |
-
os.mkdir(txt_dir)
|
782 |
-
pipeline = StableDiffusionPipeline.from_pretrained(
|
783 |
-
args.pretrained_model_name_or_path,
|
784 |
-
unet=accelerator.unwrap_model(unet),
|
785 |
-
text_encoder=accelerator.unwrap_model(text_encoder),
|
786 |
-
)
|
787 |
-
pipeline.text_encoder.save_pretrained(txt_dir)
|
788 |
-
|
789 |
-
elif args.train_only_unet:
|
790 |
-
pipeline = StableDiffusionPipeline.from_pretrained(
|
791 |
-
args.pretrained_model_name_or_path,
|
792 |
-
unet=accelerator.unwrap_model(unet),
|
793 |
-
text_encoder=accelerator.unwrap_model(text_encoder),
|
794 |
-
)
|
795 |
-
pipeline.save_pretrained(args.output_dir)
|
796 |
-
txt_dir=args.output_dir + "/text_encoder_trained"
|
797 |
-
subprocess.call('rm -r '+txt_dir, shell=True)
|
798 |
-
|
799 |
-
else:
|
800 |
-
pipeline = StableDiffusionPipeline.from_pretrained(
|
801 |
-
args.pretrained_model_name_or_path,
|
802 |
-
unet=accelerator.unwrap_model(unet),
|
803 |
-
text_encoder=accelerator.unwrap_model(text_encoder),
|
804 |
-
)
|
805 |
-
frz_dir=args.output_dir + "/text_encoder_frozen"
|
806 |
-
pipeline.save_pretrained(args.output_dir)
|
807 |
-
if args.train_text_encoder and os.path.exists(frz_dir):
|
808 |
-
subprocess.call('mv -f '+frz_dir +'/*.* '+ args.output_dir+'/text_encoder', shell=True)
|
809 |
-
subprocess.call('rm -r '+ frz_dir, shell=True)
|
810 |
-
|
811 |
-
if args.push_to_hub:
|
812 |
-
repo.push_to_hub(commit_message="End of training", blocking=False, auto_lfs_prune=True)
|
813 |
-
|
814 |
-
accelerator.end_training()
|
815 |
-
|
816 |
-
if __name__ == "__main__":
|
817 |
-
pass
|
818 |
-
#main()
|
spaces/Abhilashvj/planogram-compliance/utils/metrics.py
DELETED
@@ -1,465 +0,0 @@
|
|
1 |
-
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
|
2 |
-
"""
|
3 |
-
Model validation metrics
|
4 |
-
"""
|
5 |
-
|
6 |
-
import math
|
7 |
-
import warnings
|
8 |
-
from pathlib import Path
|
9 |
-
|
10 |
-
import matplotlib.pyplot as plt
|
11 |
-
import numpy as np
|
12 |
-
import torch
|
13 |
-
|
14 |
-
from utils import TryExcept, threaded
|
15 |
-
|
16 |
-
|
17 |
-
def fitness(x):
|
18 |
-
# Model fitness as a weighted combination of metrics
|
19 |
-
w = [0.0, 0.0, 0.1, 0.9] # weights for [P, R, mAP@0.5, mAP@0.5:0.95]
|
20 |
-
return (x[:, :4] * w).sum(1)
|
21 |
-
|
22 |
-
|
23 |
-
def smooth(y, f=0.05):
|
24 |
-
# Box filter of fraction f
|
25 |
-
nf = (
|
26 |
-
round(len(y) * f * 2) // 2 + 1
|
27 |
-
) # number of filter elements (must be odd)
|
28 |
-
p = np.ones(nf // 2) # ones padding
|
29 |
-
yp = np.concatenate((p * y[0], y, p * y[-1]), 0) # y padded
|
30 |
-
return np.convolve(yp, np.ones(nf) / nf, mode="valid") # y-smoothed
|
31 |
-
|
32 |
-
|
33 |
-
def ap_per_class(
|
34 |
-
tp,
|
35 |
-
conf,
|
36 |
-
pred_cls,
|
37 |
-
target_cls,
|
38 |
-
plot=False,
|
39 |
-
save_dir=".",
|
40 |
-
names=(),
|
41 |
-
eps=1e-16,
|
42 |
-
prefix="",
|
43 |
-
):
|
44 |
-
"""Compute the average precision, given the recall and precision curves.
|
45 |
-
Source: https://github.com/rafaelpadilla/Object-Detection-Metrics.
|
46 |
-
# Arguments
|
47 |
-
tp: True positives (nparray, nx1 or nx10).
|
48 |
-
conf: Objectness value from 0-1 (nparray).
|
49 |
-
pred_cls: Predicted object classes (nparray).
|
50 |
-
target_cls: True object classes (nparray).
|
51 |
-
plot: Plot precision-recall curve at mAP@0.5
|
52 |
-
save_dir: Plot save directory
|
53 |
-
# Returns
|
54 |
-
The average precision as computed in py-faster-rcnn.
|
55 |
-
"""
|
56 |
-
|
57 |
-
# Sort by objectness
|
58 |
-
i = np.argsort(-conf)
|
59 |
-
tp, conf, pred_cls = tp[i], conf[i], pred_cls[i]
|
60 |
-
|
61 |
-
# Find unique classes
|
62 |
-
unique_classes, nt = np.unique(target_cls, return_counts=True)
|
63 |
-
nc = unique_classes.shape[0] # number of classes, number of detections
|
64 |
-
|
65 |
-
# Create Precision-Recall curve and compute AP for each class
|
66 |
-
px, py = np.linspace(0, 1, 1000), [] # for plotting
|
67 |
-
ap, p, r = (
|
68 |
-
np.zeros((nc, tp.shape[1])),
|
69 |
-
np.zeros((nc, 1000)),
|
70 |
-
np.zeros((nc, 1000)),
|
71 |
-
)
|
72 |
-
for ci, c in enumerate(unique_classes):
|
73 |
-
i = pred_cls == c
|
74 |
-
n_l = nt[ci] # number of labels
|
75 |
-
n_p = i.sum() # number of predictions
|
76 |
-
if n_p == 0 or n_l == 0:
|
77 |
-
continue
|
78 |
-
|
79 |
-
# Accumulate FPs and TPs
|
80 |
-
fpc = (1 - tp[i]).cumsum(0)
|
81 |
-
tpc = tp[i].cumsum(0)
|
82 |
-
|
83 |
-
# Recall
|
84 |
-
recall = tpc / (n_l + eps) # recall curve
|
85 |
-
r[ci] = np.interp(
|
86 |
-
-px, -conf[i], recall[:, 0], left=0
|
87 |
-
) # negative x, xp because xp decreases
|
88 |
-
|
89 |
-
# Precision
|
90 |
-
precision = tpc / (tpc + fpc) # precision curve
|
91 |
-
p[ci] = np.interp(
|
92 |
-
-px, -conf[i], precision[:, 0], left=1
|
93 |
-
) # p at pr_score
|
94 |
-
|
95 |
-
# AP from recall-precision curve
|
96 |
-
for j in range(tp.shape[1]):
|
97 |
-
ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j])
|
98 |
-
if plot and j == 0:
|
99 |
-
py.append(np.interp(px, mrec, mpre)) # precision at mAP@0.5
|
100 |
-
|
101 |
-
# Compute F1 (harmonic mean of precision and recall)
|
102 |
-
f1 = 2 * p * r / (p + r + eps)
|
103 |
-
names = [
|
104 |
-
v for k, v in names.items() if k in unique_classes
|
105 |
-
] # list: only classes that have data
|
106 |
-
names = dict(enumerate(names)) # to dict
|
107 |
-
if plot:
|
108 |
-
plot_pr_curve(
|
109 |
-
px, py, ap, Path(save_dir) / f"{prefix}PR_curve.png", names
|
110 |
-
)
|
111 |
-
plot_mc_curve(
|
112 |
-
px,
|
113 |
-
f1,
|
114 |
-
Path(save_dir) / f"{prefix}F1_curve.png",
|
115 |
-
names,
|
116 |
-
ylabel="F1",
|
117 |
-
)
|
118 |
-
plot_mc_curve(
|
119 |
-
px,
|
120 |
-
p,
|
121 |
-
Path(save_dir) / f"{prefix}P_curve.png",
|
122 |
-
names,
|
123 |
-
ylabel="Precision",
|
124 |
-
)
|
125 |
-
plot_mc_curve(
|
126 |
-
px,
|
127 |
-
r,
|
128 |
-
Path(save_dir) / f"{prefix}R_curve.png",
|
129 |
-
names,
|
130 |
-
ylabel="Recall",
|
131 |
-
)
|
132 |
-
|
133 |
-
i = smooth(f1.mean(0), 0.1).argmax() # max F1 index
|
134 |
-
p, r, f1 = p[:, i], r[:, i], f1[:, i]
|
135 |
-
tp = (r * nt).round() # true positives
|
136 |
-
fp = (tp / (p + eps) - tp).round() # false positives
|
137 |
-
return tp, fp, p, r, f1, ap, unique_classes.astype(int)
|
138 |
-
|
139 |
-
|
140 |
-
def compute_ap(recall, precision):
|
141 |
-
"""Compute the average precision, given the recall and precision curves
|
142 |
-
# Arguments
|
143 |
-
recall: The recall curve (list)
|
144 |
-
precision: The precision curve (list)
|
145 |
-
# Returns
|
146 |
-
Average precision, precision curve, recall curve
|
147 |
-
"""
|
148 |
-
|
149 |
-
# Append sentinel values to beginning and end
|
150 |
-
mrec = np.concatenate(([0.0], recall, [1.0]))
|
151 |
-
mpre = np.concatenate(([1.0], precision, [0.0]))
|
152 |
-
|
153 |
-
# Compute the precision envelope
|
154 |
-
mpre = np.flip(np.maximum.accumulate(np.flip(mpre)))
|
155 |
-
|
156 |
-
# Integrate area under curve
|
157 |
-
method = "interp" # methods: 'continuous', 'interp'
|
158 |
-
if method == "interp":
|
159 |
-
x = np.linspace(0, 1, 101) # 101-point interp (COCO)
|
160 |
-
ap = np.trapz(np.interp(x, mrec, mpre), x) # integrate
|
161 |
-
else: # 'continuous'
|
162 |
-
i = np.where(mrec[1:] != mrec[:-1])[
|
163 |
-
0
|
164 |
-
] # points where x axis (recall) changes
|
165 |
-
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) # area under curve
|
166 |
-
|
167 |
-
return ap, mpre, mrec
|
168 |
-
|
169 |
-
|
170 |
-
class ConfusionMatrix:
|
171 |
-
# Updated version of https://github.com/kaanakan/object_detection_confusion_matrix
|
172 |
-
def __init__(self, nc, conf=0.25, iou_thres=0.45):
|
173 |
-
self.matrix = np.zeros((nc + 1, nc + 1))
|
174 |
-
self.nc = nc # number of classes
|
175 |
-
self.conf = conf
|
176 |
-
self.iou_thres = iou_thres
|
177 |
-
|
178 |
-
def process_batch(self, detections, labels):
|
179 |
-
"""
|
180 |
-
Return intersection-over-union (Jaccard index) of boxes.
|
181 |
-
Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
|
182 |
-
Arguments:
|
183 |
-
detections (Array[N, 6]), x1, y1, x2, y2, conf, class
|
184 |
-
labels (Array[M, 5]), class, x1, y1, x2, y2
|
185 |
-
Returns:
|
186 |
-
None, updates confusion matrix accordingly
|
187 |
-
"""
|
188 |
-
if detections is None:
|
189 |
-
gt_classes = labels.int()
|
190 |
-
for gc in gt_classes:
|
191 |
-
self.matrix[self.nc, gc] += 1 # background FN
|
192 |
-
return
|
193 |
-
|
194 |
-
detections = detections[detections[:, 4] > self.conf]
|
195 |
-
gt_classes = labels[:, 0].int()
|
196 |
-
detection_classes = detections[:, 5].int()
|
197 |
-
iou = box_iou(labels[:, 1:], detections[:, :4])
|
198 |
-
|
199 |
-
x = torch.where(iou > self.iou_thres)
|
200 |
-
if x[0].shape[0]:
|
201 |
-
matches = (
|
202 |
-
torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1)
|
203 |
-
.cpu()
|
204 |
-
.numpy()
|
205 |
-
)
|
206 |
-
if x[0].shape[0] > 1:
|
207 |
-
matches = matches[matches[:, 2].argsort()[::-1]]
|
208 |
-
matches = matches[
|
209 |
-
np.unique(matches[:, 1], return_index=True)[1]
|
210 |
-
]
|
211 |
-
matches = matches[matches[:, 2].argsort()[::-1]]
|
212 |
-
matches = matches[
|
213 |
-
np.unique(matches[:, 0], return_index=True)[1]
|
214 |
-
]
|
215 |
-
else:
|
216 |
-
matches = np.zeros((0, 3))
|
217 |
-
|
218 |
-
n = matches.shape[0] > 0
|
219 |
-
m0, m1, _ = matches.transpose().astype(int)
|
220 |
-
for i, gc in enumerate(gt_classes):
|
221 |
-
j = m0 == i
|
222 |
-
if n and sum(j) == 1:
|
223 |
-
self.matrix[detection_classes[m1[j]], gc] += 1 # correct
|
224 |
-
else:
|
225 |
-
self.matrix[self.nc, gc] += 1 # true background
|
226 |
-
|
227 |
-
if n:
|
228 |
-
for i, dc in enumerate(detection_classes):
|
229 |
-
if not any(m1 == i):
|
230 |
-
self.matrix[dc, self.nc] += 1 # predicted background
|
231 |
-
|
232 |
-
def tp_fp(self):
|
233 |
-
tp = self.matrix.diagonal() # true positives
|
234 |
-
fp = self.matrix.sum(1) - tp # false positives
|
235 |
-
# fn = self.matrix.sum(0) - tp # false negatives (missed detections)
|
236 |
-
return tp[:-1], fp[:-1] # remove background class
|
237 |
-
|
238 |
-
@TryExcept("WARNING ⚠️ ConfusionMatrix plot failure")
|
239 |
-
def plot(self, normalize=True, save_dir="", names=()):
|
240 |
-
import seaborn as sn
|
241 |
-
|
242 |
-
array = self.matrix / (
|
243 |
-
(self.matrix.sum(0).reshape(1, -1) + 1e-9) if normalize else 1
|
244 |
-
) # normalize columns
|
245 |
-
array[array < 0.005] = np.nan # don't annotate (would appear as 0.00)
|
246 |
-
|
247 |
-
fig, ax = plt.subplots(1, 1, figsize=(12, 9), tight_layout=True)
|
248 |
-
nc, nn = self.nc, len(names) # number of classes, names
|
249 |
-
sn.set(font_scale=1.0 if nc < 50 else 0.8) # for label size
|
250 |
-
labels = (0 < nn < 99) and (nn == nc) # apply names to ticklabels
|
251 |
-
ticklabels = (names + ["background"]) if labels else "auto"
|
252 |
-
with warnings.catch_warnings():
|
253 |
-
warnings.simplefilter(
|
254 |
-
"ignore"
|
255 |
-
) # suppress empty matrix RuntimeWarning: All-NaN slice encountered
|
256 |
-
sn.heatmap(
|
257 |
-
array,
|
258 |
-
ax=ax,
|
259 |
-
annot=nc < 30,
|
260 |
-
annot_kws={"size": 8},
|
261 |
-
cmap="Blues",
|
262 |
-
fmt=".2f",
|
263 |
-
square=True,
|
264 |
-
vmin=0.0,
|
265 |
-
xticklabels=ticklabels,
|
266 |
-
yticklabels=ticklabels,
|
267 |
-
).set_facecolor((1, 1, 1))
|
268 |
-
ax.set_ylabel("True")
|
269 |
-
ax.set_ylabel("Predicted")
|
270 |
-
ax.set_title("Confusion Matrix")
|
271 |
-
fig.savefig(Path(save_dir) / "confusion_matrix.png", dpi=250)
|
272 |
-
plt.close(fig)
|
273 |
-
|
274 |
-
def print(self):
|
275 |
-
for i in range(self.nc + 1):
|
276 |
-
print(" ".join(map(str, self.matrix[i])))
|
277 |
-
|
278 |
-
|
279 |
-
def bbox_iou(
|
280 |
-
box1, box2, xywh=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7
|
281 |
-
):
|
282 |
-
# Returns Intersection over Union (IoU) of box1(1,4) to box2(n,4)
|
283 |
-
|
284 |
-
# Get the coordinates of bounding boxes
|
285 |
-
if xywh: # transform from xywh to xyxy
|
286 |
-
(x1, y1, w1, h1), (x2, y2, w2, h2) = box1.chunk(4, -1), box2.chunk(
|
287 |
-
4, -1
|
288 |
-
)
|
289 |
-
w1_, h1_, w2_, h2_ = w1 / 2, h1 / 2, w2 / 2, h2 / 2
|
290 |
-
b1_x1, b1_x2, b1_y1, b1_y2 = x1 - w1_, x1 + w1_, y1 - h1_, y1 + h1_
|
291 |
-
b2_x1, b2_x2, b2_y1, b2_y2 = x2 - w2_, x2 + w2_, y2 - h2_, y2 + h2_
|
292 |
-
else: # x1, y1, x2, y2 = box1
|
293 |
-
b1_x1, b1_y1, b1_x2, b1_y2 = box1.chunk(4, -1)
|
294 |
-
b2_x1, b2_y1, b2_x2, b2_y2 = box2.chunk(4, -1)
|
295 |
-
w1, h1 = b1_x2 - b1_x1, (b1_y2 - b1_y1).clamp(eps)
|
296 |
-
w2, h2 = b2_x2 - b2_x1, (b2_y2 - b2_y1).clamp(eps)
|
297 |
-
|
298 |
-
# Intersection area
|
299 |
-
inter = (b1_x2.minimum(b2_x2) - b1_x1.maximum(b2_x1)).clamp(0) * (
|
300 |
-
b1_y2.minimum(b2_y2) - b1_y1.maximum(b2_y1)
|
301 |
-
).clamp(0)
|
302 |
-
|
303 |
-
# Union Area
|
304 |
-
union = w1 * h1 + w2 * h2 - inter + eps
|
305 |
-
|
306 |
-
# IoU
|
307 |
-
iou = inter / union
|
308 |
-
if CIoU or DIoU or GIoU:
|
309 |
-
cw = b1_x2.maximum(b2_x2) - b1_x1.minimum(
|
310 |
-
b2_x1
|
311 |
-
) # convex (smallest enclosing box) width
|
312 |
-
ch = b1_y2.maximum(b2_y2) - b1_y1.minimum(b2_y1) # convex height
|
313 |
-
if (
|
314 |
-
CIoU or DIoU
|
315 |
-
): # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1
|
316 |
-
c2 = cw**2 + ch**2 + eps # convex diagonal squared
|
317 |
-
rho2 = (
|
318 |
-
(b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2
|
319 |
-
+ (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2
|
320 |
-
) / 4 # center dist ** 2
|
321 |
-
if (
|
322 |
-
CIoU
|
323 |
-
): # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47
|
324 |
-
v = (4 / math.pi**2) * (
|
325 |
-
torch.atan(w2 / h2) - torch.atan(w1 / h1)
|
326 |
-
).pow(2)
|
327 |
-
with torch.no_grad():
|
328 |
-
alpha = v / (v - iou + (1 + eps))
|
329 |
-
return iou - (rho2 / c2 + v * alpha) # CIoU
|
330 |
-
return iou - rho2 / c2 # DIoU
|
331 |
-
c_area = cw * ch + eps # convex area
|
332 |
-
return (
|
333 |
-
iou - (c_area - union) / c_area
|
334 |
-
) # GIoU https://arxiv.org/pdf/1902.09630.pdf
|
335 |
-
return iou # IoU
|
336 |
-
|
337 |
-
|
338 |
-
def box_iou(box1, box2, eps=1e-7):
|
339 |
-
# https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py
|
340 |
-
"""
|
341 |
-
Return intersection-over-union (Jaccard index) of boxes.
|
342 |
-
Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
|
343 |
-
Arguments:
|
344 |
-
box1 (Tensor[N, 4])
|
345 |
-
box2 (Tensor[M, 4])
|
346 |
-
Returns:
|
347 |
-
iou (Tensor[N, M]): the NxM matrix containing the pairwise
|
348 |
-
IoU values for every element in boxes1 and boxes2
|
349 |
-
"""
|
350 |
-
|
351 |
-
# inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)
|
352 |
-
(a1, a2), (b1, b2) = box1.unsqueeze(1).chunk(2, 2), box2.unsqueeze(
|
353 |
-
0
|
354 |
-
).chunk(2, 2)
|
355 |
-
inter = (torch.min(a2, b2) - torch.max(a1, b1)).clamp(0).prod(2)
|
356 |
-
|
357 |
-
# IoU = inter / (area1 + area2 - inter)
|
358 |
-
return inter / ((a2 - a1).prod(2) + (b2 - b1).prod(2) - inter + eps)
|
359 |
-
|
360 |
-
|
361 |
-
def bbox_ioa(box1, box2, eps=1e-7):
|
362 |
-
"""Returns the intersection over box2 area given box1, box2. Boxes are x1y1x2y2
|
363 |
-
box1: np.array of shape(4)
|
364 |
-
box2: np.array of shape(nx4)
|
365 |
-
returns: np.array of shape(n)
|
366 |
-
"""
|
367 |
-
|
368 |
-
# Get the coordinates of bounding boxes
|
369 |
-
b1_x1, b1_y1, b1_x2, b1_y2 = box1
|
370 |
-
b2_x1, b2_y1, b2_x2, b2_y2 = box2.T
|
371 |
-
|
372 |
-
# Intersection area
|
373 |
-
inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(
|
374 |
-
0
|
375 |
-
) * (np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0)
|
376 |
-
|
377 |
-
# box2 area
|
378 |
-
box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + eps
|
379 |
-
|
380 |
-
# Intersection over box2 area
|
381 |
-
return inter_area / box2_area
|
382 |
-
|
383 |
-
|
384 |
-
def wh_iou(wh1, wh2, eps=1e-7):
|
385 |
-
# Returns the nxm IoU matrix. wh1 is nx2, wh2 is mx2
|
386 |
-
wh1 = wh1[:, None] # [N,1,2]
|
387 |
-
wh2 = wh2[None] # [1,M,2]
|
388 |
-
inter = torch.min(wh1, wh2).prod(2) # [N,M]
|
389 |
-
return inter / (
|
390 |
-
wh1.prod(2) + wh2.prod(2) - inter + eps
|
391 |
-
) # iou = inter / (area1 + area2 - inter)
|
392 |
-
|
393 |
-
|
394 |
-
# Plots ----------------------------------------------------------------------------------------------------------------
|
395 |
-
|
396 |
-
|
397 |
-
@threaded
|
398 |
-
def plot_pr_curve(px, py, ap, save_dir=Path("pr_curve.png"), names=()):
|
399 |
-
# Precision-recall curve
|
400 |
-
fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True)
|
401 |
-
py = np.stack(py, axis=1)
|
402 |
-
|
403 |
-
if 0 < len(names) < 21: # display per-class legend if < 21 classes
|
404 |
-
for i, y in enumerate(py.T):
|
405 |
-
ax.plot(
|
406 |
-
px, y, linewidth=1, label=f"{names[i]} {ap[i, 0]:.3f}"
|
407 |
-
) # plot(recall, precision)
|
408 |
-
else:
|
409 |
-
ax.plot(px, py, linewidth=1, color="grey") # plot(recall, precision)
|
410 |
-
|
411 |
-
ax.plot(
|
412 |
-
px,
|
413 |
-
py.mean(1),
|
414 |
-
linewidth=3,
|
415 |
-
color="blue",
|
416 |
-
label="all classes %.3f [email protected]" % ap[:, 0].mean(),
|
417 |
-
)
|
418 |
-
ax.set_xlabel("Recall")
|
419 |
-
ax.set_ylabel("Precision")
|
420 |
-
ax.set_xlim(0, 1)
|
421 |
-
ax.set_ylim(0, 1)
|
422 |
-
ax.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
|
423 |
-
ax.set_title("Precision-Recall Curve")
|
424 |
-
fig.savefig(save_dir, dpi=250)
|
425 |
-
plt.close(fig)
|
426 |
-
|
427 |
-
|
428 |
-
@threaded
|
429 |
-
def plot_mc_curve(
|
430 |
-
px,
|
431 |
-
py,
|
432 |
-
save_dir=Path("mc_curve.png"),
|
433 |
-
names=(),
|
434 |
-
xlabel="Confidence",
|
435 |
-
ylabel="Metric",
|
436 |
-
):
|
437 |
-
# Metric-confidence curve
|
438 |
-
fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True)
|
439 |
-
|
440 |
-
if 0 < len(names) < 21: # display per-class legend if < 21 classes
|
441 |
-
for i, y in enumerate(py):
|
442 |
-
ax.plot(
|
443 |
-
px, y, linewidth=1, label=f"{names[i]}"
|
444 |
-
) # plot(confidence, metric)
|
445 |
-
else:
|
446 |
-
ax.plot(
|
447 |
-
px, py.T, linewidth=1, color="grey"
|
448 |
-
) # plot(confidence, metric)
|
449 |
-
|
450 |
-
y = smooth(py.mean(0), 0.05)
|
451 |
-
ax.plot(
|
452 |
-
px,
|
453 |
-
y,
|
454 |
-
linewidth=3,
|
455 |
-
color="blue",
|
456 |
-
label=f"all classes {y.max():.2f} at {px[y.argmax()]:.3f}",
|
457 |
-
)
|
458 |
-
ax.set_xlabel(xlabel)
|
459 |
-
ax.set_ylabel(ylabel)
|
460 |
-
ax.set_xlim(0, 1)
|
461 |
-
ax.set_ylim(0, 1)
|
462 |
-
ax.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
|
463 |
-
ax.set_title(f"{ylabel}-Confidence Curve")
|
464 |
-
fig.savefig(save_dir, dpi=250)
|
465 |
-
plt.close(fig)
|
spaces/Adapter/T2I-Adapter/ldm/modules/extra_condition/openpose/util.py
DELETED
@@ -1,203 +0,0 @@
|
|
1 |
-
import math
|
2 |
-
|
3 |
-
import cv2
|
4 |
-
import matplotlib
|
5 |
-
import numpy as np
|
6 |
-
|
7 |
-
|
8 |
-
def padRightDownCorner(img, stride, padValue):
|
9 |
-
h = img.shape[0]
|
10 |
-
w = img.shape[1]
|
11 |
-
|
12 |
-
pad = 4 * [None]
|
13 |
-
pad[0] = 0 # up
|
14 |
-
pad[1] = 0 # left
|
15 |
-
pad[2] = 0 if (h % stride == 0) else stride - (h % stride) # down
|
16 |
-
pad[3] = 0 if (w % stride == 0) else stride - (w % stride) # right
|
17 |
-
|
18 |
-
img_padded = img
|
19 |
-
pad_up = np.tile(img_padded[0:1, :, :] * 0 + padValue, (pad[0], 1, 1))
|
20 |
-
img_padded = np.concatenate((pad_up, img_padded), axis=0)
|
21 |
-
pad_left = np.tile(img_padded[:, 0:1, :] * 0 + padValue, (1, pad[1], 1))
|
22 |
-
img_padded = np.concatenate((pad_left, img_padded), axis=1)
|
23 |
-
pad_down = np.tile(img_padded[-2:-1, :, :] * 0 + padValue, (pad[2], 1, 1))
|
24 |
-
img_padded = np.concatenate((img_padded, pad_down), axis=0)
|
25 |
-
pad_right = np.tile(img_padded[:, -2:-1, :] * 0 + padValue, (1, pad[3], 1))
|
26 |
-
img_padded = np.concatenate((img_padded, pad_right), axis=1)
|
27 |
-
|
28 |
-
return img_padded, pad
|
29 |
-
|
30 |
-
|
31 |
-
# transfer caffe model to pytorch which will match the layer name
|
32 |
-
def transfer(model, model_weights):
|
33 |
-
transfered_model_weights = {}
|
34 |
-
for weights_name in model.state_dict().keys():
|
35 |
-
transfered_model_weights[weights_name] = model_weights['.'.join(weights_name.split('.')[1:])]
|
36 |
-
return transfered_model_weights
|
37 |
-
|
38 |
-
|
39 |
-
# draw the body keypoint and lims
|
40 |
-
def draw_bodypose(canvas, candidate, subset):
|
41 |
-
stickwidth = 4
|
42 |
-
limbSeq = [[2, 3], [2, 6], [3, 4], [4, 5], [6, 7], [7, 8], [2, 9], [9, 10], \
|
43 |
-
[10, 11], [2, 12], [12, 13], [13, 14], [2, 1], [1, 15], [15, 17], \
|
44 |
-
[1, 16], [16, 18], [3, 17], [6, 18]]
|
45 |
-
|
46 |
-
colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0], \
|
47 |
-
[0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255], \
|
48 |
-
[170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]]
|
49 |
-
for i in range(18):
|
50 |
-
for n in range(len(subset)):
|
51 |
-
index = int(subset[n][i])
|
52 |
-
if index == -1:
|
53 |
-
continue
|
54 |
-
x, y = candidate[index][0:2]
|
55 |
-
cv2.circle(canvas, (int(x), int(y)), 4, colors[i], thickness=-1)
|
56 |
-
for i in range(17):
|
57 |
-
for n in range(len(subset)):
|
58 |
-
index = subset[n][np.array(limbSeq[i]) - 1]
|
59 |
-
if -1 in index:
|
60 |
-
continue
|
61 |
-
cur_canvas = canvas.copy()
|
62 |
-
Y = candidate[index.astype(int), 0]
|
63 |
-
X = candidate[index.astype(int), 1]
|
64 |
-
mX = np.mean(X)
|
65 |
-
mY = np.mean(Y)
|
66 |
-
length = ((X[0] - X[1])**2 + (Y[0] - Y[1])**2)**0.5
|
67 |
-
angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1]))
|
68 |
-
polygon = cv2.ellipse2Poly((int(mY), int(mX)), (int(length / 2), stickwidth), int(angle), 0, 360, 1)
|
69 |
-
cv2.fillConvexPoly(cur_canvas, polygon, colors[i])
|
70 |
-
canvas = cv2.addWeighted(canvas, 0.4, cur_canvas, 0.6, 0)
|
71 |
-
# plt.imsave("preview.jpg", canvas[:, :, [2, 1, 0]])
|
72 |
-
# plt.imshow(canvas[:, :, [2, 1, 0]])
|
73 |
-
return canvas
|
74 |
-
|
75 |
-
|
76 |
-
# image drawed by opencv is not good.
|
77 |
-
def draw_handpose(canvas, all_hand_peaks, show_number=False):
|
78 |
-
edges = [[0, 1], [1, 2], [2, 3], [3, 4], [0, 5], [5, 6], [6, 7], [7, 8], [0, 9], [9, 10], \
|
79 |
-
[10, 11], [11, 12], [0, 13], [13, 14], [14, 15], [15, 16], [0, 17], [17, 18], [18, 19], [19, 20]]
|
80 |
-
|
81 |
-
for peaks in all_hand_peaks:
|
82 |
-
for ie, e in enumerate(edges):
|
83 |
-
if np.sum(np.all(peaks[e], axis=1) == 0) == 0:
|
84 |
-
x1, y1 = peaks[e[0]]
|
85 |
-
x2, y2 = peaks[e[1]]
|
86 |
-
cv2.line(
|
87 |
-
canvas, (x1, y1), (x2, y2),
|
88 |
-
matplotlib.colors.hsv_to_rgb([ie / float(len(edges)), 1.0, 1.0]) * 255,
|
89 |
-
thickness=2)
|
90 |
-
|
91 |
-
for i, keypoint in enumerate(peaks):
|
92 |
-
x, y = keypoint
|
93 |
-
cv2.circle(canvas, (x, y), 4, (0, 0, 255), thickness=-1)
|
94 |
-
if show_number:
|
95 |
-
cv2.putText(canvas, str(i), (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (0, 0, 0), lineType=cv2.LINE_AA)
|
96 |
-
return canvas
|
97 |
-
|
98 |
-
|
99 |
-
# detect hand according to body pose keypoints
|
100 |
-
# please refer to https://github.com/CMU-Perceptual-Computing-Lab/openpose/blob/master/src/openpose/hand/handDetector.cpp
|
101 |
-
def handDetect(candidate, subset, oriImg):
|
102 |
-
# right hand: wrist 4, elbow 3, shoulder 2
|
103 |
-
# left hand: wrist 7, elbow 6, shoulder 5
|
104 |
-
ratioWristElbow = 0.33
|
105 |
-
detect_result = []
|
106 |
-
image_height, image_width = oriImg.shape[0:2]
|
107 |
-
for person in subset.astype(int):
|
108 |
-
# if any of three not detected
|
109 |
-
has_left = np.sum(person[[5, 6, 7]] == -1) == 0
|
110 |
-
has_right = np.sum(person[[2, 3, 4]] == -1) == 0
|
111 |
-
if not (has_left or has_right):
|
112 |
-
continue
|
113 |
-
hands = []
|
114 |
-
#left hand
|
115 |
-
if has_left:
|
116 |
-
left_shoulder_index, left_elbow_index, left_wrist_index = person[[5, 6, 7]]
|
117 |
-
x1, y1 = candidate[left_shoulder_index][:2]
|
118 |
-
x2, y2 = candidate[left_elbow_index][:2]
|
119 |
-
x3, y3 = candidate[left_wrist_index][:2]
|
120 |
-
hands.append([x1, y1, x2, y2, x3, y3, True])
|
121 |
-
# right hand
|
122 |
-
if has_right:
|
123 |
-
right_shoulder_index, right_elbow_index, right_wrist_index = person[[2, 3, 4]]
|
124 |
-
x1, y1 = candidate[right_shoulder_index][:2]
|
125 |
-
x2, y2 = candidate[right_elbow_index][:2]
|
126 |
-
x3, y3 = candidate[right_wrist_index][:2]
|
127 |
-
hands.append([x1, y1, x2, y2, x3, y3, False])
|
128 |
-
|
129 |
-
for x1, y1, x2, y2, x3, y3, is_left in hands:
|
130 |
-
# pos_hand = pos_wrist + ratio * (pos_wrist - pos_elbox) = (1 + ratio) * pos_wrist - ratio * pos_elbox
|
131 |
-
# handRectangle.x = posePtr[wrist*3] + ratioWristElbow * (posePtr[wrist*3] - posePtr[elbow*3]);
|
132 |
-
# handRectangle.y = posePtr[wrist*3+1] + ratioWristElbow * (posePtr[wrist*3+1] - posePtr[elbow*3+1]);
|
133 |
-
# const auto distanceWristElbow = getDistance(poseKeypoints, person, wrist, elbow);
|
134 |
-
# const auto distanceElbowShoulder = getDistance(poseKeypoints, person, elbow, shoulder);
|
135 |
-
# handRectangle.width = 1.5f * fastMax(distanceWristElbow, 0.9f * distanceElbowShoulder);
|
136 |
-
x = x3 + ratioWristElbow * (x3 - x2)
|
137 |
-
y = y3 + ratioWristElbow * (y3 - y2)
|
138 |
-
distanceWristElbow = math.sqrt((x3 - x2)**2 + (y3 - y2)**2)
|
139 |
-
distanceElbowShoulder = math.sqrt((x2 - x1)**2 + (y2 - y1)**2)
|
140 |
-
width = 1.5 * max(distanceWristElbow, 0.9 * distanceElbowShoulder)
|
141 |
-
# x-y refers to the center --> offset to topLeft point
|
142 |
-
# handRectangle.x -= handRectangle.width / 2.f;
|
143 |
-
# handRectangle.y -= handRectangle.height / 2.f;
|
144 |
-
x -= width / 2
|
145 |
-
y -= width / 2 # width = height
|
146 |
-
# overflow the image
|
147 |
-
if x < 0: x = 0
|
148 |
-
if y < 0: y = 0
|
149 |
-
width1 = width
|
150 |
-
width2 = width
|
151 |
-
if x + width > image_width: width1 = image_width - x
|
152 |
-
if y + width > image_height: width2 = image_height - y
|
153 |
-
width = min(width1, width2)
|
154 |
-
# the max hand box value is 20 pixels
|
155 |
-
if width >= 20:
|
156 |
-
detect_result.append([int(x), int(y), int(width), is_left])
|
157 |
-
'''
|
158 |
-
return value: [[x, y, w, True if left hand else False]].
|
159 |
-
width=height since the network require squared input.
|
160 |
-
x, y is the coordinate of top left
|
161 |
-
'''
|
162 |
-
return detect_result
|
163 |
-
|
164 |
-
|
165 |
-
# get max index of 2d array
|
166 |
-
def npmax(array):
|
167 |
-
arrayindex = array.argmax(1)
|
168 |
-
arrayvalue = array.max(1)
|
169 |
-
i = arrayvalue.argmax()
|
170 |
-
j = arrayindex[i]
|
171 |
-
return i, j
|
172 |
-
|
173 |
-
|
174 |
-
def HWC3(x):
|
175 |
-
assert x.dtype == np.uint8
|
176 |
-
if x.ndim == 2:
|
177 |
-
x = x[:, :, None]
|
178 |
-
assert x.ndim == 3
|
179 |
-
H, W, C = x.shape
|
180 |
-
assert C == 1 or C == 3 or C == 4
|
181 |
-
if C == 3:
|
182 |
-
return x
|
183 |
-
if C == 1:
|
184 |
-
return np.concatenate([x, x, x], axis=2)
|
185 |
-
if C == 4:
|
186 |
-
color = x[:, :, 0:3].astype(np.float32)
|
187 |
-
alpha = x[:, :, 3:4].astype(np.float32) / 255.0
|
188 |
-
y = color * alpha + 255.0 * (1.0 - alpha)
|
189 |
-
y = y.clip(0, 255).astype(np.uint8)
|
190 |
-
return y
|
191 |
-
|
192 |
-
|
193 |
-
def resize_image(input_image, resolution):
|
194 |
-
H, W, C = input_image.shape
|
195 |
-
H = float(H)
|
196 |
-
W = float(W)
|
197 |
-
k = float(resolution) / min(H, W)
|
198 |
-
H *= k
|
199 |
-
W *= k
|
200 |
-
H = int(np.round(H / 64.0)) * 64
|
201 |
-
W = int(np.round(W / 64.0)) * 64
|
202 |
-
img = cv2.resize(input_image, (W, H), interpolation=cv2.INTER_LANCZOS4 if k > 1 else cv2.INTER_AREA)
|
203 |
-
return img
|
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/facebook/Facebook.d.ts
DELETED
@@ -1,2 +0,0 @@
-import Base from '../base/Base';
-export default class Facebook extends Base { }
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/hiddenedit/HiddenEdit.js
DELETED
@@ -1,2 +0,0 @@
-import HiddenEdit from '../../../plugins/hiddeninputtext.js';
-export default HiddenEdit;
spaces/AlexWang/lama/saicinpainting/training/modules/fake_fakes.py
DELETED
@@ -1,47 +0,0 @@
-import torch
-from kornia import SamplePadding
-from kornia.augmentation import RandomAffine, CenterCrop
-
-
-class FakeFakesGenerator:
-    def __init__(self, aug_proba=0.5, img_aug_degree=30, img_aug_translate=0.2):
-        self.grad_aug = RandomAffine(degrees=360,
-                                     translate=0.2,
-                                     padding_mode=SamplePadding.REFLECTION,
-                                     keepdim=False,
-                                     p=1)
-        self.img_aug = RandomAffine(degrees=img_aug_degree,
-                                    translate=img_aug_translate,
-                                    padding_mode=SamplePadding.REFLECTION,
-                                    keepdim=True,
-                                    p=1)
-        self.aug_proba = aug_proba
-
-    def __call__(self, input_images, masks):
-        blend_masks = self._fill_masks_with_gradient(masks)
-        blend_target = self._make_blend_target(input_images)
-        result = input_images * (1 - blend_masks) + blend_target * blend_masks
-        return result, blend_masks
-
-    def _make_blend_target(self, input_images):
-        batch_size = input_images.shape[0]
-        permuted = input_images[torch.randperm(batch_size)]
-        augmented = self.img_aug(input_images)
-        is_aug = (torch.rand(batch_size, device=input_images.device)[:, None, None, None] < self.aug_proba).float()
-        result = augmented * is_aug + permuted * (1 - is_aug)
-        return result
-
-    def _fill_masks_with_gradient(self, masks):
-        batch_size, _, height, width = masks.shape
-        grad = torch.linspace(0, 1, steps=width * 2, device=masks.device, dtype=masks.dtype) \
-            .view(1, 1, 1, -1).expand(batch_size, 1, height * 2, width * 2)
-        grad = self.grad_aug(grad)
-        grad = CenterCrop((height, width))(grad)
-        grad *= masks
-
-        grad_for_min = grad + (1 - masks) * 10
-        grad -= grad_for_min.view(batch_size, -1).min(-1).values[:, None, None, None]
-        grad /= grad.view(batch_size, -1).max(-1).values[:, None, None, None] + 1e-6
-        grad.clamp_(min=0, max=1)
-
-        return grad
spaces/Amr453/Transcription/app.py
DELETED
@@ -1,109 +0,0 @@
|
|
1 |
-
import whisper
|
2 |
-
import gradio as gr
|
3 |
-
import datetime
|
4 |
-
|
5 |
-
import subprocess
|
6 |
-
|
7 |
-
import torch
|
8 |
-
import pyannote.audio
|
9 |
-
from pyannote.audio.pipelines.speaker_verification import PretrainedSpeakerEmbedding
|
10 |
-
|
11 |
-
from pyannote.audio import Audio
|
12 |
-
from pyannote.core import Segment
|
13 |
-
|
14 |
-
import wave
|
15 |
-
import contextlib
|
16 |
-
|
17 |
-
from sklearn.cluster import AgglomerativeClustering
|
18 |
-
import numpy as np
|
19 |
-
|
20 |
-
model = whisper.load_model("large-v2")
|
21 |
-
embedding_model = PretrainedSpeakerEmbedding(
|
22 |
-
"speechbrain/spkrec-ecapa-voxceleb",
|
23 |
-
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
|
24 |
-
)
|
25 |
-
|
26 |
-
def transcribe(audio, num_speakers):
|
27 |
-
path, error = convert_to_wav(audio)
|
28 |
-
if error is not None:
|
29 |
-
return error
|
30 |
-
|
31 |
-
duration = get_duration(path)
|
32 |
-
if duration > 4 * 60 * 60:
|
33 |
-
return "Audio duration too long"
|
34 |
-
|
35 |
-
result = model.transcribe(path)
|
36 |
-
segments = result["segments"]
|
37 |
-
|
38 |
-
num_speakers = min(max(round(num_speakers), 1), len(segments))
|
39 |
-
if len(segments) == 1:
|
40 |
-
segments[0]['speaker'] = 'SPEAKER 1'
|
41 |
-
else:
|
42 |
-
embeddings = make_embeddings(path, segments, duration)
|
43 |
-
add_speaker_labels(segments, embeddings, num_speakers)
|
44 |
-
output = get_output(segments)
|
45 |
-
return output
|
46 |
-
|
47 |
-
def convert_to_wav(path):
|
48 |
-
if path[-3:] != 'wav':
|
49 |
-
new_path = '.'.join(path.split('.')[:-1]) + '.wav'
|
50 |
-
try:
|
51 |
-
subprocess.call(['ffmpeg', '-i', path, new_path, '-y'])
|
52 |
-
except:
|
53 |
-
return path, 'Error: Could not convert file to .wav'
|
54 |
-
path = new_path
|
55 |
-
return path, None
|
56 |
-
|
57 |
-
def get_duration(path):
|
58 |
-
with contextlib.closing(wave.open(path,'r')) as f:
|
59 |
-
frames = f.getnframes()
|
60 |
-
rate = f.getframerate()
|
61 |
-
return frames / float(rate)
|
62 |
-
|
63 |
-
def make_embeddings(path, segments, duration):
|
64 |
-
embeddings = np.zeros(shape=(len(segments), 192))
|
65 |
-
for i, segment in enumerate(segments):
|
66 |
-
embeddings[i] = segment_embedding(path, segment, duration)
|
67 |
-
return np.nan_to_num(embeddings)
|
68 |
-
|
69 |
-
audio = Audio()
|
70 |
-
|
71 |
-
def segment_embedding(path, segment, duration):
|
72 |
-
start = segment["start"]
|
73 |
-
# Whisper overshoots the end timestamp in the last segment
|
74 |
-
end = min(duration, segment["end"])
|
75 |
-
clip = Segment(start, end)
|
76 |
-
waveform, sample_rate = audio.crop(path, clip)
|
77 |
-
return embedding_model(waveform[None])
|
78 |
-
|
79 |
-
def add_speaker_labels(segments, embeddings, num_speakers):
|
80 |
-
clustering = AgglomerativeClustering(num_speakers).fit(embeddings)
|
81 |
-
labels = clustering.labels_
|
82 |
-
for i in range(len(segments)):
|
83 |
-
segments[i]["speaker"] = 'SPEAKER ' + str(labels[i] + 1)
|
84 |
-
|
85 |
-
def time(secs):
|
86 |
-
return datetime.timedelta(seconds=round(secs))
|
87 |
-
|
88 |
-
def get_output(segments):
|
89 |
-
output = ''
|
90 |
-
for (i, segment) in enumerate(segments):
|
91 |
-
if i == 0 or segments[i - 1]["speaker"] != segment["speaker"]:
|
92 |
-
if i != 0:
|
93 |
-
output += '\n\n'
|
94 |
-
output += segment["speaker"] + ' ' + str(time(segment["start"])) + '\n\n'
|
95 |
-
output += segment["text"][1:] + ' '
|
96 |
-
return output
|
97 |
-
|
98 |
-
gr.Interface(
|
99 |
-
title = 'Whisper with Speaker Recognition',
|
100 |
-
fn=transcribe,
|
101 |
-
inputs=[
|
102 |
-
gr.inputs.Audio(source="upload", type="filepath"),
|
103 |
-
gr.inputs.Number(default=2, label="Number of Speakers")
|
104 |
-
|
105 |
-
],
|
106 |
-
outputs=[
|
107 |
-
gr.outputs.Textbox(label='Transcript')
|
108 |
-
]
|
109 |
-
).launch()
|
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/latent_diffusion/test_latent_diffusion_uncond.py
DELETED
@@ -1,116 +0,0 @@
|
|
1 |
-
# coding=utf-8
|
2 |
-
# Copyright 2023 HuggingFace Inc.
|
3 |
-
#
|
4 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
# you may not use this file except in compliance with the License.
|
6 |
-
# You may obtain a copy of the License at
|
7 |
-
#
|
8 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
#
|
10 |
-
# Unless required by applicable law or agreed to in writing, software
|
11 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
# See the License for the specific language governing permissions and
|
14 |
-
# limitations under the License.
|
15 |
-
|
16 |
-
import unittest
|
17 |
-
|
18 |
-
import numpy as np
|
19 |
-
import torch
|
20 |
-
from transformers import CLIPTextConfig, CLIPTextModel
|
21 |
-
|
22 |
-
from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
|
23 |
-
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
|
24 |
-
|
25 |
-
|
26 |
-
enable_full_determinism()
|
27 |
-
|
28 |
-
|
29 |
-
class LDMPipelineFastTests(unittest.TestCase):
|
30 |
-
@property
|
31 |
-
def dummy_uncond_unet(self):
|
32 |
-
torch.manual_seed(0)
|
33 |
-
model = UNet2DModel(
|
34 |
-
block_out_channels=(32, 64),
|
35 |
-
layers_per_block=2,
|
36 |
-
sample_size=32,
|
37 |
-
in_channels=3,
|
38 |
-
out_channels=3,
|
39 |
-
down_block_types=("DownBlock2D", "AttnDownBlock2D"),
|
40 |
-
up_block_types=("AttnUpBlock2D", "UpBlock2D"),
|
41 |
-
)
|
42 |
-
return model
|
43 |
-
|
44 |
-
@property
|
45 |
-
def dummy_vq_model(self):
|
46 |
-
torch.manual_seed(0)
|
47 |
-
model = VQModel(
|
48 |
-
block_out_channels=[32, 64],
|
49 |
-
in_channels=3,
|
50 |
-
out_channels=3,
|
51 |
-
down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
|
52 |
-
up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
|
53 |
-
latent_channels=3,
|
54 |
-
)
|
55 |
-
return model
|
56 |
-
|
57 |
-
@property
|
58 |
-
def dummy_text_encoder(self):
|
59 |
-
torch.manual_seed(0)
|
60 |
-
config = CLIPTextConfig(
|
61 |
-
bos_token_id=0,
|
62 |
-
eos_token_id=2,
|
63 |
-
hidden_size=32,
|
64 |
-
intermediate_size=37,
|
65 |
-
layer_norm_eps=1e-05,
|
66 |
-
num_attention_heads=4,
|
67 |
-
num_hidden_layers=5,
|
68 |
-
pad_token_id=1,
|
69 |
-
vocab_size=1000,
|
70 |
-
)
|
71 |
-
return CLIPTextModel(config)
|
72 |
-
|
73 |
-
def test_inference_uncond(self):
|
74 |
-
unet = self.dummy_uncond_unet
|
75 |
-
scheduler = DDIMScheduler()
|
76 |
-
vae = self.dummy_vq_model
|
77 |
-
|
78 |
-
ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
|
79 |
-
ldm.to(torch_device)
|
80 |
-
ldm.set_progress_bar_config(disable=None)
|
81 |
-
|
82 |
-
generator = torch.manual_seed(0)
|
83 |
-
image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images
|
84 |
-
|
85 |
-
generator = torch.manual_seed(0)
|
86 |
-
image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]
|
87 |
-
|
88 |
-
image_slice = image[0, -3:, -3:, -1]
|
89 |
-
image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
|
90 |
-
|
91 |
-
assert image.shape == (1, 64, 64, 3)
|
92 |
-
expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
|
93 |
-
tolerance = 1e-2 if torch_device != "mps" else 3e-2
|
94 |
-
|
95 |
-
assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
|
96 |
-
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance
|
97 |
-
|
98 |
-
|
99 |
-
@slow
|
100 |
-
@require_torch
|
101 |
-
class LDMPipelineIntegrationTests(unittest.TestCase):
|
102 |
-
def test_inference_uncond(self):
|
103 |
-
ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
|
104 |
-
ldm.to(torch_device)
|
105 |
-
ldm.set_progress_bar_config(disable=None)
|
106 |
-
|
107 |
-
generator = torch.manual_seed(0)
|
108 |
-
image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images
|
109 |
-
|
110 |
-
image_slice = image[0, -3:, -3:, -1]
|
111 |
-
|
112 |
-
assert image.shape == (1, 256, 256, 3)
|
113 |
-
expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
|
114 |
-
tolerance = 1e-2 if torch_device != "mps" else 3e-2
|
115 |
-
|
116 |
-
assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
|
spaces/Andy1621/uniformer_image_segmentation/configs/mobilenet_v2/fcn_m-v2-d8_512x1024_80k_cityscapes.py
DELETED
@@ -1,12 +0,0 @@
-_base_ = '../fcn/fcn_r101-d8_512x1024_80k_cityscapes.py'
-model = dict(
-    pretrained='mmcls://mobilenet_v2',
-    backbone=dict(
-        _delete_=True,
-        type='MobileNetV2',
-        widen_factor=1.,
-        strides=(1, 2, 2, 1, 1, 1, 1),
-        dilations=(1, 1, 1, 2, 2, 4, 4),
-        out_indices=(1, 2, 4, 6)),
-    decode_head=dict(in_channels=320),
-    auxiliary_head=dict(in_channels=96))
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/docs/Audio-Notification.md
DELETED
@@ -1,14 +0,0 @@
-# Audio notification
-
-If your computer takes a long time to generate each response for the model that you are using, you can enable an audio notification for when the response is completed. This feature was kindly contributed by HappyWorldGames in [#1277](https://github.com/oobabooga/text-generation-webui/pull/1277).
-
-### Installation
-
-Simply place a file called "notification.mp3" in the same folder as `server.py`. Here you can find some examples:
-
-* https://pixabay.com/sound-effects/search/ding/?duration=0-30
-* https://pixabay.com/sound-effects/search/notification/?duration=0-30
-
-Source: https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/1126
-
-This file will be automatically detected the next time you start the web UI.
spaces/AquaSuisei/ChatGPTXE/modules/openai_func.py
DELETED
@@ -1,65 +0,0 @@
|
|
1 |
-
import requests
|
2 |
-
import logging
|
3 |
-
from modules.presets import (
|
4 |
-
timeout_all,
|
5 |
-
USAGE_API_URL,
|
6 |
-
BALANCE_API_URL,
|
7 |
-
standard_error_msg,
|
8 |
-
connection_timeout_prompt,
|
9 |
-
error_retrieve_prompt,
|
10 |
-
read_timeout_prompt
|
11 |
-
)
|
12 |
-
|
13 |
-
from . import shared
|
14 |
-
from modules.config import retrieve_proxy
|
15 |
-
import os, datetime
|
16 |
-
|
17 |
-
def get_billing_data(openai_api_key, billing_url):
|
18 |
-
headers = {
|
19 |
-
"Content-Type": "application/json",
|
20 |
-
"Authorization": f"Bearer {openai_api_key}"
|
21 |
-
}
|
22 |
-
|
23 |
-
timeout = timeout_all
|
24 |
-
with retrieve_proxy():
|
25 |
-
response = requests.get(
|
26 |
-
billing_url,
|
27 |
-
headers=headers,
|
28 |
-
timeout=timeout,
|
29 |
-
)
|
30 |
-
|
31 |
-
if response.status_code == 200:
|
32 |
-
data = response.json()
|
33 |
-
return data
|
34 |
-
else:
|
35 |
-
raise Exception(f"API request failed with status code {response.status_code}: {response.text}")
|
36 |
-
|
37 |
-
|
38 |
-
def get_usage(openai_api_key):
|
39 |
-
try:
|
40 |
-
curr_time = datetime.datetime.now()
|
41 |
-
last_day_of_month = get_last_day_of_month(curr_time).strftime("%Y-%m-%d")
|
42 |
-
first_day_of_month = curr_time.replace(day=1).strftime("%Y-%m-%d")
|
43 |
-
usage_url = f"{shared.state.usage_api_url}?start_date={first_day_of_month}&end_date={last_day_of_month}"
|
44 |
-
try:
|
45 |
-
usage_data = get_billing_data(openai_api_key, usage_url)
|
46 |
-
except Exception as e:
|
47 |
-
logging.error(f"获取API使用情况失败:"+str(e))
|
48 |
-
return f"**获取API使用情况失败**"
|
49 |
-
rounded_usage = "{:.5f}".format(usage_data['total_usage']/100)
|
50 |
-
return f"**本月使用金额** \u3000 ${rounded_usage}"
|
51 |
-
except requests.exceptions.ConnectTimeout:
|
52 |
-
status_text = standard_error_msg + connection_timeout_prompt + error_retrieve_prompt
|
53 |
-
return status_text
|
54 |
-
except requests.exceptions.ReadTimeout:
|
55 |
-
status_text = standard_error_msg + read_timeout_prompt + error_retrieve_prompt
|
56 |
-
return status_text
|
57 |
-
except Exception as e:
|
58 |
-
logging.error(f"获取API使用情况失败:"+str(e))
|
59 |
-
return standard_error_msg + error_retrieve_prompt
|
60 |
-
|
61 |
-
def get_last_day_of_month(any_day):
|
62 |
-
# The day 28 exists in every month. 4 days later, it's always next month
|
63 |
-
next_month = any_day.replace(day=28) + datetime.timedelta(days=4)
|
64 |
-
# subtracting the number of the current day brings us back one month
|
65 |
-
return next_month - datetime.timedelta(days=next_month.day)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Ariharasudhan/YoloV5/utils/flask_rest_api/restapi.py
DELETED
@@ -1,48 +0,0 @@
|
|
1 |
-
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
|
2 |
-
"""
|
3 |
-
Run a Flask REST API exposing one or more YOLOv5s models
|
4 |
-
"""
|
5 |
-
|
6 |
-
import argparse
|
7 |
-
import io
|
8 |
-
|
9 |
-
import torch
|
10 |
-
from flask import Flask, request
|
11 |
-
from PIL import Image
|
12 |
-
|
13 |
-
app = Flask(__name__)
|
14 |
-
models = {}
|
15 |
-
|
16 |
-
DETECTION_URL = "/v1/object-detection/<model>"
|
17 |
-
|
18 |
-
|
19 |
-
@app.route(DETECTION_URL, methods=["POST"])
|
20 |
-
def predict(model):
|
21 |
-
if request.method != "POST":
|
22 |
-
return
|
23 |
-
|
24 |
-
if request.files.get("image"):
|
25 |
-
# Method 1
|
26 |
-
# with request.files["image"] as f:
|
27 |
-
# im = Image.open(io.BytesIO(f.read()))
|
28 |
-
|
29 |
-
# Method 2
|
30 |
-
im_file = request.files["image"]
|
31 |
-
im_bytes = im_file.read()
|
32 |
-
im = Image.open(io.BytesIO(im_bytes))
|
33 |
-
|
34 |
-
if model in models:
|
35 |
-
results = models[model](im, size=640) # reduce size=320 for faster inference
|
36 |
-
return results.pandas().xyxy[0].to_json(orient="records")
|
37 |
-
|
38 |
-
|
39 |
-
if __name__ == "__main__":
|
40 |
-
parser = argparse.ArgumentParser(description="Flask API exposing YOLOv5 model")
|
41 |
-
parser.add_argument("--port", default=5000, type=int, help="port number")
|
42 |
-
parser.add_argument('--model', nargs='+', default=['yolov5s'], help='model(s) to run, i.e. --model yolov5n yolov5s')
|
43 |
-
opt = parser.parse_args()
|
44 |
-
|
45 |
-
for m in opt.model:
|
46 |
-
models[m] = torch.hub.load("ultralytics/yolov5", m, force_reload=True, skip_validation=True)
|
47 |
-
|
48 |
-
app.run(host="0.0.0.0", port=opt.port) # debug=True causes Restarting with stat
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/models/scheme.py
DELETED
@@ -1,31 +0,0 @@
|
|
1 |
-
"""
|
2 |
-
For types associated with installation schemes.
|
3 |
-
|
4 |
-
For a general overview of available schemes and their context, see
|
5 |
-
https://docs.python.org/3/install/index.html#alternate-installation.
|
6 |
-
"""
|
7 |
-
|
8 |
-
|
9 |
-
SCHEME_KEYS = ["platlib", "purelib", "headers", "scripts", "data"]
|
10 |
-
|
11 |
-
|
12 |
-
class Scheme:
|
13 |
-
"""A Scheme holds paths which are used as the base directories for
|
14 |
-
artifacts associated with a Python package.
|
15 |
-
"""
|
16 |
-
|
17 |
-
__slots__ = SCHEME_KEYS
|
18 |
-
|
19 |
-
def __init__(
|
20 |
-
self,
|
21 |
-
platlib: str,
|
22 |
-
purelib: str,
|
23 |
-
headers: str,
|
24 |
-
scripts: str,
|
25 |
-
data: str,
|
26 |
-
) -> None:
|
27 |
-
self.platlib = platlib
|
28 |
-
self.purelib = purelib
|
29 |
-
self.headers = headers
|
30 |
-
self.scripts = scripts
|
31 |
-
self.data = data
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AutoLLM/AutoAgents/README-main.md
DELETED
@@ -1,103 +0,0 @@
|
|
1 |
-
# AutoAgents
|
2 |
-
|
3 |
-
<p align="center"><img src="https://raw.githubusercontent.com/AutoLLM/AutoAgents/assets/images/logo.png?raw=true" width=400/></p>
|
4 |
-
|
5 |
-
Unlock complex question answering in LLMs with enhanced chain-of-thought reasoning and information-seeking capabilities.
|
6 |
-
|
7 |
-
## 👉 Overview
|
8 |
-
|
9 |
-
The purpose of this project is to extend LLMs ability to answer more complex questions through chain-of-thought reasoning and information-seeking actions.
|
10 |
-
|
11 |
-
We are excited to release the initial version of AutoAgents, a proof-of-concept on what can be achieved with only well-written prompts. This is the initial step towards our first big milestone, releasing and open-sourcing the AutoAgents 7B model!
|
12 |
-
|
13 |
-
Come try out our [Huggingface Space](https://huggingface.co/spaces/AutoLLM/AutoAgents)!
|
14 |
-
|
15 |
-
|
16 |
-
|
17 |
-
## 🤖 The AutoAgents Project
|
18 |
-
|
19 |
-
This project demonstrates LLMs capability to execute a complex user goal: understand a user's goal, generate a plan, use proper tools, and deliver a final result.
|
20 |
-
|
21 |
-
For simplicity, our first attempt starts with a Web Search Agent.
|
22 |
-
|
23 |
-
|
24 |
-
|
25 |
-
## 💫 How it works:
|
26 |
-
|
27 |
-
<p align="left"><img src="https://raw.githubusercontent.com/AutoLLM/AutoAgents/assets/images/agent.png" width=830/></p>
|
28 |
-
|
29 |
-
|
30 |
-
|
31 |
-
## 📔 Examples
|
32 |
-
|
33 |
-
Ask your AutoAgent to do what a real person would do using the internet:
|
34 |
-
|
35 |
-
For example:
|
36 |
-
|
37 |
-
*1. Recommend a kid friendly movie that is playing at a theater near Sunnyvale. Give me the showtimes and a link to purchase the tickets*
|
38 |
-
|
39 |
-
*2. What is the average age of the past three president when they took office*
|
40 |
-
|
41 |
-
*3. What is the mortgage rate right now and how does that compare to the past two years*
|
42 |
-
|
43 |
-
|
44 |
-
|
45 |
-
## 💁 Roadmap
|
46 |
-
|
47 |
-
* ~~HuggingFace Space demo using OpenAI models~~ [LINK](https://huggingface.co/spaces/AutoLLM/AutoAgents)
|
48 |
-
* AutoAgents [7B] Model
|
49 |
-
* Initial Release:
|
50 |
-
* Finetune and release a 7B parameter fine-tuned search model
|
51 |
-
* AutoAgents Dataset
|
52 |
-
* A high-quality dataset for a diverse set of search scenarios (why quality and diversity?<sup>[1](https://arxiv.org/abs/2305.11206)</sup>)
|
53 |
-
* Reduce Model Inference Overhead
|
54 |
-
* Affordance Modeling <sup>[2](https://en.wikipedia.org/wiki/Affordance)</sup>
|
55 |
-
* Extend Support to Additional Tools
|
56 |
-
* Customizable Document Search set (e.g. personal documents)
|
57 |
-
* Support Multi-turn Dialogue
|
58 |
-
* Advanced Flow Control in Plan Execution
|
59 |
-
|
60 |
-
We are actively developing a few interesting things, check back here or follow us on [Twitter](https://twitter.com/AutoLLM) for any new development.
|
61 |
-
|
62 |
-
If you are interested in any other problems, feel free to shoot us an issue.
|
63 |
-
|
64 |
-
|
65 |
-
|
66 |
-
## 🧭 How to use this repo?
|
67 |
-
|
68 |
-
This repo contains the entire code to run the search agent from your local browser. All you need is an OpenAI API key to begin.
|
69 |
-
|
70 |
-
To run the search agent locally:
|
71 |
-
|
72 |
-
1. Clone the repo and change the directory
|
73 |
-
|
74 |
-
```bash
|
75 |
-
git clone https://github.com/AutoLLM/AutoAgents.git
|
76 |
-
cd AutoAgents
|
77 |
-
```
|
78 |
-
|
79 |
-
2. Install the dependencies
|
80 |
-
|
81 |
-
```bash
|
82 |
-
pip install -r requirements.txt
|
83 |
-
```
|
84 |
-
|
85 |
-
3. Install the `autoagents` package
|
86 |
-
|
87 |
-
```bash
|
88 |
-
pip install -e .
|
89 |
-
```
|
90 |
-
|
91 |
-
4. Make sure you have your OpenAI API key set as an environment variable. Alternatively, you can also feed it through the input text-box on the sidebar.
|
92 |
-
|
93 |
-
```bash
|
94 |
-
export OPENAI_API_KEY=sk-xxxxxx
|
95 |
-
```
|
96 |
-
|
97 |
-
5. Run the Streamlit app
|
98 |
-
|
99 |
-
```bash
|
100 |
-
streamlit run autoagents/spaces/app.py
|
101 |
-
```
|
102 |
-
|
103 |
-
This should open a browser window where you can type your search query.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/utils/filesystem.py
DELETED
@@ -1,153 +0,0 @@
|
|
1 |
-
import fnmatch
|
2 |
-
import os
|
3 |
-
import os.path
|
4 |
-
import random
|
5 |
-
import sys
|
6 |
-
from contextlib import contextmanager
|
7 |
-
from tempfile import NamedTemporaryFile
|
8 |
-
from typing import Any, BinaryIO, Generator, List, Union, cast
|
9 |
-
|
10 |
-
from pip._vendor.tenacity import retry, stop_after_delay, wait_fixed
|
11 |
-
|
12 |
-
from pip._internal.utils.compat import get_path_uid
|
13 |
-
from pip._internal.utils.misc import format_size
|
14 |
-
|
15 |
-
|
16 |
-
def check_path_owner(path: str) -> bool:
|
17 |
-
# If we don't have a way to check the effective uid of this process, then
|
18 |
-
# we'll just assume that we own the directory.
|
19 |
-
if sys.platform == "win32" or not hasattr(os, "geteuid"):
|
20 |
-
return True
|
21 |
-
|
22 |
-
assert os.path.isabs(path)
|
23 |
-
|
24 |
-
previous = None
|
25 |
-
while path != previous:
|
26 |
-
if os.path.lexists(path):
|
27 |
-
# Check if path is writable by current user.
|
28 |
-
if os.geteuid() == 0:
|
29 |
-
# Special handling for root user in order to handle properly
|
30 |
-
# cases where users use sudo without -H flag.
|
31 |
-
try:
|
32 |
-
path_uid = get_path_uid(path)
|
33 |
-
except OSError:
|
34 |
-
return False
|
35 |
-
return path_uid == 0
|
36 |
-
else:
|
37 |
-
return os.access(path, os.W_OK)
|
38 |
-
else:
|
39 |
-
previous, path = path, os.path.dirname(path)
|
40 |
-
return False # assume we don't own the path
|
41 |
-
|
42 |
-
|
43 |
-
@contextmanager
|
44 |
-
def adjacent_tmp_file(path: str, **kwargs: Any) -> Generator[BinaryIO, None, None]:
|
45 |
-
"""Return a file-like object pointing to a tmp file next to path.
|
46 |
-
|
47 |
-
The file is created securely and is ensured to be written to disk
|
48 |
-
after the context reaches its end.
|
49 |
-
|
50 |
-
kwargs will be passed to tempfile.NamedTemporaryFile to control
|
51 |
-
the way the temporary file will be opened.
|
52 |
-
"""
|
53 |
-
with NamedTemporaryFile(
|
54 |
-
delete=False,
|
55 |
-
dir=os.path.dirname(path),
|
56 |
-
prefix=os.path.basename(path),
|
57 |
-
suffix=".tmp",
|
58 |
-
**kwargs,
|
59 |
-
) as f:
|
60 |
-
result = cast(BinaryIO, f)
|
61 |
-
try:
|
62 |
-
yield result
|
63 |
-
finally:
|
64 |
-
result.flush()
|
65 |
-
os.fsync(result.fileno())
|
66 |
-
|
67 |
-
|
68 |
-
# Tenacity raises RetryError by default, explicitly raise the original exception
|
69 |
-
_replace_retry = retry(reraise=True, stop=stop_after_delay(1), wait=wait_fixed(0.25))
|
70 |
-
|
71 |
-
replace = _replace_retry(os.replace)
|
72 |
-
|
73 |
-
|
74 |
-
# test_writable_dir and _test_writable_dir_win are copied from Flit,
|
75 |
-
# with the author's agreement to also place them under pip's license.
|
76 |
-
def test_writable_dir(path: str) -> bool:
|
77 |
-
"""Check if a directory is writable.
|
78 |
-
|
79 |
-
Uses os.access() on POSIX, tries creating files on Windows.
|
80 |
-
"""
|
81 |
-
# If the directory doesn't exist, find the closest parent that does.
|
82 |
-
while not os.path.isdir(path):
|
83 |
-
parent = os.path.dirname(path)
|
84 |
-
if parent == path:
|
85 |
-
break # Should never get here, but infinite loops are bad
|
86 |
-
path = parent
|
87 |
-
|
88 |
-
if os.name == "posix":
|
89 |
-
return os.access(path, os.W_OK)
|
90 |
-
|
91 |
-
return _test_writable_dir_win(path)
|
92 |
-
|
93 |
-
|
94 |
-
def _test_writable_dir_win(path: str) -> bool:
|
95 |
-
# os.access doesn't work on Windows: http://bugs.python.org/issue2528
|
96 |
-
# and we can't use tempfile: http://bugs.python.org/issue22107
|
97 |
-
basename = "accesstest_deleteme_fishfingers_custard_"
|
98 |
-
alphabet = "abcdefghijklmnopqrstuvwxyz0123456789"
|
99 |
-
for _ in range(10):
|
100 |
-
name = basename + "".join(random.choice(alphabet) for _ in range(6))
|
101 |
-
file = os.path.join(path, name)
|
102 |
-
try:
|
103 |
-
fd = os.open(file, os.O_RDWR | os.O_CREAT | os.O_EXCL)
|
104 |
-
except FileExistsError:
|
105 |
-
pass
|
106 |
-
except PermissionError:
|
107 |
-
# This could be because there's a directory with the same name.
|
108 |
-
# But it's highly unlikely there's a directory called that,
|
109 |
-
# so we'll assume it's because the parent dir is not writable.
|
110 |
-
# This could as well be because the parent dir is not readable,
|
111 |
-
# due to non-privileged user access.
|
112 |
-
return False
|
113 |
-
else:
|
114 |
-
os.close(fd)
|
115 |
-
os.unlink(file)
|
116 |
-
return True
|
117 |
-
|
118 |
-
# This should never be reached
|
119 |
-
raise OSError("Unexpected condition testing for writable directory")
|
120 |
-
|
121 |
-
|
122 |
-
def find_files(path: str, pattern: str) -> List[str]:
|
123 |
-
"""Returns a list of absolute paths of files beneath path, recursively,
|
124 |
-
with filenames which match the UNIX-style shell glob pattern."""
|
125 |
-
result: List[str] = []
|
126 |
-
for root, _, files in os.walk(path):
|
127 |
-
matches = fnmatch.filter(files, pattern)
|
128 |
-
result.extend(os.path.join(root, f) for f in matches)
|
129 |
-
return result
|
130 |
-
|
131 |
-
|
132 |
-
def file_size(path: str) -> Union[int, float]:
|
133 |
-
# If it's a symlink, return 0.
|
134 |
-
if os.path.islink(path):
|
135 |
-
return 0
|
136 |
-
return os.path.getsize(path)
|
137 |
-
|
138 |
-
|
139 |
-
def format_file_size(path: str) -> str:
|
140 |
-
return format_size(file_size(path))
|
141 |
-
|
142 |
-
|
143 |
-
def directory_size(path: str) -> Union[int, float]:
|
144 |
-
size = 0.0
|
145 |
-
for root, _dirs, files in os.walk(path):
|
146 |
-
for filename in files:
|
147 |
-
file_path = os.path.join(root, filename)
|
148 |
-
size += file_size(file_path)
|
149 |
-
return size
|
150 |
-
|
151 |
-
|
152 |
-
def format_directory_size(path: str) -> str:
|
153 |
-
return format_size(directory_size(path))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Big-Web/MMSD/env/Scripts/activate.bat
DELETED
@@ -1,34 +0,0 @@
|
|
1 |
-
@echo off
|
2 |
-
|
3 |
-
rem This file is UTF-8 encoded, so we need to update the current code page while executing it
|
4 |
-
for /f "tokens=2 delims=:." %%a in ('"%SystemRoot%\System32\chcp.com"') do (
|
5 |
-
set _OLD_CODEPAGE=%%a
|
6 |
-
)
|
7 |
-
if defined _OLD_CODEPAGE (
|
8 |
-
"%SystemRoot%\System32\chcp.com" 65001 > nul
|
9 |
-
)
|
10 |
-
|
11 |
-
set VIRTUAL_ENV=C:\Users\cajul\Documents\Big Web Labs\Code\monet\MMSD\env
|
12 |
-
|
13 |
-
if not defined PROMPT set PROMPT=$P$G
|
14 |
-
|
15 |
-
if defined _OLD_VIRTUAL_PROMPT set PROMPT=%_OLD_VIRTUAL_PROMPT%
|
16 |
-
if defined _OLD_VIRTUAL_PYTHONHOME set PYTHONHOME=%_OLD_VIRTUAL_PYTHONHOME%
|
17 |
-
|
18 |
-
set _OLD_VIRTUAL_PROMPT=%PROMPT%
|
19 |
-
set PROMPT=(env) %PROMPT%
|
20 |
-
|
21 |
-
if defined PYTHONHOME set _OLD_VIRTUAL_PYTHONHOME=%PYTHONHOME%
|
22 |
-
set PYTHONHOME=
|
23 |
-
|
24 |
-
if defined _OLD_VIRTUAL_PATH set PATH=%_OLD_VIRTUAL_PATH%
|
25 |
-
if not defined _OLD_VIRTUAL_PATH set _OLD_VIRTUAL_PATH=%PATH%
|
26 |
-
|
27 |
-
set PATH=%VIRTUAL_ENV%\Scripts;%PATH%
|
28 |
-
set VIRTUAL_ENV_PROMPT=(env)
|
29 |
-
|
30 |
-
:END
|
31 |
-
if defined _OLD_CODEPAGE (
|
32 |
-
"%SystemRoot%\System32\chcp.com" %_OLD_CODEPAGE% > nul
|
33 |
-
set _OLD_CODEPAGE=
|
34 |
-
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CNXT/TXT2PiX/app.py
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
import gradio as gr
|
2 |
-
|
3 |
-
gr.Interface.load("models/runwayml/stable-diffusion-v1-5").launch()
|
|
|
|
|
|
|
|
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/dev/packaging/pkg_helpers.bash
DELETED
@@ -1,52 +0,0 @@
|
|
1 |
-
#!/bin/bash -e
|
2 |
-
|
3 |
-
# Function to retry functions that sometimes timeout or have flaky failures
|
4 |
-
retry () {
|
5 |
-
$* || (sleep 1 && $*) || (sleep 2 && $*) || (sleep 4 && $*) || (sleep 8 && $*)
|
6 |
-
}
|
7 |
-
# Install with pip a bit more robustly than the default
|
8 |
-
pip_install() {
|
9 |
-
retry pip install --progress-bar off "$@"
|
10 |
-
}
|
11 |
-
|
12 |
-
|
13 |
-
setup_cuda() {
|
14 |
-
# Now work out the CUDA settings
|
15 |
-
# Like other torch domain libraries, we choose common GPU architectures only.
|
16 |
-
export FORCE_CUDA=1
|
17 |
-
case "$CU_VERSION" in
|
18 |
-
cu101)
|
19 |
-
export CUDA_HOME=/usr/local/cuda-10.1/
|
20 |
-
export TORCH_CUDA_ARCH_LIST="3.5;3.7;5.0;5.2;6.0+PTX;6.1+PTX;7.0+PTX;7.5+PTX"
|
21 |
-
;;
|
22 |
-
cu100)
|
23 |
-
export CUDA_HOME=/usr/local/cuda-10.0/
|
24 |
-
export TORCH_CUDA_ARCH_LIST="3.5;3.7;5.0;5.2;6.0+PTX;6.1+PTX;7.0+PTX;7.5+PTX"
|
25 |
-
;;
|
26 |
-
cu92)
|
27 |
-
export CUDA_HOME=/usr/local/cuda-9.2/
|
28 |
-
export TORCH_CUDA_ARCH_LIST="3.5;3.7;5.0;5.2;6.0+PTX;6.1+PTX;7.0+PTX"
|
29 |
-
;;
|
30 |
-
cpu)
|
31 |
-
unset FORCE_CUDA
|
32 |
-
export CUDA_VISIBLE_DEVICES=
|
33 |
-
;;
|
34 |
-
*)
|
35 |
-
echo "Unrecognized CU_VERSION=$CU_VERSION"
|
36 |
-
exit 1
|
37 |
-
;;
|
38 |
-
esac
|
39 |
-
}
|
40 |
-
|
41 |
-
setup_wheel_python() {
|
42 |
-
case "$PYTHON_VERSION" in
|
43 |
-
3.6) python_abi=cp36-cp36m ;;
|
44 |
-
3.7) python_abi=cp37-cp37m ;;
|
45 |
-
3.8) python_abi=cp38-cp38 ;;
|
46 |
-
*)
|
47 |
-
echo "Unrecognized PYTHON_VERSION=$PYTHON_VERSION"
|
48 |
-
exit 1
|
49 |
-
;;
|
50 |
-
esac
|
51 |
-
export PATH="/opt/python/$python_abi/bin:$PATH"
|
52 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/LIVE/thrust/thrust/system/cpp/detail/assign_value.h
DELETED
@@ -1,23 +0,0 @@
|
|
1 |
-
/*
|
2 |
-
* Copyright 2008-2013 NVIDIA Corporation
|
3 |
-
*
|
4 |
-
* Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
* you may not use this file except in compliance with the License.
|
6 |
-
* You may obtain a copy of the License at
|
7 |
-
*
|
8 |
-
* http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
*
|
10 |
-
* Unless required by applicable law or agreed to in writing, software
|
11 |
-
* distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
* See the License for the specific language governing permissions and
|
14 |
-
* limitations under the License.
|
15 |
-
*/
|
16 |
-
|
17 |
-
#pragma once
|
18 |
-
|
19 |
-
#include <thrust/detail/config.h>
|
20 |
-
|
21 |
-
// this system inherits assign_value
|
22 |
-
#include <thrust/system/detail/sequential/assign_value.h>
|
23 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/ChandraMohanNayal/AutoGPT/autogpt/speech/eleven_labs.py
DELETED
@@ -1,86 +0,0 @@
|
|
1 |
-
"""ElevenLabs speech module"""
|
2 |
-
import os
|
3 |
-
|
4 |
-
import requests
|
5 |
-
from playsound import playsound
|
6 |
-
|
7 |
-
from autogpt.config import Config
|
8 |
-
from autogpt.speech.base import VoiceBase
|
9 |
-
|
10 |
-
PLACEHOLDERS = {"your-voice-id"}
|
11 |
-
|
12 |
-
|
13 |
-
class ElevenLabsSpeech(VoiceBase):
|
14 |
-
"""ElevenLabs speech class"""
|
15 |
-
|
16 |
-
def _setup(self) -> None:
|
17 |
-
"""Set up the voices, API key, etc.
|
18 |
-
|
19 |
-
Returns:
|
20 |
-
None: None
|
21 |
-
"""
|
22 |
-
|
23 |
-
cfg = Config()
|
24 |
-
default_voices = ["ErXwobaYiN019PkySvjV", "EXAVITQu4vr4xnSDxMaL"]
|
25 |
-
voice_options = {
|
26 |
-
"Rachel": "21m00Tcm4TlvDq8ikWAM",
|
27 |
-
"Domi": "AZnzlk1XvdvUeBnXmlld",
|
28 |
-
"Bella": "EXAVITQu4vr4xnSDxMaL",
|
29 |
-
"Antoni": "ErXwobaYiN019PkySvjV",
|
30 |
-
"Elli": "MF3mGyEYCl7XYWbV9V6O",
|
31 |
-
"Josh": "TxGEqnHWrfWFTfGW9XjX",
|
32 |
-
"Arnold": "VR6AewLTigWG4xSOukaG",
|
33 |
-
"Adam": "pNInz6obpgDQGcFmaJgB",
|
34 |
-
"Sam": "yoZ06aMxZJJ28mfd3POQ",
|
35 |
-
}
|
36 |
-
self._headers = {
|
37 |
-
"Content-Type": "application/json",
|
38 |
-
"xi-api-key": cfg.elevenlabs_api_key,
|
39 |
-
}
|
40 |
-
self._voices = default_voices.copy()
|
41 |
-
if cfg.elevenlabs_voice_1_id in voice_options:
|
42 |
-
cfg.elevenlabs_voice_1_id = voice_options[cfg.elevenlabs_voice_1_id]
|
43 |
-
if cfg.elevenlabs_voice_2_id in voice_options:
|
44 |
-
cfg.elevenlabs_voice_2_id = voice_options[cfg.elevenlabs_voice_2_id]
|
45 |
-
self._use_custom_voice(cfg.elevenlabs_voice_1_id, 0)
|
46 |
-
self._use_custom_voice(cfg.elevenlabs_voice_2_id, 1)
|
47 |
-
|
48 |
-
def _use_custom_voice(self, voice, voice_index) -> None:
|
49 |
-
"""Use a custom voice if provided and not a placeholder
|
50 |
-
|
51 |
-
Args:
|
52 |
-
voice (str): The voice ID
|
53 |
-
voice_index (int): The voice index
|
54 |
-
|
55 |
-
Returns:
|
56 |
-
None: None
|
57 |
-
"""
|
58 |
-
# Placeholder values that should be treated as empty
|
59 |
-
if voice and voice not in PLACEHOLDERS:
|
60 |
-
self._voices[voice_index] = voice
|
61 |
-
|
62 |
-
def _speech(self, text: str, voice_index: int = 0) -> bool:
|
63 |
-
"""Speak text using elevenlabs.io's API
|
64 |
-
|
65 |
-
Args:
|
66 |
-
text (str): The text to speak
|
67 |
-
voice_index (int, optional): The voice to use. Defaults to 0.
|
68 |
-
|
69 |
-
Returns:
|
70 |
-
bool: True if the request was successful, False otherwise
|
71 |
-
"""
|
72 |
-
tts_url = (
|
73 |
-
f"https://api.elevenlabs.io/v1/text-to-speech/{self._voices[voice_index]}"
|
74 |
-
)
|
75 |
-
response = requests.post(tts_url, headers=self._headers, json={"text": text})
|
76 |
-
|
77 |
-
if response.status_code == 200:
|
78 |
-
with open("speech.mpeg", "wb") as f:
|
79 |
-
f.write(response.content)
|
80 |
-
playsound("speech.mpeg", True)
|
81 |
-
os.remove("speech.mpeg")
|
82 |
-
return True
|
83 |
-
else:
|
84 |
-
print("Request failed with status code:", response.status_code)
|
85 |
-
print("Response content:", response.content)
|
86 |
-
return False
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CormacMc/projectsub6/README.md
DELETED
@@ -1,13 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: Projectsub6
|
3 |
-
emoji: 🌍
|
4 |
-
colorFrom: gray
|
5 |
-
colorTo: blue
|
6 |
-
sdk: gradio
|
7 |
-
sdk_version: 3.39.0
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
license: apache-2.0
|
11 |
-
---
|
12 |
-
|
13 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/aiohttp/web_request.py
DELETED
@@ -1,882 +0,0 @@
|
|
1 |
-
import asyncio
|
2 |
-
import datetime
|
3 |
-
import io
|
4 |
-
import re
|
5 |
-
import socket
|
6 |
-
import string
|
7 |
-
import tempfile
|
8 |
-
import types
|
9 |
-
import warnings
|
10 |
-
from http.cookies import SimpleCookie
|
11 |
-
from types import MappingProxyType
|
12 |
-
from typing import (
|
13 |
-
TYPE_CHECKING,
|
14 |
-
Any,
|
15 |
-
Dict,
|
16 |
-
Iterator,
|
17 |
-
Mapping,
|
18 |
-
MutableMapping,
|
19 |
-
Optional,
|
20 |
-
Pattern,
|
21 |
-
Tuple,
|
22 |
-
Union,
|
23 |
-
cast,
|
24 |
-
)
|
25 |
-
from urllib.parse import parse_qsl
|
26 |
-
|
27 |
-
import attr
|
28 |
-
from multidict import CIMultiDict, CIMultiDictProxy, MultiDict, MultiDictProxy
|
29 |
-
from yarl import URL
|
30 |
-
|
31 |
-
from . import hdrs
|
32 |
-
from .abc import AbstractStreamWriter
|
33 |
-
from .helpers import (
|
34 |
-
DEBUG,
|
35 |
-
ETAG_ANY,
|
36 |
-
LIST_QUOTED_ETAG_RE,
|
37 |
-
ChainMapProxy,
|
38 |
-
ETag,
|
39 |
-
HeadersMixin,
|
40 |
-
parse_http_date,
|
41 |
-
reify,
|
42 |
-
sentinel,
|
43 |
-
)
|
44 |
-
from .http_parser import RawRequestMessage
|
45 |
-
from .http_writer import HttpVersion
|
46 |
-
from .multipart import BodyPartReader, MultipartReader
|
47 |
-
from .streams import EmptyStreamReader, StreamReader
|
48 |
-
from .typedefs import (
|
49 |
-
DEFAULT_JSON_DECODER,
|
50 |
-
Final,
|
51 |
-
JSONDecoder,
|
52 |
-
LooseHeaders,
|
53 |
-
RawHeaders,
|
54 |
-
StrOrURL,
|
55 |
-
)
|
56 |
-
from .web_exceptions import HTTPRequestEntityTooLarge
|
57 |
-
from .web_response import StreamResponse
|
58 |
-
|
59 |
-
__all__ = ("BaseRequest", "FileField", "Request")
|
60 |
-
|
61 |
-
|
62 |
-
if TYPE_CHECKING: # pragma: no cover
|
63 |
-
from .web_app import Application
|
64 |
-
from .web_protocol import RequestHandler
|
65 |
-
from .web_urldispatcher import UrlMappingMatchInfo
|
66 |
-
|
67 |
-
|
68 |
-
@attr.s(auto_attribs=True, frozen=True, slots=True)
|
69 |
-
class FileField:
|
70 |
-
name: str
|
71 |
-
filename: str
|
72 |
-
file: io.BufferedReader
|
73 |
-
content_type: str
|
74 |
-
headers: "CIMultiDictProxy[str]"
|
75 |
-
|
76 |
-
|
77 |
-
_TCHAR: Final[str] = string.digits + string.ascii_letters + r"!#$%&'*+.^_`|~-"
|
78 |
-
# '-' at the end to prevent interpretation as range in a char class
|
79 |
-
|
80 |
-
_TOKEN: Final[str] = rf"[{_TCHAR}]+"
|
81 |
-
|
82 |
-
_QDTEXT: Final[str] = r"[{}]".format(
|
83 |
-
r"".join(chr(c) for c in (0x09, 0x20, 0x21) + tuple(range(0x23, 0x7F)))
|
84 |
-
)
|
85 |
-
# qdtext includes 0x5C to escape 0x5D ('\]')
|
86 |
-
# qdtext excludes obs-text (because obsoleted, and encoding not specified)
|
87 |
-
|
88 |
-
_QUOTED_PAIR: Final[str] = r"\\[\t !-~]"
|
89 |
-
|
90 |
-
_QUOTED_STRING: Final[str] = r'"(?:{quoted_pair}|{qdtext})*"'.format(
|
91 |
-
qdtext=_QDTEXT, quoted_pair=_QUOTED_PAIR
|
92 |
-
)
|
93 |
-
|
94 |
-
_FORWARDED_PAIR: Final[
|
95 |
-
str
|
96 |
-
] = r"({token})=({token}|{quoted_string})(:\d{{1,4}})?".format(
|
97 |
-
token=_TOKEN, quoted_string=_QUOTED_STRING
|
98 |
-
)
|
99 |
-
|
100 |
-
_QUOTED_PAIR_REPLACE_RE: Final[Pattern[str]] = re.compile(r"\\([\t !-~])")
|
101 |
-
# same pattern as _QUOTED_PAIR but contains a capture group
|
102 |
-
|
103 |
-
_FORWARDED_PAIR_RE: Final[Pattern[str]] = re.compile(_FORWARDED_PAIR)
|
104 |
-
|
105 |
-
############################################################
|
106 |
-
# HTTP Request
|
107 |
-
############################################################
|
108 |
-
|
109 |
-
|
110 |
-
class BaseRequest(MutableMapping[str, Any], HeadersMixin):
|
111 |
-
|
112 |
-
POST_METHODS = {
|
113 |
-
hdrs.METH_PATCH,
|
114 |
-
hdrs.METH_POST,
|
115 |
-
hdrs.METH_PUT,
|
116 |
-
hdrs.METH_TRACE,
|
117 |
-
hdrs.METH_DELETE,
|
118 |
-
}
|
119 |
-
|
120 |
-
ATTRS = HeadersMixin.ATTRS | frozenset(
|
121 |
-
[
|
122 |
-
"_message",
|
123 |
-
"_protocol",
|
124 |
-
"_payload_writer",
|
125 |
-
"_payload",
|
126 |
-
"_headers",
|
127 |
-
"_method",
|
128 |
-
"_version",
|
129 |
-
"_rel_url",
|
130 |
-
"_post",
|
131 |
-
"_read_bytes",
|
132 |
-
"_state",
|
133 |
-
"_cache",
|
134 |
-
"_task",
|
135 |
-
"_client_max_size",
|
136 |
-
"_loop",
|
137 |
-
"_transport_sslcontext",
|
138 |
-
"_transport_peername",
|
139 |
-
]
|
140 |
-
)
|
141 |
-
|
142 |
-
def __init__(
|
143 |
-
self,
|
144 |
-
message: RawRequestMessage,
|
145 |
-
payload: StreamReader,
|
146 |
-
protocol: "RequestHandler",
|
147 |
-
payload_writer: AbstractStreamWriter,
|
148 |
-
task: "asyncio.Task[None]",
|
149 |
-
loop: asyncio.AbstractEventLoop,
|
150 |
-
*,
|
151 |
-
client_max_size: int = 1024**2,
|
152 |
-
state: Optional[Dict[str, Any]] = None,
|
153 |
-
scheme: Optional[str] = None,
|
154 |
-
host: Optional[str] = None,
|
155 |
-
remote: Optional[str] = None,
|
156 |
-
) -> None:
|
157 |
-
if state is None:
|
158 |
-
state = {}
|
159 |
-
self._message = message
|
160 |
-
self._protocol = protocol
|
161 |
-
self._payload_writer = payload_writer
|
162 |
-
|
163 |
-
self._payload = payload
|
164 |
-
self._headers = message.headers
|
165 |
-
self._method = message.method
|
166 |
-
self._version = message.version
|
167 |
-
self._cache: Dict[str, Any] = {}
|
168 |
-
url = message.url
|
169 |
-
if url.is_absolute():
|
170 |
-
# absolute URL is given,
|
171 |
-
# override auto-calculating url, host, and scheme
|
172 |
-
# all other properties should be good
|
173 |
-
self._cache["url"] = url
|
174 |
-
self._cache["host"] = url.host
|
175 |
-
self._cache["scheme"] = url.scheme
|
176 |
-
self._rel_url = url.relative()
|
177 |
-
else:
|
178 |
-
self._rel_url = message.url
|
179 |
-
self._post: Optional[MultiDictProxy[Union[str, bytes, FileField]]] = None
|
180 |
-
self._read_bytes: Optional[bytes] = None
|
181 |
-
|
182 |
-
self._state = state
|
183 |
-
self._task = task
|
184 |
-
self._client_max_size = client_max_size
|
185 |
-
self._loop = loop
|
186 |
-
|
187 |
-
transport = self._protocol.transport
|
188 |
-
assert transport is not None
|
189 |
-
self._transport_sslcontext = transport.get_extra_info("sslcontext")
|
190 |
-
self._transport_peername = transport.get_extra_info("peername")
|
191 |
-
|
192 |
-
if scheme is not None:
|
193 |
-
self._cache["scheme"] = scheme
|
194 |
-
if host is not None:
|
195 |
-
self._cache["host"] = host
|
196 |
-
if remote is not None:
|
197 |
-
self._cache["remote"] = remote
|
198 |
-
|
199 |
-
def clone(
|
200 |
-
self,
|
201 |
-
*,
|
202 |
-
method: str = sentinel,
|
203 |
-
rel_url: StrOrURL = sentinel,
|
204 |
-
headers: LooseHeaders = sentinel,
|
205 |
-
scheme: str = sentinel,
|
206 |
-
host: str = sentinel,
|
207 |
-
remote: str = sentinel,
|
208 |
-
) -> "BaseRequest":
|
209 |
-
"""Clone itself with replacement some attributes.
|
210 |
-
|
211 |
-
Creates and returns a new instance of Request object. If no parameters
|
212 |
-
are given, an exact copy is returned. If a parameter is not passed, it
|
213 |
-
will reuse the one from the current request object.
|
214 |
-
"""
|
215 |
-
if self._read_bytes:
|
216 |
-
raise RuntimeError("Cannot clone request " "after reading its content")
|
217 |
-
|
218 |
-
dct: Dict[str, Any] = {}
|
219 |
-
if method is not sentinel:
|
220 |
-
dct["method"] = method
|
221 |
-
if rel_url is not sentinel:
|
222 |
-
new_url = URL(rel_url)
|
223 |
-
dct["url"] = new_url
|
224 |
-
dct["path"] = str(new_url)
|
225 |
-
if headers is not sentinel:
|
226 |
-
# a copy semantic
|
227 |
-
dct["headers"] = CIMultiDictProxy(CIMultiDict(headers))
|
228 |
-
dct["raw_headers"] = tuple(
|
229 |
-
(k.encode("utf-8"), v.encode("utf-8")) for k, v in headers.items()
|
230 |
-
)
|
231 |
-
|
232 |
-
message = self._message._replace(**dct)
|
233 |
-
|
234 |
-
kwargs = {}
|
235 |
-
if scheme is not sentinel:
|
236 |
-
kwargs["scheme"] = scheme
|
237 |
-
if host is not sentinel:
|
238 |
-
kwargs["host"] = host
|
239 |
-
if remote is not sentinel:
|
240 |
-
kwargs["remote"] = remote
|
241 |
-
|
242 |
-
return self.__class__(
|
243 |
-
message,
|
244 |
-
self._payload,
|
245 |
-
self._protocol,
|
246 |
-
self._payload_writer,
|
247 |
-
self._task,
|
248 |
-
self._loop,
|
249 |
-
client_max_size=self._client_max_size,
|
250 |
-
state=self._state.copy(),
|
251 |
-
**kwargs,
|
252 |
-
)
|
253 |
-
|
254 |
-
@property
|
255 |
-
def task(self) -> "asyncio.Task[None]":
|
256 |
-
return self._task
|
257 |
-
|
258 |
-
@property
|
259 |
-
def protocol(self) -> "RequestHandler":
|
260 |
-
return self._protocol
|
261 |
-
|
262 |
-
@property
|
263 |
-
def transport(self) -> Optional[asyncio.Transport]:
|
264 |
-
if self._protocol is None:
|
265 |
-
return None
|
266 |
-
return self._protocol.transport
|
267 |
-
|
268 |
-
@property
|
269 |
-
def writer(self) -> AbstractStreamWriter:
|
270 |
-
return self._payload_writer
|
271 |
-
|
272 |
-
@reify
|
273 |
-
def message(self) -> RawRequestMessage:
|
274 |
-
warnings.warn("Request.message is deprecated", DeprecationWarning, stacklevel=3)
|
275 |
-
return self._message
|
276 |
-
|
277 |
-
@reify
|
278 |
-
def rel_url(self) -> URL:
|
279 |
-
return self._rel_url
|
280 |
-
|
281 |
-
@reify
|
282 |
-
def loop(self) -> asyncio.AbstractEventLoop:
|
283 |
-
warnings.warn(
|
284 |
-
"request.loop property is deprecated", DeprecationWarning, stacklevel=2
|
285 |
-
)
|
286 |
-
return self._loop
|
287 |
-
|
288 |
-
# MutableMapping API
|
289 |
-
|
290 |
-
def __getitem__(self, key: str) -> Any:
|
291 |
-
return self._state[key]
|
292 |
-
|
293 |
-
def __setitem__(self, key: str, value: Any) -> None:
|
294 |
-
self._state[key] = value
|
295 |
-
|
296 |
-
def __delitem__(self, key: str) -> None:
|
297 |
-
del self._state[key]
|
298 |
-
|
299 |
-
def __len__(self) -> int:
|
300 |
-
return len(self._state)
|
301 |
-
|
302 |
-
def __iter__(self) -> Iterator[str]:
|
303 |
-
return iter(self._state)
|
304 |
-
|
305 |
-
########
|
306 |
-
|
307 |
-
@reify
|
308 |
-
def secure(self) -> bool:
|
309 |
-
"""A bool indicating if the request is handled with SSL."""
|
310 |
-
return self.scheme == "https"
|
311 |
-
|
312 |
-
@reify
|
313 |
-
def forwarded(self) -> Tuple[Mapping[str, str], ...]:
|
314 |
-
"""A tuple containing all parsed Forwarded header(s).
|
315 |
-
|
316 |
-
Makes an effort to parse Forwarded headers as specified by RFC 7239:
|
317 |
-
|
318 |
-
- It adds one (immutable) dictionary per Forwarded 'field-value', ie
|
319 |
-
per proxy. The element corresponds to the data in the Forwarded
|
320 |
-
field-value added by the first proxy encountered by the client. Each
|
321 |
-
subsequent item corresponds to those added by later proxies.
|
322 |
-
- It checks that every value has valid syntax in general as specified
|
323 |
-
in section 4: either a 'token' or a 'quoted-string'.
|
324 |
-
- It un-escapes found escape sequences.
|
325 |
-
- It does NOT validate 'by' and 'for' contents as specified in section
|
326 |
-
6.
|
327 |
-
- It does NOT validate 'host' contents (Host ABNF).
|
328 |
-
- It does NOT validate 'proto' contents for valid URI scheme names.
|
329 |
-
|
330 |
-
Returns a tuple containing one or more immutable dicts
|
331 |
-
"""
|
332 |
-
elems = []
|
333 |
-
for field_value in self._message.headers.getall(hdrs.FORWARDED, ()):
|
334 |
-
length = len(field_value)
|
335 |
-
pos = 0
|
336 |
-
need_separator = False
|
337 |
-
elem: Dict[str, str] = {}
|
338 |
-
elems.append(types.MappingProxyType(elem))
|
339 |
-
while 0 <= pos < length:
|
340 |
-
match = _FORWARDED_PAIR_RE.match(field_value, pos)
|
341 |
-
if match is not None: # got a valid forwarded-pair
|
342 |
-
if need_separator:
|
343 |
-
# bad syntax here, skip to next comma
|
344 |
-
pos = field_value.find(",", pos)
|
345 |
-
else:
|
346 |
-
name, value, port = match.groups()
|
347 |
-
if value[0] == '"':
|
348 |
-
# quoted string: remove quotes and unescape
|
349 |
-
value = _QUOTED_PAIR_REPLACE_RE.sub(r"\1", value[1:-1])
|
350 |
-
if port:
|
351 |
-
value += port
|
352 |
-
elem[name.lower()] = value
|
353 |
-
pos += len(match.group(0))
|
354 |
-
need_separator = True
|
355 |
-
elif field_value[pos] == ",": # next forwarded-element
|
356 |
-
need_separator = False
|
357 |
-
elem = {}
|
358 |
-
elems.append(types.MappingProxyType(elem))
|
359 |
-
pos += 1
|
360 |
-
elif field_value[pos] == ";": # next forwarded-pair
|
361 |
-
need_separator = False
|
362 |
-
pos += 1
|
363 |
-
elif field_value[pos] in " \t":
|
364 |
-
# Allow whitespace even between forwarded-pairs, though
|
365 |
-
# RFC 7239 doesn't. This simplifies code and is in line
|
366 |
-
# with Postel's law.
|
367 |
-
pos += 1
|
368 |
-
else:
|
369 |
-
# bad syntax here, skip to next comma
|
370 |
-
pos = field_value.find(",", pos)
|
371 |
-
return tuple(elems)
|
372 |
-
|
373 |
-
@reify
|
374 |
-
def scheme(self) -> str:
|
375 |
-
"""A string representing the scheme of the request.
|
376 |
-
|
377 |
-
Hostname is resolved in this order:
|
378 |
-
|
379 |
-
- overridden value by .clone(scheme=new_scheme) call.
|
380 |
-
- type of connection to peer: HTTPS if socket is SSL, HTTP otherwise.
|
381 |
-
|
382 |
-
'http' or 'https'.
|
383 |
-
"""
|
384 |
-
if self._transport_sslcontext:
|
385 |
-
return "https"
|
386 |
-
else:
|
387 |
-
return "http"
|
388 |
-
|
389 |
-
@reify
|
390 |
-
def method(self) -> str:
|
391 |
-
"""Read only property for getting HTTP method.
|
392 |
-
|
393 |
-
The value is upper-cased str like 'GET', 'POST', 'PUT' etc.
|
394 |
-
"""
|
395 |
-
return self._method
|
396 |
-
|
397 |
-
@reify
|
398 |
-
def version(self) -> HttpVersion:
|
399 |
-
"""Read only property for getting HTTP version of request.
|
400 |
-
|
401 |
-
Returns aiohttp.protocol.HttpVersion instance.
|
402 |
-
"""
|
403 |
-
return self._version
|
404 |
-
|
405 |
-
@reify
|
406 |
-
def host(self) -> str:
|
407 |
-
"""Hostname of the request.
|
408 |
-
|
409 |
-
Hostname is resolved in this order:
|
410 |
-
|
411 |
-
- overridden value by .clone(host=new_host) call.
|
412 |
-
- HOST HTTP header
|
413 |
-
- socket.getfqdn() value
|
414 |
-
"""
|
415 |
-
host = self._message.headers.get(hdrs.HOST)
|
416 |
-
if host is not None:
|
417 |
-
return host
|
418 |
-
return socket.getfqdn()
|
419 |
-
|
420 |
-
@reify
|
421 |
-
def remote(self) -> Optional[str]:
|
422 |
-
"""Remote IP of client initiated HTTP request.
|
423 |
-
|
424 |
-
The IP is resolved in this order:
|
425 |
-
|
426 |
-
- overridden value by .clone(remote=new_remote) call.
|
427 |
-
- peername of opened socket
|
428 |
-
"""
|
429 |
-
if self._transport_peername is None:
|
430 |
-
return None
|
431 |
-
if isinstance(self._transport_peername, (list, tuple)):
|
432 |
-
return str(self._transport_peername[0])
|
433 |
-
return str(self._transport_peername)
|
434 |
-
|
435 |
-
@reify
|
436 |
-
def url(self) -> URL:
|
437 |
-
url = URL.build(scheme=self.scheme, host=self.host)
|
438 |
-
return url.join(self._rel_url)
|
439 |
-
|
440 |
-
@reify
|
441 |
-
def path(self) -> str:
|
442 |
-
"""The URL including *PATH INFO* without the host or scheme.
|
443 |
-
|
444 |
-
E.g., ``/app/blog``
|
445 |
-
"""
|
446 |
-
return self._rel_url.path
|
447 |
-
|
448 |
-
@reify
|
449 |
-
def path_qs(self) -> str:
|
450 |
-
"""The URL including PATH_INFO and the query string.
|
451 |
-
|
452 |
-
E.g, /app/blog?id=10
|
453 |
-
"""
|
454 |
-
return str(self._rel_url)
|
455 |
-
|
456 |
-
@reify
|
457 |
-
def raw_path(self) -> str:
|
458 |
-
"""The URL including raw *PATH INFO* without the host or scheme.
|
459 |
-
|
460 |
-
Warning, the path is unquoted and may contains non valid URL characters
|
461 |
-
|
462 |
-
E.g., ``/my%2Fpath%7Cwith%21some%25strange%24characters``
|
463 |
-
"""
|
464 |
-
return self._message.path
|
465 |
-
|
466 |
-
@reify
|
467 |
-
def query(self) -> "MultiDictProxy[str]":
|
468 |
-
"""A multidict with all the variables in the query string."""
|
469 |
-
return MultiDictProxy(self._rel_url.query)
|
470 |
-
|
471 |
-
@reify
|
472 |
-
def query_string(self) -> str:
|
473 |
-
"""The query string in the URL.
|
474 |
-
|
475 |
-
E.g., id=10
|
476 |
-
"""
|
477 |
-
return self._rel_url.query_string
|
478 |
-
|
479 |
-
@reify
|
480 |
-
def headers(self) -> "CIMultiDictProxy[str]":
|
481 |
-
"""A case-insensitive multidict proxy with all headers."""
|
482 |
-
return self._headers
|
483 |
-
|
484 |
-
@reify
|
485 |
-
def raw_headers(self) -> RawHeaders:
|
486 |
-
"""A sequence of pairs for all headers."""
|
487 |
-
return self._message.raw_headers
|
488 |
-
|
489 |
-
@reify
|
490 |
-
def if_modified_since(self) -> Optional[datetime.datetime]:
|
491 |
-
"""The value of If-Modified-Since HTTP header, or None.
|
492 |
-
|
493 |
-
This header is represented as a `datetime` object.
|
494 |
-
"""
|
495 |
-
return parse_http_date(self.headers.get(hdrs.IF_MODIFIED_SINCE))
|
496 |
-
|
497 |
-
@reify
|
498 |
-
def if_unmodified_since(self) -> Optional[datetime.datetime]:
|
499 |
-
"""The value of If-Unmodified-Since HTTP header, or None.
|
500 |
-
|
501 |
-
This header is represented as a `datetime` object.
|
502 |
-
"""
|
503 |
-
return parse_http_date(self.headers.get(hdrs.IF_UNMODIFIED_SINCE))
|
504 |
-
|
505 |
-
@staticmethod
|
506 |
-
def _etag_values(etag_header: str) -> Iterator[ETag]:
|
507 |
-
"""Extract `ETag` objects from raw header."""
|
508 |
-
if etag_header == ETAG_ANY:
|
509 |
-
yield ETag(
|
510 |
-
is_weak=False,
|
511 |
-
value=ETAG_ANY,
|
512 |
-
)
|
513 |
-
else:
|
514 |
-
for match in LIST_QUOTED_ETAG_RE.finditer(etag_header):
|
515 |
-
is_weak, value, garbage = match.group(2, 3, 4)
|
516 |
-
# Any symbol captured by 4th group means
|
517 |
-
# that the following sequence is invalid.
|
518 |
-
if garbage:
|
519 |
-
break
|
520 |
-
|
521 |
-
yield ETag(
|
522 |
-
is_weak=bool(is_weak),
|
523 |
-
value=value,
|
524 |
-
)
|
525 |
-
|
526 |
-
@classmethod
|
527 |
-
def _if_match_or_none_impl(
|
528 |
-
cls, header_value: Optional[str]
|
529 |
-
) -> Optional[Tuple[ETag, ...]]:
|
530 |
-
if not header_value:
|
531 |
-
return None
|
532 |
-
|
533 |
-
return tuple(cls._etag_values(header_value))
|
534 |
-
|
535 |
-
@reify
|
536 |
-
def if_match(self) -> Optional[Tuple[ETag, ...]]:
|
537 |
-
"""The value of If-Match HTTP header, or None.
|
538 |
-
|
539 |
-
This header is represented as a `tuple` of `ETag` objects.
|
540 |
-
"""
|
541 |
-
return self._if_match_or_none_impl(self.headers.get(hdrs.IF_MATCH))
|
542 |
-
|
543 |
-
@reify
|
544 |
-
def if_none_match(self) -> Optional[Tuple[ETag, ...]]:
|
545 |
-
"""The value of If-None-Match HTTP header, or None.
|
546 |
-
|
547 |
-
This header is represented as a `tuple` of `ETag` objects.
|
548 |
-
"""
|
549 |
-
return self._if_match_or_none_impl(self.headers.get(hdrs.IF_NONE_MATCH))
|
550 |
-
|
551 |
-
@reify
|
552 |
-
def if_range(self) -> Optional[datetime.datetime]:
|
553 |
-
"""The value of If-Range HTTP header, or None.
|
554 |
-
|
555 |
-
This header is represented as a `datetime` object.
|
556 |
-
"""
|
557 |
-
return parse_http_date(self.headers.get(hdrs.IF_RANGE))
|
558 |
-
|
559 |
-
@reify
|
560 |
-
def keep_alive(self) -> bool:
|
561 |
-
"""Is keepalive enabled by client?"""
|
562 |
-
return not self._message.should_close
|
563 |
-
|
564 |
-
@reify
|
565 |
-
def cookies(self) -> Mapping[str, str]:
|
566 |
-
"""Return request cookies.
|
567 |
-
|
568 |
-
A read-only dictionary-like object.
|
569 |
-
"""
|
570 |
-
raw = self.headers.get(hdrs.COOKIE, "")
|
571 |
-
parsed: SimpleCookie[str] = SimpleCookie(raw)
|
572 |
-
return MappingProxyType({key: val.value for key, val in parsed.items()})
|
573 |
-
|
574 |
-
@reify
|
575 |
-
def http_range(self) -> slice:
|
576 |
-
"""The content of Range HTTP header.
|
577 |
-
|
578 |
-
Return a slice instance.
|
579 |
-
|
580 |
-
"""
|
581 |
-
rng = self._headers.get(hdrs.RANGE)
|
582 |
-
start, end = None, None
|
583 |
-
if rng is not None:
|
584 |
-
try:
|
585 |
-
pattern = r"^bytes=(\d*)-(\d*)$"
|
586 |
-
start, end = re.findall(pattern, rng)[0]
|
587 |
-
except IndexError: # pattern was not found in header
|
588 |
-
raise ValueError("range not in acceptable format")
|
589 |
-
|
590 |
-
end = int(end) if end else None
|
591 |
-
start = int(start) if start else None
|
592 |
-
|
593 |
-
if start is None and end is not None:
|
594 |
-
# end with no start is to return tail of content
|
595 |
-
start = -end
|
596 |
-
end = None
|
597 |
-
|
598 |
-
if start is not None and end is not None:
|
599 |
-
# end is inclusive in range header, exclusive for slice
|
600 |
-
end += 1
|
601 |
-
|
602 |
-
if start >= end:
|
603 |
-
raise ValueError("start cannot be after end")
|
604 |
-
|
605 |
-
if start is end is None: # No valid range supplied
|
606 |
-
raise ValueError("No start or end of range specified")
|
607 |
-
|
608 |
-
return slice(start, end, 1)
|
609 |
-
|
610 |
-
@reify
|
611 |
-
def content(self) -> StreamReader:
|
612 |
-
"""Return raw payload stream."""
|
613 |
-
return self._payload
|
614 |
-
|
615 |
-
@property
|
616 |
-
def has_body(self) -> bool:
|
617 |
-
"""Return True if request's HTTP BODY can be read, False otherwise."""
|
618 |
-
warnings.warn(
|
619 |
-
"Deprecated, use .can_read_body #2005", DeprecationWarning, stacklevel=2
|
620 |
-
)
|
621 |
-
return not self._payload.at_eof()
|
622 |
-
|
623 |
-
@property
|
624 |
-
def can_read_body(self) -> bool:
|
625 |
-
"""Return True if request's HTTP BODY can be read, False otherwise."""
|
626 |
-
return not self._payload.at_eof()
|
627 |
-
|
628 |
-
@reify
|
629 |
-
def body_exists(self) -> bool:
|
630 |
-
"""Return True if request has HTTP BODY, False otherwise."""
|
631 |
-
return type(self._payload) is not EmptyStreamReader
|
632 |
-
|
633 |
-
async def release(self) -> None:
|
634 |
-
"""Release request.
|
635 |
-
|
636 |
-
Eat unread part of HTTP BODY if present.
|
637 |
-
"""
|
638 |
-
while not self._payload.at_eof():
|
639 |
-
await self._payload.readany()
|
640 |
-
|
641 |
-
async def read(self) -> bytes:
|
642 |
-
"""Read request body if present.
|
643 |
-
|
644 |
-
Returns bytes object with full request content.
|
645 |
-
"""
|
646 |
-
if self._read_bytes is None:
|
647 |
-
body = bytearray()
|
648 |
-
while True:
|
649 |
-
chunk = await self._payload.readany()
|
650 |
-
body.extend(chunk)
|
651 |
-
if self._client_max_size:
|
652 |
-
body_size = len(body)
|
653 |
-
if body_size >= self._client_max_size:
|
654 |
-
raise HTTPRequestEntityTooLarge(
|
655 |
-
max_size=self._client_max_size, actual_size=body_size
|
656 |
-
)
|
657 |
-
if not chunk:
|
658 |
-
break
|
659 |
-
self._read_bytes = bytes(body)
|
660 |
-
return self._read_bytes
|
661 |
-
|
662 |
-
async def text(self) -> str:
|
663 |
-
"""Return BODY as text using encoding from .charset."""
|
664 |
-
bytes_body = await self.read()
|
665 |
-
encoding = self.charset or "utf-8"
|
666 |
-
return bytes_body.decode(encoding)
|
667 |
-
|
668 |
-
async def json(self, *, loads: JSONDecoder = DEFAULT_JSON_DECODER) -> Any:
|
669 |
-
"""Return BODY as JSON."""
|
670 |
-
body = await self.text()
|
671 |
-
return loads(body)
|
672 |
-
|
673 |
-
async def multipart(self) -> MultipartReader:
|
674 |
-
"""Return async iterator to process BODY as multipart."""
|
675 |
-
return MultipartReader(self._headers, self._payload)
|
676 |
-
|
677 |
-
async def post(self) -> "MultiDictProxy[Union[str, bytes, FileField]]":
|
678 |
-
"""Return POST parameters."""
|
679 |
-
if self._post is not None:
|
680 |
-
return self._post
|
681 |
-
if self._method not in self.POST_METHODS:
|
682 |
-
self._post = MultiDictProxy(MultiDict())
|
683 |
-
return self._post
|
684 |
-
|
685 |
-
content_type = self.content_type
|
686 |
-
if content_type not in (
|
687 |
-
"",
|
688 |
-
"application/x-www-form-urlencoded",
|
689 |
-
"multipart/form-data",
|
690 |
-
):
|
691 |
-
self._post = MultiDictProxy(MultiDict())
|
692 |
-
return self._post
|
693 |
-
|
694 |
-
out: MultiDict[Union[str, bytes, FileField]] = MultiDict()
|
695 |
-
|
696 |
-
if content_type == "multipart/form-data":
|
697 |
-
multipart = await self.multipart()
|
698 |
-
max_size = self._client_max_size
|
699 |
-
|
700 |
-
field = await multipart.next()
|
701 |
-
while field is not None:
|
702 |
-
size = 0
|
703 |
-
field_ct = field.headers.get(hdrs.CONTENT_TYPE)
|
704 |
-
|
705 |
-
if isinstance(field, BodyPartReader):
|
706 |
-
assert field.name is not None
|
707 |
-
|
708 |
-
# Note that according to RFC 7578, the Content-Type header
|
709 |
-
# is optional, even for files, so we can't assume it's
|
710 |
-
# present.
|
711 |
-
# https://tools.ietf.org/html/rfc7578#section-4.4
|
712 |
-
if field.filename:
|
713 |
-
# store file in temp file
|
714 |
-
tmp = tempfile.TemporaryFile()
|
715 |
-
chunk = await field.read_chunk(size=2**16)
|
716 |
-
while chunk:
|
717 |
-
chunk = field.decode(chunk)
|
718 |
-
tmp.write(chunk)
|
719 |
-
size += len(chunk)
|
720 |
-
if 0 < max_size < size:
|
721 |
-
tmp.close()
|
722 |
-
raise HTTPRequestEntityTooLarge(
|
723 |
-
max_size=max_size, actual_size=size
|
724 |
-
)
|
725 |
-
chunk = await field.read_chunk(size=2**16)
|
726 |
-
tmp.seek(0)
|
727 |
-
|
728 |
-
if field_ct is None:
|
729 |
-
field_ct = "application/octet-stream"
|
730 |
-
|
731 |
-
ff = FileField(
|
732 |
-
field.name,
|
733 |
-
field.filename,
|
734 |
-
cast(io.BufferedReader, tmp),
|
735 |
-
field_ct,
|
736 |
-
field.headers,
|
737 |
-
)
|
738 |
-
out.add(field.name, ff)
|
739 |
-
else:
|
740 |
-
# deal with ordinary data
|
741 |
-
value = await field.read(decode=True)
|
742 |
-
if field_ct is None or field_ct.startswith("text/"):
|
743 |
-
charset = field.get_charset(default="utf-8")
|
744 |
-
out.add(field.name, value.decode(charset))
|
745 |
-
else:
|
746 |
-
out.add(field.name, value)
|
747 |
-
size += len(value)
|
748 |
-
if 0 < max_size < size:
|
749 |
-
raise HTTPRequestEntityTooLarge(
|
750 |
-
max_size=max_size, actual_size=size
|
751 |
-
)
|
752 |
-
else:
|
753 |
-
raise ValueError(
|
754 |
-
"To decode nested multipart you need " "to use custom reader",
|
755 |
-
)
|
756 |
-
|
757 |
-
field = await multipart.next()
|
758 |
-
else:
|
759 |
-
data = await self.read()
|
760 |
-
if data:
|
761 |
-
charset = self.charset or "utf-8"
|
762 |
-
out.extend(
|
763 |
-
parse_qsl(
|
764 |
-
data.rstrip().decode(charset),
|
765 |
-
keep_blank_values=True,
|
766 |
-
encoding=charset,
|
767 |
-
)
|
768 |
-
)
|
769 |
-
|
770 |
-
self._post = MultiDictProxy(out)
|
771 |
-
return self._post
|
772 |
-
|
773 |
-
def get_extra_info(self, name: str, default: Any = None) -> Any:
|
774 |
-
"""Extra info from protocol transport"""
|
775 |
-
protocol = self._protocol
|
776 |
-
if protocol is None:
|
777 |
-
return default
|
778 |
-
|
779 |
-
transport = protocol.transport
|
780 |
-
if transport is None:
|
781 |
-
return default
|
782 |
-
|
783 |
-
return transport.get_extra_info(name, default)
|
784 |
-
|
785 |
-
def __repr__(self) -> str:
|
786 |
-
ascii_encodable_path = self.path.encode("ascii", "backslashreplace").decode(
|
787 |
-
"ascii"
|
788 |
-
)
|
789 |
-
return "<{} {} {} >".format(
|
790 |
-
self.__class__.__name__, self._method, ascii_encodable_path
|
791 |
-
)
|
792 |
-
|
793 |
-
def __eq__(self, other: object) -> bool:
|
794 |
-
return id(self) == id(other)
|
795 |
-
|
796 |
-
def __bool__(self) -> bool:
|
797 |
-
return True
|
798 |
-
|
799 |
-
async def _prepare_hook(self, response: StreamResponse) -> None:
|
800 |
-
return
|
801 |
-
|
802 |
-
def _cancel(self, exc: BaseException) -> None:
|
803 |
-
self._payload.set_exception(exc)
|
804 |
-
|
805 |
-
|
806 |
-
class Request(BaseRequest):
|
807 |
-
|
808 |
-
ATTRS = BaseRequest.ATTRS | frozenset(["_match_info"])
|
809 |
-
|
810 |
-
def __init__(self, *args: Any, **kwargs: Any) -> None:
|
811 |
-
super().__init__(*args, **kwargs)
|
812 |
-
|
813 |
-
# matchdict, route_name, handler
|
814 |
-
# or information about traversal lookup
|
815 |
-
|
816 |
-
# initialized after route resolving
|
817 |
-
self._match_info: Optional[UrlMappingMatchInfo] = None
|
818 |
-
|
819 |
-
if DEBUG:
|
820 |
-
|
821 |
-
def __setattr__(self, name: str, val: Any) -> None:
|
822 |
-
if name not in self.ATTRS:
|
823 |
-
warnings.warn(
|
824 |
-
"Setting custom {}.{} attribute "
|
825 |
-
"is discouraged".format(self.__class__.__name__, name),
|
826 |
-
DeprecationWarning,
|
827 |
-
stacklevel=2,
|
828 |
-
)
|
829 |
-
super().__setattr__(name, val)
|
830 |
-
|
831 |
-
def clone(
|
832 |
-
self,
|
833 |
-
*,
|
834 |
-
method: str = sentinel,
|
835 |
-
rel_url: StrOrURL = sentinel,
|
836 |
-
headers: LooseHeaders = sentinel,
|
837 |
-
scheme: str = sentinel,
|
838 |
-
host: str = sentinel,
|
839 |
-
remote: str = sentinel,
|
840 |
-
) -> "Request":
|
841 |
-
ret = super().clone(
|
842 |
-
method=method,
|
843 |
-
rel_url=rel_url,
|
844 |
-
headers=headers,
|
845 |
-
scheme=scheme,
|
846 |
-
host=host,
|
847 |
-
remote=remote,
|
848 |
-
)
|
849 |
-
new_ret = cast(Request, ret)
|
850 |
-
new_ret._match_info = self._match_info
|
851 |
-
return new_ret
|
852 |
-
|
853 |
-
@reify
|
854 |
-
def match_info(self) -> "UrlMappingMatchInfo":
|
855 |
-
"""Result of route resolving."""
|
856 |
-
match_info = self._match_info
|
857 |
-
assert match_info is not None
|
858 |
-
return match_info
|
859 |
-
|
860 |
-
@property
|
861 |
-
def app(self) -> "Application":
|
862 |
-
"""Application instance."""
|
863 |
-
match_info = self._match_info
|
864 |
-
assert match_info is not None
|
865 |
-
return match_info.current_app
|
866 |
-
|
867 |
-
@property
|
868 |
-
def config_dict(self) -> ChainMapProxy:
|
869 |
-
match_info = self._match_info
|
870 |
-
assert match_info is not None
|
871 |
-
lst = match_info.apps
|
872 |
-
app = self.app
|
873 |
-
idx = lst.index(app)
|
874 |
-
sublist = list(reversed(lst[: idx + 1]))
|
875 |
-
return ChainMapProxy(sublist)
|
876 |
-
|
877 |
-
async def _prepare_hook(self, response: StreamResponse) -> None:
|
878 |
-
match_info = self._match_info
|
879 |
-
if match_info is None:
|
880 |
-
return
|
881 |
-
for app in match_info._apps:
|
882 |
-
await app.on_response_prepare.send(self, response)
|
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/charset_normalizer/cli/__init__.py
DELETED
File without changes
|
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/tables/G__l_a_t.py
DELETED
@@ -1,234 +0,0 @@
|
|
1 |
-
from fontTools.misc import sstruct
|
2 |
-
from fontTools.misc.fixedTools import floatToFixedToStr
|
3 |
-
from fontTools.misc.textTools import safeEval
|
4 |
-
|
5 |
-
# from itertools import *
|
6 |
-
from functools import partial
|
7 |
-
from . import DefaultTable
|
8 |
-
from . import grUtils
|
9 |
-
import struct
|
10 |
-
|
11 |
-
|
12 |
-
Glat_format_0 = """
|
13 |
-
> # big endian
|
14 |
-
version: 16.16F
|
15 |
-
"""
|
16 |
-
|
17 |
-
Glat_format_3 = """
|
18 |
-
>
|
19 |
-
version: 16.16F
|
20 |
-
compression:L # compression scheme or reserved
|
21 |
-
"""
|
22 |
-
|
23 |
-
Glat_format_1_entry = """
|
24 |
-
>
|
25 |
-
attNum: B # Attribute number of first attribute
|
26 |
-
num: B # Number of attributes in this run
|
27 |
-
"""
|
28 |
-
Glat_format_23_entry = """
|
29 |
-
>
|
30 |
-
attNum: H # Attribute number of first attribute
|
31 |
-
num: H # Number of attributes in this run
|
32 |
-
"""
|
33 |
-
|
34 |
-
Glat_format_3_octabox_metrics = """
|
35 |
-
>
|
36 |
-
subboxBitmap: H # Which subboxes exist on 4x4 grid
|
37 |
-
diagNegMin: B # Defines minimum negatively-sloped diagonal (si)
|
38 |
-
diagNegMax: B # Defines maximum negatively-sloped diagonal (sa)
|
39 |
-
diagPosMin: B # Defines minimum positively-sloped diagonal (di)
|
40 |
-
diagPosMax: B # Defines maximum positively-sloped diagonal (da)
|
41 |
-
"""
|
42 |
-
|
43 |
-
Glat_format_3_subbox_entry = """
|
44 |
-
>
|
45 |
-
left: B # xi
|
46 |
-
right: B # xa
|
47 |
-
bottom: B # yi
|
48 |
-
top: B # ya
|
49 |
-
diagNegMin: B # Defines minimum negatively-sloped diagonal (si)
|
50 |
-
diagNegMax: B # Defines maximum negatively-sloped diagonal (sa)
|
51 |
-
diagPosMin: B # Defines minimum positively-sloped diagonal (di)
|
52 |
-
diagPosMax: B # Defines maximum positively-sloped diagonal (da)
|
53 |
-
"""
|
54 |
-
|
55 |
-
|
56 |
-
class _Object:
|
57 |
-
pass
|
58 |
-
|
59 |
-
|
60 |
-
class _Dict(dict):
|
61 |
-
pass
|
62 |
-
|
63 |
-
|
64 |
-
class table_G__l_a_t(DefaultTable.DefaultTable):
|
65 |
-
"""
|
66 |
-
Support Graphite Glat tables
|
67 |
-
"""
|
68 |
-
|
69 |
-
def __init__(self, tag=None):
|
70 |
-
DefaultTable.DefaultTable.__init__(self, tag)
|
71 |
-
self.scheme = 0
|
72 |
-
|
73 |
-
def decompile(self, data, ttFont):
|
74 |
-
sstruct.unpack2(Glat_format_0, data, self)
|
75 |
-
self.version = float(floatToFixedToStr(self.version, precisionBits=16))
|
76 |
-
if self.version <= 1.9:
|
77 |
-
decoder = partial(self.decompileAttributes12, fmt=Glat_format_1_entry)
|
78 |
-
elif self.version <= 2.9:
|
79 |
-
decoder = partial(self.decompileAttributes12, fmt=Glat_format_23_entry)
|
80 |
-
elif self.version >= 3.0:
|
81 |
-
(data, self.scheme) = grUtils.decompress(data)
|
82 |
-
sstruct.unpack2(Glat_format_3, data, self)
|
83 |
-
self.hasOctaboxes = (self.compression & 1) == 1
|
84 |
-
decoder = self.decompileAttributes3
|
85 |
-
|
86 |
-
gloc = ttFont["Gloc"]
|
87 |
-
self.attributes = {}
|
88 |
-
count = 0
|
89 |
-
for s, e in zip(gloc, gloc[1:]):
|
90 |
-
self.attributes[ttFont.getGlyphName(count)] = decoder(data[s:e])
|
91 |
-
count += 1
|
92 |
-
|
93 |
-
def decompileAttributes12(self, data, fmt):
|
94 |
-
attributes = _Dict()
|
95 |
-
while len(data) > 3:
|
96 |
-
e, data = sstruct.unpack2(fmt, data, _Object())
|
97 |
-
keys = range(e.attNum, e.attNum + e.num)
|
98 |
-
if len(data) >= 2 * e.num:
|
99 |
-
vals = struct.unpack_from((">%dh" % e.num), data)
|
100 |
-
attributes.update(zip(keys, vals))
|
101 |
-
data = data[2 * e.num :]
|
102 |
-
return attributes
|
103 |
-
|
104 |
-
def decompileAttributes3(self, data):
|
105 |
-
if self.hasOctaboxes:
|
106 |
-
o, data = sstruct.unpack2(Glat_format_3_octabox_metrics, data, _Object())
|
107 |
-
numsub = bin(o.subboxBitmap).count("1")
|
108 |
-
o.subboxes = []
|
109 |
-
for b in range(numsub):
|
110 |
-
if len(data) >= 8:
|
111 |
-
subbox, data = sstruct.unpack2(
|
112 |
-
Glat_format_3_subbox_entry, data, _Object()
|
113 |
-
)
|
114 |
-
o.subboxes.append(subbox)
|
115 |
-
attrs = self.decompileAttributes12(data, Glat_format_23_entry)
|
116 |
-
if self.hasOctaboxes:
|
117 |
-
attrs.octabox = o
|
118 |
-
return attrs
|
119 |
-
|
120 |
-
def compile(self, ttFont):
|
121 |
-
data = sstruct.pack(Glat_format_0, self)
|
122 |
-
if self.version <= 1.9:
|
123 |
-
encoder = partial(self.compileAttributes12, fmt=Glat_format_1_entry)
|
124 |
-
elif self.version <= 2.9:
|
125 |
-
encoder = partial(self.compileAttributes12, fmt=Glat_format_1_entry)
|
126 |
-
elif self.version >= 3.0:
|
127 |
-
self.compression = (self.scheme << 27) + (1 if self.hasOctaboxes else 0)
|
128 |
-
data = sstruct.pack(Glat_format_3, self)
|
129 |
-
encoder = self.compileAttributes3
|
130 |
-
|
131 |
-
glocs = []
|
132 |
-
for n in range(len(self.attributes)):
|
133 |
-
glocs.append(len(data))
|
134 |
-
data += encoder(self.attributes[ttFont.getGlyphName(n)])
|
135 |
-
glocs.append(len(data))
|
136 |
-
ttFont["Gloc"].set(glocs)
|
137 |
-
|
138 |
-
if self.version >= 3.0:
|
139 |
-
data = grUtils.compress(self.scheme, data)
|
140 |
-
return data
|
141 |
-
|
142 |
-
def compileAttributes12(self, attrs, fmt):
|
143 |
-
data = b""
|
144 |
-
for e in grUtils.entries(attrs):
|
145 |
-
data += sstruct.pack(fmt, {"attNum": e[0], "num": e[1]}) + struct.pack(
|
146 |
-
(">%dh" % len(e[2])), *e[2]
|
147 |
-
)
|
148 |
-
return data
|
149 |
-
|
150 |
-
def compileAttributes3(self, attrs):
|
151 |
-
if self.hasOctaboxes:
|
152 |
-
o = attrs.octabox
|
153 |
-
data = sstruct.pack(Glat_format_3_octabox_metrics, o)
|
154 |
-
numsub = bin(o.subboxBitmap).count("1")
|
155 |
-
for b in range(numsub):
|
156 |
-
data += sstruct.pack(Glat_format_3_subbox_entry, o.subboxes[b])
|
157 |
-
else:
|
158 |
-
data = ""
|
159 |
-
return data + self.compileAttributes12(attrs, Glat_format_23_entry)
|
160 |
-
|
161 |
-
def toXML(self, writer, ttFont):
|
162 |
-
writer.simpletag("version", version=self.version, compressionScheme=self.scheme)
|
163 |
-
writer.newline()
|
164 |
-
for n, a in sorted(
|
165 |
-
self.attributes.items(), key=lambda x: ttFont.getGlyphID(x[0])
|
166 |
-
):
|
167 |
-
writer.begintag("glyph", name=n)
|
168 |
-
writer.newline()
|
169 |
-
if hasattr(a, "octabox"):
|
170 |
-
o = a.octabox
|
171 |
-
formatstring, names, fixes = sstruct.getformat(
|
172 |
-
Glat_format_3_octabox_metrics
|
173 |
-
)
|
174 |
-
vals = {}
|
175 |
-
for k in names:
|
176 |
-
if k == "subboxBitmap":
|
177 |
-
continue
|
178 |
-
vals[k] = "{:.3f}%".format(getattr(o, k) * 100.0 / 255)
|
179 |
-
vals["bitmap"] = "{:0X}".format(o.subboxBitmap)
|
180 |
-
writer.begintag("octaboxes", **vals)
|
181 |
-
writer.newline()
|
182 |
-
formatstring, names, fixes = sstruct.getformat(
|
183 |
-
Glat_format_3_subbox_entry
|
184 |
-
)
|
185 |
-
for s in o.subboxes:
|
186 |
-
vals = {}
|
187 |
-
for k in names:
|
188 |
-
vals[k] = "{:.3f}%".format(getattr(s, k) * 100.0 / 255)
|
189 |
-
writer.simpletag("octabox", **vals)
|
190 |
-
writer.newline()
|
191 |
-
writer.endtag("octaboxes")
|
192 |
-
writer.newline()
|
193 |
-
for k, v in sorted(a.items()):
|
194 |
-
writer.simpletag("attribute", index=k, value=v)
|
195 |
-
writer.newline()
|
196 |
-
writer.endtag("glyph")
|
197 |
-
writer.newline()
|
198 |
-
|
199 |
-
def fromXML(self, name, attrs, content, ttFont):
|
200 |
-
if name == "version":
|
201 |
-
self.version = float(safeEval(attrs["version"]))
|
202 |
-
self.scheme = int(safeEval(attrs["compressionScheme"]))
|
203 |
-
if name != "glyph":
|
204 |
-
return
|
205 |
-
if not hasattr(self, "attributes"):
|
206 |
-
self.attributes = {}
|
207 |
-
gname = attrs["name"]
|
208 |
-
attributes = _Dict()
|
209 |
-
for element in content:
|
210 |
-
if not isinstance(element, tuple):
|
211 |
-
continue
|
212 |
-
tag, attrs, subcontent = element
|
213 |
-
if tag == "attribute":
|
214 |
-
k = int(safeEval(attrs["index"]))
|
215 |
-
v = int(safeEval(attrs["value"]))
|
216 |
-
attributes[k] = v
|
217 |
-
elif tag == "octaboxes":
|
218 |
-
self.hasOctaboxes = True
|
219 |
-
o = _Object()
|
220 |
-
o.subboxBitmap = int(attrs["bitmap"], 16)
|
221 |
-
o.subboxes = []
|
222 |
-
del attrs["bitmap"]
|
223 |
-
for k, v in attrs.items():
|
224 |
-
setattr(o, k, int(float(v[:-1]) * 255.0 / 100.0 + 0.5))
|
225 |
-
for element in subcontent:
|
226 |
-
if not isinstance(element, tuple):
|
227 |
-
continue
|
228 |
-
(tag, attrs, subcontent) = element
|
229 |
-
so = _Object()
|
230 |
-
for k, v in attrs.items():
|
231 |
-
setattr(so, k, int(float(v[:-1]) * 255.0 / 100.0 + 0.5))
|
232 |
-
o.subboxes.append(so)
|
233 |
-
attributes.octabox = o
|
234 |
-
self.attributes[gname] = attributes
|
|
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fsspec/gui.py
DELETED
@@ -1,411 +0,0 @@
|
|
1 |
-
import ast
|
2 |
-
import contextlib
|
3 |
-
import logging
|
4 |
-
import os
|
5 |
-
import re
|
6 |
-
from typing import ClassVar, Sequence
|
7 |
-
|
8 |
-
import panel as pn
|
9 |
-
|
10 |
-
from .core import OpenFile, get_filesystem_class, split_protocol
|
11 |
-
from .registry import known_implementations
|
12 |
-
|
13 |
-
pn.extension()
|
14 |
-
logger = logging.getLogger("fsspec.gui")
|
15 |
-
|
16 |
-
|
17 |
-
class SigSlot(object):
|
18 |
-
"""Signal-slot mixin, for Panel event passing
|
19 |
-
|
20 |
-
Include this class in a widget manager's superclasses to be able to
|
21 |
-
register events and callbacks on Panel widgets managed by that class.
|
22 |
-
|
23 |
-
The method ``_register`` should be called as widgets are added, and external
|
24 |
-
code should call ``connect`` to associate callbacks.
|
25 |
-
|
26 |
-
By default, all signals emit a DEBUG logging statement.
|
27 |
-
"""
|
28 |
-
|
29 |
-
# names of signals that this class may emit each of which must be
|
30 |
-
# set by _register for any new instance
|
31 |
-
signals: ClassVar[Sequence[str]] = []
|
32 |
-
# names of actions that this class may respond to
|
33 |
-
slots: ClassVar[Sequence[str]] = []
|
34 |
-
|
35 |
-
# each of which must be a method name
|
36 |
-
|
37 |
-
def __init__(self):
|
38 |
-
self._ignoring_events = False
|
39 |
-
self._sigs = {}
|
40 |
-
self._map = {}
|
41 |
-
self._setup()
|
42 |
-
|
43 |
-
def _setup(self):
|
44 |
-
"""Create GUI elements and register signals"""
|
45 |
-
self.panel = pn.pane.PaneBase()
|
46 |
-
# no signals to set up in the base class
|
47 |
-
|
48 |
-
def _register(
|
49 |
-
self, widget, name, thing="value", log_level=logging.DEBUG, auto=False
|
50 |
-
):
|
51 |
-
"""Watch the given attribute of a widget and assign it a named event
|
52 |
-
|
53 |
-
This is normally called at the time a widget is instantiated, in the
|
54 |
-
class which owns it.
|
55 |
-
|
56 |
-
Parameters
|
57 |
-
----------
|
58 |
-
widget : pn.layout.Panel or None
|
59 |
-
Widget to watch. If None, an anonymous signal not associated with
|
60 |
-
any widget.
|
61 |
-
name : str
|
62 |
-
Name of this event
|
63 |
-
thing : str
|
64 |
-
Attribute of the given widget to watch
|
65 |
-
log_level : int
|
66 |
-
When the signal is triggered, a logging event of the given level
|
67 |
-
will be fired in the dfviz logger.
|
68 |
-
auto : bool
|
69 |
-
If True, automatically connects with a method in this class of the
|
70 |
-
same name.
|
71 |
-
"""
|
72 |
-
if name not in self.signals:
|
73 |
-
raise ValueError("Attempt to assign an undeclared signal: %s" % name)
|
74 |
-
self._sigs[name] = {
|
75 |
-
"widget": widget,
|
76 |
-
"callbacks": [],
|
77 |
-
"thing": thing,
|
78 |
-
"log": log_level,
|
79 |
-
}
|
80 |
-
wn = "-".join(
|
81 |
-
[
|
82 |
-
getattr(widget, "name", str(widget)) if widget is not None else "none",
|
83 |
-
thing,
|
84 |
-
]
|
85 |
-
)
|
86 |
-
self._map[wn] = name
|
87 |
-
if widget is not None:
|
88 |
-
widget.param.watch(self._signal, thing, onlychanged=True)
|
89 |
-
if auto and hasattr(self, name):
|
90 |
-
self.connect(name, getattr(self, name))
|
91 |
-
|
92 |
-
def _repr_mimebundle_(self, *args, **kwargs):
|
93 |
-
"""Display in a notebook or a server"""
|
94 |
-
try:
|
95 |
-
return self.panel._repr_mimebundle_(*args, **kwargs)
|
96 |
-
except (ValueError, AttributeError):
|
97 |
-
raise NotImplementedError("Panel does not seem to be set " "up properly")
|
98 |
-
|
99 |
-
def connect(self, signal, slot):
|
100 |
-
"""Associate call back with given event
|
101 |
-
|
102 |
-
The callback must be a function which takes the "new" value of the
|
103 |
-
watched attribute as the only parameter. If the callback return False,
|
104 |
-
this cancels any further processing of the given event.
|
105 |
-
|
106 |
-
Alternatively, the callback can be a string, in which case it means
|
107 |
-
emitting the correspondingly-named event (i.e., connect to self)
|
108 |
-
"""
|
109 |
-
self._sigs[signal]["callbacks"].append(slot)
|
110 |
-
|
111 |
-
def _signal(self, event):
|
112 |
-
"""This is called by a an action on a widget
|
113 |
-
|
114 |
-
Within an self.ignore_events context, nothing happens.
|
115 |
-
|
116 |
-
Tests can execute this method by directly changing the values of
|
117 |
-
widget components.
|
118 |
-
"""
|
119 |
-
if not self._ignoring_events:
|
120 |
-
wn = "-".join([event.obj.name, event.name])
|
121 |
-
if wn in self._map and self._map[wn] in self._sigs:
|
122 |
-
self._emit(self._map[wn], event.new)
|
123 |
-
|
124 |
-
@contextlib.contextmanager
|
125 |
-
def ignore_events(self):
|
126 |
-
"""Temporarily turn off events processing in this instance
|
127 |
-
|
128 |
-
(does not propagate to children)
|
129 |
-
"""
|
130 |
-
self._ignoring_events = True
|
131 |
-
try:
|
132 |
-
yield
|
133 |
-
finally:
|
134 |
-
self._ignoring_events = False
|
135 |
-
|
136 |
-
def _emit(self, sig, value=None):
|
137 |
-
"""An event happened, call its callbacks
|
138 |
-
|
139 |
-
This method can be used in tests to simulate message passing without
|
140 |
-
directly changing visual elements.
|
141 |
-
|
142 |
-
Calling of callbacks will halt whenever one returns False.
|
143 |
-
"""
|
144 |
-
logger.log(self._sigs[sig]["log"], "{}: {}".format(sig, value))
|
145 |
-
for callback in self._sigs[sig]["callbacks"]:
|
146 |
-
if isinstance(callback, str):
|
147 |
-
self._emit(callback)
|
148 |
-
else:
|
149 |
-
try:
|
150 |
-
# running callbacks should not break the interface
|
151 |
-
ret = callback(value)
|
152 |
-
if ret is False:
|
153 |
-
break
|
154 |
-
except Exception as e:
|
155 |
-
logger.exception(
|
156 |
-
"Exception (%s) while executing callback for signal: %s"
|
157 |
-
"" % (e, sig)
|
158 |
-
)
|
159 |
-
|
160 |
-
def show(self, threads=False):
|
161 |
-
"""Open a new browser tab and display this instance's interface"""
|
162 |
-
self.panel.show(threads=threads, verbose=False)
|
163 |
-
return self
|
164 |
-
|
165 |
-
|
166 |
-
class SingleSelect(SigSlot):
|
167 |
-
"""A multiselect which only allows you to select one item for an event"""
|
168 |
-
|
169 |
-
signals = ["_selected", "selected"] # the first is internal
|
170 |
-
slots = ["set_options", "set_selection", "add", "clear", "select"]
|
171 |
-
|
172 |
-
def __init__(self, **kwargs):
|
173 |
-
self.kwargs = kwargs
|
174 |
-
super().__init__()
|
175 |
-
|
176 |
-
def _setup(self):
|
177 |
-
self.panel = pn.widgets.MultiSelect(**self.kwargs)
|
178 |
-
self._register(self.panel, "_selected", "value")
|
179 |
-
self._register(None, "selected")
|
180 |
-
self.connect("_selected", self.select_one)
|
181 |
-
|
182 |
-
def _signal(self, *args, **kwargs):
|
183 |
-
super()._signal(*args, **kwargs)
|
184 |
-
|
185 |
-
def select_one(self, *_):
|
186 |
-
with self.ignore_events():
|
187 |
-
val = [self.panel.value[-1]] if self.panel.value else []
|
188 |
-
self.panel.value = val
|
189 |
-
self._emit("selected", self.panel.value)
|
190 |
-
|
191 |
-
def set_options(self, options):
|
192 |
-
self.panel.options = options
|
193 |
-
|
194 |
-
def clear(self):
|
195 |
-
self.panel.options = []
|
196 |
-
|
197 |
-
@property
|
198 |
-
def value(self):
|
199 |
-
return self.panel.value
|
200 |
-
|
201 |
-
def set_selection(self, selection):
|
202 |
-
self.panel.value = [selection]
|
203 |
-
|
204 |
-
|
205 |
-
class FileSelector(SigSlot):
|
206 |
-
"""Panel-based graphical file selector widget
|
207 |
-
|
208 |
-
Instances of this widget are interactive and can be displayed in jupyter by having
|
209 |
-
them as the output of a cell, or in a separate browser tab using ``.show()``.
|
210 |
-
"""
|
211 |
-
|
212 |
-
signals = [
|
213 |
-
"protocol_changed",
|
214 |
-
"selection_changed",
|
215 |
-
"directory_entered",
|
216 |
-
"home_clicked",
|
217 |
-
"up_clicked",
|
218 |
-
"go_clicked",
|
219 |
-
"filters_changed",
|
220 |
-
]
|
221 |
-
slots = ["set_filters", "go_home"]
|
222 |
-
|
223 |
-
def __init__(self, url=None, filters=None, ignore=None, kwargs=None):
|
224 |
-
"""
|
225 |
-
|
226 |
-
Parameters
|
227 |
-
----------
|
228 |
-
url : str (optional)
|
229 |
-
Initial value of the URL to populate the dialog; should include protocol
|
230 |
-
filters : list(str) (optional)
|
231 |
-
File endings to include in the listings. If not included, all files are
|
232 |
-
allowed. Does not affect directories.
|
233 |
-
If given, the endings will appear as checkboxes in the interface
|
234 |
-
ignore : list(str) (optional)
|
235 |
-
Regex(s) of file basename patterns to ignore, e.g., "\\." for typical
|
236 |
-
hidden files on posix
|
237 |
-
kwargs : dict (optional)
|
238 |
-
To pass to file system instance
|
239 |
-
"""
|
240 |
-
if url:
|
241 |
-
self.init_protocol, url = split_protocol(url)
|
242 |
-
else:
|
243 |
-
self.init_protocol, url = "file", os.getcwd()
|
244 |
-
self.init_url = url
|
245 |
-
self.init_kwargs = kwargs or "{}"
|
246 |
-
self.filters = filters
|
247 |
-
self.ignore = [re.compile(i) for i in ignore or []]
|
248 |
-
self._fs = None
|
249 |
-
super().__init__()
|
250 |
-
|
251 |
-
def _setup(self):
|
252 |
-
self.url = pn.widgets.TextInput(
|
253 |
-
name="url",
|
254 |
-
value=self.init_url,
|
255 |
-
align="end",
|
256 |
-
sizing_mode="stretch_width",
|
257 |
-
width_policy="max",
|
258 |
-
)
|
259 |
-
self.protocol = pn.widgets.Select(
|
260 |
-
options=list(sorted(known_implementations)),
|
261 |
-
value=self.init_protocol,
|
262 |
-
name="protocol",
|
263 |
-
align="center",
|
264 |
-
)
|
265 |
-
self.kwargs = pn.widgets.TextInput(name="kwargs", value="{}", align="center")
|
266 |
-
self.go = pn.widgets.Button(name="⇨", align="end", width=45)
|
267 |
-
self.main = SingleSelect(size=10)
|
268 |
-
self.home = pn.widgets.Button(name="🏠", width=40, height=30, align="end")
|
269 |
-
self.up = pn.widgets.Button(name="‹", width=30, height=30, align="end")
|
270 |
-
|
271 |
-
self._register(self.protocol, "protocol_changed", auto=True)
|
272 |
-
self._register(self.go, "go_clicked", "clicks", auto=True)
|
273 |
-
self._register(self.up, "up_clicked", "clicks", auto=True)
|
274 |
-
self._register(self.home, "home_clicked", "clicks", auto=True)
|
275 |
-
self._register(None, "selection_changed")
|
276 |
-
self.main.connect("selected", self.selection_changed)
|
277 |
-
self._register(None, "directory_entered")
|
278 |
-
self.prev_protocol = self.protocol.value
|
279 |
-
self.prev_kwargs = self.storage_options
|
280 |
-
|
281 |
-
self.filter_sel = pn.widgets.CheckBoxGroup(
|
282 |
-
value=[], options=[], inline=False, align="end", width_policy="min"
|
283 |
-
)
|
284 |
-
self._register(self.filter_sel, "filters_changed", auto=True)
|
285 |
-
|
286 |
-
self.panel = pn.Column(
|
287 |
-
pn.Row(self.protocol, self.kwargs),
|
288 |
-
pn.Row(self.home, self.up, self.url, self.go, self.filter_sel),
|
289 |
-
self.main.panel,
|
290 |
-
)
|
291 |
-
self.set_filters(self.filters)
|
292 |
-
self.go_clicked()
|
293 |
-
|
294 |
-
def set_filters(self, filters=None):
|
295 |
-
self.filters = filters
|
296 |
-
if filters:
|
297 |
-
self.filter_sel.options = filters
|
298 |
-
self.filter_sel.value = filters
|
299 |
-
else:
|
300 |
-
self.filter_sel.options = []
|
301 |
-
self.filter_sel.value = []
|
302 |
-
|
303 |
-
@property
|
304 |
-
def storage_options(self):
|
305 |
-
"""Value of the kwargs box as a dictionary"""
|
306 |
-
return ast.literal_eval(self.kwargs.value) or {}
|
307 |
-
|
308 |
-
@property
|
309 |
-
def fs(self):
|
310 |
-
"""Current filesystem instance"""
|
311 |
-
if self._fs is None:
|
312 |
-
cls = get_filesystem_class(self.protocol.value)
|
313 |
-
self._fs = cls(**self.storage_options)
|
314 |
-
return self._fs
|
315 |
-
|
316 |
-
@property
|
317 |
-
def urlpath(self):
|
318 |
-
"""URL of currently selected item"""
|
319 |
-
return (
|
320 |
-
(self.protocol.value + "://" + self.main.value[0])
|
321 |
-
if self.main.value
|
322 |
-
else None
|
323 |
-
)
|
324 |
-
|
325 |
-
def open_file(self, mode="rb", compression=None, encoding=None):
|
326 |
-
"""Create OpenFile instance for the currently selected item
|
327 |
-
|
328 |
-
For example, in a notebook you might do something like
|
329 |
-
|
330 |
-
.. code-block::
|
331 |
-
|
332 |
-
[ ]: sel = FileSelector(); sel
|
333 |
-
|
334 |
-
# user selects their file
|
335 |
-
|
336 |
-
[ ]: with sel.open_file('rb') as f:
|
337 |
-
... out = f.read()
|
338 |
-
|
339 |
-
Parameters
|
340 |
-
----------
|
341 |
-
mode: str (optional)
|
342 |
-
Open mode for the file.
|
343 |
-
compression: str (optional)
|
344 |
-
The interact with the file as compressed. Set to 'infer' to guess
|
345 |
-
compression from the file ending
|
346 |
-
encoding: str (optional)
|
347 |
-
If using text mode, use this encoding; defaults to UTF8.
|
348 |
-
"""
|
349 |
-
if self.urlpath is None:
|
350 |
-
raise ValueError("No file selected")
|
351 |
-
return OpenFile(self.fs, self.urlpath, mode, compression, encoding)
|
352 |
-
|
353 |
-
def filters_changed(self, values):
|
354 |
-
self.filters = values
|
355 |
-
self.go_clicked()
|
356 |
-
|
357 |
-
def selection_changed(self, *_):
|
358 |
-
if self.urlpath is None:
|
359 |
-
return
|
360 |
-
if self.fs.isdir(self.urlpath):
|
361 |
-
self.url.value = self.fs._strip_protocol(self.urlpath)
|
362 |
-
self.go_clicked()
|
363 |
-
|
364 |
-
def go_clicked(self, *_):
|
365 |
-
if (
|
366 |
-
self.prev_protocol != self.protocol.value
|
367 |
-
or self.prev_kwargs != self.storage_options
|
368 |
-
):
|
369 |
-
self._fs = None # causes fs to be recreated
|
370 |
-
self.prev_protocol = self.protocol.value
|
371 |
-
self.prev_kwargs = self.storage_options
|
372 |
-
listing = sorted(
|
373 |
-
self.fs.ls(self.url.value, detail=True), key=lambda x: x["name"]
|
374 |
-
)
|
375 |
-
listing = [
|
376 |
-
l
|
377 |
-
for l in listing
|
378 |
-
if not any(i.match(l["name"].rsplit("/", 1)[-1]) for i in self.ignore)
|
379 |
-
]
|
380 |
-
folders = {
|
381 |
-
"📁 " + o["name"].rsplit("/", 1)[-1]: o["name"]
|
382 |
-
for o in listing
|
383 |
-
if o["type"] == "directory"
|
384 |
-
}
|
385 |
-
files = {
|
386 |
-
"📄 " + o["name"].rsplit("/", 1)[-1]: o["name"]
|
387 |
-
for o in listing
|
388 |
-
if o["type"] == "file"
|
389 |
-
}
|
390 |
-
if self.filters:
|
391 |
-
files = {
|
392 |
-
k: v
|
393 |
-
for k, v in files.items()
|
394 |
-
if any(v.endswith(ext) for ext in self.filters)
|
395 |
-
}
|
396 |
-
self.main.set_options(dict(**folders, **files))
|
397 |
-
|
398 |
-
def protocol_changed(self, *_):
|
399 |
-
self._fs = None
|
400 |
-
self.main.options = []
|
401 |
-
self.url.value = ""
|
402 |
-
|
403 |
-
def home_clicked(self, *_):
|
404 |
-
self.protocol.value = self.init_protocol
|
405 |
-
self.kwargs.value = self.init_kwargs
|
406 |
-
self.url.value = self.init_url
|
407 |
-
self.go_clicked()
|
408 |
-
|
409 |
-
def up_clicked(self, *_):
|
410 |
-
self.url.value = self.fs._parent(self.url.value)
|
411 |
-
self.go_clicked()
|
|
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/components/dataset.py
DELETED
@@ -1,137 +0,0 @@
-"""gr.Dataset() component."""
-
-from __future__ import annotations
-
-from typing import Any, Literal
-
-from gradio_client.documentation import document, set_documentation_group
-from gradio_client.serializing import StringSerializable
-
-from gradio.components.base import (
-    Component,
-    IOComponent,
-    _Keywords,
-    get_component_instance,
-)
-from gradio.events import Clickable, Selectable
-
-set_documentation_group("component")
-
-
-@document()
-class Dataset(Clickable, Selectable, Component, StringSerializable):
-    """
-    Used to create an output widget for showing datasets. Used to render the examples
-    box.
-    Preprocessing: passes the selected sample either as a {list} of data (if type="value") or as an {int} index (if type="index")
-    Postprocessing: expects a {list} of {lists} corresponding to the dataset data.
-    """
-
-    def __init__(
-        self,
-        *,
-        label: str | None = None,
-        components: list[IOComponent] | list[str],
-        samples: list[list[Any]] | None = None,
-        headers: list[str] | None = None,
-        type: Literal["values", "index"] = "values",
-        samples_per_page: int = 10,
-        visible: bool = True,
-        elem_id: str | None = None,
-        elem_classes: list[str] | str | None = None,
-        container: bool = True,
-        scale: int | None = None,
-        min_width: int = 160,
-        **kwargs,
-    ):
-        """
-        Parameters:
-            components: Which component types to show in this dataset widget, can be passed in as a list of string names or Components instances. The following components are supported in a Dataset: Audio, Checkbox, CheckboxGroup, ColorPicker, Dataframe, Dropdown, File, HTML, Image, Markdown, Model3D, Number, Radio, Slider, Textbox, TimeSeries, Video
-            samples: a nested list of samples. Each sublist within the outer list represents a data sample, and each element within the sublist represents an value for each component
-            headers: Column headers in the Dataset widget, should be the same len as components. If not provided, inferred from component labels
-            type: 'values' if clicking on a sample should pass the value of the sample, or "index" if it should pass the index of the sample
-            samples_per_page: how many examples to show per page.
-            visible: If False, component will be hidden.
-            elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
-            elem_classes: An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.
-            container: If True, will place the component in a container - providing some extra padding around the border.
-            scale: relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.
-            min_width: minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.
-        """
-        Component.__init__(
-            self, visible=visible, elem_id=elem_id, elem_classes=elem_classes, **kwargs
-        )
-        self.container = container
-        self.scale = scale
-        self.min_width = min_width
-        self.components = [get_component_instance(c, render=False) for c in components]
-
-        # Narrow type to IOComponent
-        assert all(
-            isinstance(c, IOComponent) for c in self.components
-        ), "All components in a `Dataset` must be subclasses of `IOComponent`"
-        self.components = [c for c in self.components if isinstance(c, IOComponent)]
-        for component in self.components:
-            component.root_url = self.root_url
-
-        self.samples = [[]] if samples is None else samples
-        for example in self.samples:
-            for i, (component, ex) in enumerate(zip(self.components, example)):
-                example[i] = component.as_example(ex)
-        self.type = type
-        self.label = label
-        if headers is not None:
-            self.headers = headers
-        elif all(c.label is None for c in self.components):
-            self.headers = []
-        else:
-            self.headers = [c.label or "" for c in self.components]
-        self.samples_per_page = samples_per_page
-
-    def get_config(self):
-        return {
-            "components": [component.get_block_name() for component in self.components],
-            "headers": self.headers,
-            "samples": self.samples,
-            "type": self.type,
-            "label": self.label,
-            "samples_per_page": self.samples_per_page,
-            "container": self.container,
-            "scale": self.scale,
-            "min_width": self.min_width,
-            **Component.get_config(self),
-        }
-
-    @staticmethod
-    def update(
-        samples: Any | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE,
-        visible: bool | None = None,
-        label: str | None = None,
-        container: bool | None = None,
-        scale: int | None = None,
-        min_width: int | None = None,
-    ):
-        return {
-            "samples": samples,
-            "visible": visible,
-            "label": label,
-            "container": container,
-            "scale": scale,
-            "min_width": min_width,
-            "__type__": "update",
-        }
-
-    def preprocess(self, x: Any) -> Any:
-        """
-        Any preprocessing needed to be performed on function input.
-        """
-        if self.type == "index":
-            return x
-        elif self.type == "values":
-            return self.samples[x]
-
-    def postprocess(self, samples: list[list[Any]]) -> dict:
-        return {
-            "samples": samples,
-            "__type__": "update",
-        }
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/huggingface_hub/inference/_generated/_async_client.py
DELETED
@@ -1,1269 +0,0 @@
|
|
1 |
-
# coding=utf-8
|
2 |
-
# Copyright 2023-present, the HuggingFace Inc. team.
|
3 |
-
#
|
4 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
# you may not use this file except in compliance with the License.
|
6 |
-
# You may obtain a copy of the License at
|
7 |
-
#
|
8 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
#
|
10 |
-
# Unless required by applicable law or agreed to in writing, software
|
11 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
# See the License for the specific language governing permissions and
|
14 |
-
# limitations under the License.
|
15 |
-
#
|
16 |
-
# WARNING
|
17 |
-
# This entire file has been adapted from the sync-client code in `src/huggingface_hub/inference/_client.py`.
|
18 |
-
# Any change in InferenceClient will be automatically reflected in AsyncInferenceClient.
|
19 |
-
# To re-generate the code, run `make style` or `python ./utils/generate_async_inference_client.py --update`.
|
20 |
-
# WARNING
|
21 |
-
import logging
|
22 |
-
import time
|
23 |
-
import warnings
|
24 |
-
from dataclasses import asdict
|
25 |
-
from typing import (
|
26 |
-
TYPE_CHECKING,
|
27 |
-
Any,
|
28 |
-
AsyncIterable,
|
29 |
-
Dict,
|
30 |
-
List,
|
31 |
-
Optional,
|
32 |
-
Union,
|
33 |
-
overload,
|
34 |
-
)
|
35 |
-
|
36 |
-
from requests.structures import CaseInsensitiveDict
|
37 |
-
|
38 |
-
from huggingface_hub.constants import INFERENCE_ENDPOINT
|
39 |
-
from huggingface_hub.inference._common import (
|
40 |
-
ContentT,
|
41 |
-
InferenceTimeoutError,
|
42 |
-
_async_stream_text_generation_response,
|
43 |
-
_b64_encode,
|
44 |
-
_b64_to_image,
|
45 |
-
_bytes_to_dict,
|
46 |
-
_bytes_to_image,
|
47 |
-
_get_recommended_model,
|
48 |
-
_import_numpy,
|
49 |
-
_is_tgi_server,
|
50 |
-
_open_as_binary,
|
51 |
-
_set_as_non_tgi,
|
52 |
-
)
|
53 |
-
from huggingface_hub.inference._text_generation import (
|
54 |
-
TextGenerationParameters,
|
55 |
-
TextGenerationRequest,
|
56 |
-
TextGenerationResponse,
|
57 |
-
TextGenerationStreamResponse,
|
58 |
-
raise_text_generation_error,
|
59 |
-
)
|
60 |
-
from huggingface_hub.inference._types import ClassificationOutput, ConversationalOutput, ImageSegmentationOutput
|
61 |
-
from huggingface_hub.utils import (
|
62 |
-
build_hf_headers,
|
63 |
-
)
|
64 |
-
from huggingface_hub.utils._typing import Literal
|
65 |
-
|
66 |
-
from .._common import _async_yield_from, _import_aiohttp
|
67 |
-
|
68 |
-
|
69 |
-
if TYPE_CHECKING:
|
70 |
-
import numpy as np
|
71 |
-
from PIL import Image
|
72 |
-
|
73 |
-
logger = logging.getLogger(__name__)
|
74 |
-
|
75 |
-
|
76 |
-
class AsyncInferenceClient:
|
77 |
-
"""
|
78 |
-
Initialize a new Inference Client.
|
79 |
-
|
80 |
-
[`InferenceClient`] aims to provide a unified experience to perform inference. The client can be used
|
81 |
-
seamlessly with either the (free) Inference API or self-hosted Inference Endpoints.
|
82 |
-
|
83 |
-
Args:
|
84 |
-
model (`str`, `optional`):
|
85 |
-
The model to run inference with. Can be a model id hosted on the Hugging Face Hub, e.g. `bigcode/starcoder`
|
86 |
-
or a URL to a deployed Inference Endpoint. Defaults to None, in which case a recommended model is
|
87 |
-
automatically selected for the task.
|
88 |
-
token (`str`, *optional*):
|
89 |
-
Hugging Face token. Will default to the locally saved token. Pass `token=False` if you don't want to send
|
90 |
-
your token to the server.
|
91 |
-
timeout (`float`, `optional`):
|
92 |
-
The maximum number of seconds to wait for a response from the server. Loading a new model in Inference
|
93 |
-
API can take up to several minutes. Defaults to None, meaning it will loop until the server is available.
|
94 |
-
headers (`Dict[str, str]`, `optional`):
|
95 |
-
Additional headers to send to the server. By default only the authorization and user-agent headers are sent.
|
96 |
-
Values in this dictionary will override the default values.
|
97 |
-
cookies (`Dict[str, str]`, `optional`):
|
98 |
-
Additional cookies to send to the server.
|
99 |
-
"""
|
100 |
-
|
101 |
-
def __init__(
|
102 |
-
self,
|
103 |
-
model: Optional[str] = None,
|
104 |
-
token: Union[str, bool, None] = None,
|
105 |
-
timeout: Optional[float] = None,
|
106 |
-
headers: Optional[Dict[str, str]] = None,
|
107 |
-
cookies: Optional[Dict[str, str]] = None,
|
108 |
-
) -> None:
|
109 |
-
self.model: Optional[str] = model
|
110 |
-
self.headers = CaseInsensitiveDict(build_hf_headers(token=token)) # contains 'authorization' + 'user-agent'
|
111 |
-
if headers is not None:
|
112 |
-
self.headers.update(headers)
|
113 |
-
self.cookies = cookies
|
114 |
-
self.timeout = timeout
|
115 |
-
|
116 |
-
def __repr__(self):
|
117 |
-
return f"<InferenceClient(model='{self.model if self.model else ''}', timeout={self.timeout})>"
|
118 |
-
|
119 |
-
@overload
|
120 |
-
async def post( # type: ignore
|
121 |
-
self,
|
122 |
-
*,
|
123 |
-
json: Optional[Union[str, Dict, List]] = None,
|
124 |
-
data: Optional[ContentT] = None,
|
125 |
-
model: Optional[str] = None,
|
126 |
-
task: Optional[str] = None,
|
127 |
-
stream: Literal[False] = ...,
|
128 |
-
) -> bytes:
|
129 |
-
pass
|
130 |
-
|
131 |
-
@overload
|
132 |
-
async def post( # type: ignore
|
133 |
-
self,
|
134 |
-
*,
|
135 |
-
json: Optional[Union[str, Dict, List]] = None,
|
136 |
-
data: Optional[ContentT] = None,
|
137 |
-
model: Optional[str] = None,
|
138 |
-
task: Optional[str] = None,
|
139 |
-
stream: Literal[True] = ...,
|
140 |
-
) -> AsyncIterable[bytes]:
|
141 |
-
pass
|
142 |
-
|
143 |
-
async def post(
|
144 |
-
self,
|
145 |
-
*,
|
146 |
-
json: Optional[Union[str, Dict, List]] = None,
|
147 |
-
data: Optional[ContentT] = None,
|
148 |
-
model: Optional[str] = None,
|
149 |
-
task: Optional[str] = None,
|
150 |
-
stream: bool = False,
|
151 |
-
) -> Union[bytes, AsyncIterable[bytes]]:
|
152 |
-
"""
|
153 |
-
Make a POST request to the inference server.
|
154 |
-
|
155 |
-
Args:
|
156 |
-
json (`Union[str, Dict, List]`, *optional*):
|
157 |
-
The JSON data to send in the request body. Defaults to None.
|
158 |
-
data (`Union[str, Path, bytes, BinaryIO]`, *optional*):
|
159 |
-
The content to send in the request body. It can be raw bytes, a pointer to an opened file, a local file
|
160 |
-
path, or a URL to an online resource (image, audio file,...). If both `json` and `data` are passed,
|
161 |
-
`data` will take precedence. At least `json` or `data` must be provided. Defaults to None.
|
162 |
-
model (`str`, *optional*):
|
163 |
-
The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed
|
164 |
-
Inference Endpoint. Will override the model defined at the instance level. Defaults to None.
|
165 |
-
task (`str`, *optional*):
|
166 |
-
The task to perform on the inference. Used only to default to a recommended model if `model` is not
|
167 |
-
provided. At least `model` or `task` must be provided. Defaults to None.
|
168 |
-
stream (`bool`, *optional*):
|
169 |
-
Whether to iterate over streaming APIs.
|
170 |
-
|
171 |
-
Returns:
|
172 |
-
bytes: The raw bytes returned by the server.
|
173 |
-
|
174 |
-
Raises:
|
175 |
-
[`InferenceTimeoutError`]:
|
176 |
-
If the model is unavailable or the request times out.
|
177 |
-
`aiohttp.ClientResponseError`:
|
178 |
-
If the request fails with an HTTP error status code other than HTTP 503.
|
179 |
-
"""
|
180 |
-
|
181 |
-
aiohttp = _import_aiohttp()
|
182 |
-
|
183 |
-
url = self._resolve_url(model, task)
|
184 |
-
|
185 |
-
if data is not None and json is not None:
|
186 |
-
warnings.warn("Ignoring `json` as `data` is passed as binary.")
|
187 |
-
|
188 |
-
t0 = time.time()
|
189 |
-
timeout = self.timeout
|
190 |
-
while True:
|
191 |
-
with _open_as_binary(data) as data_as_binary:
|
192 |
-
# Do not use context manager as we don't want to close the connection immediately when returning
|
193 |
-
# a stream
|
194 |
-
client = aiohttp.ClientSession(
|
195 |
-
headers=self.headers, cookies=self.cookies, timeout=aiohttp.ClientTimeout(self.timeout)
|
196 |
-
)
|
197 |
-
|
198 |
-
try:
|
199 |
-
response = await client.post(url, headers=build_hf_headers(), json=json, data=data_as_binary)
|
200 |
-
response_error_payload = None
|
201 |
-
if response.status != 200:
|
202 |
-
try:
|
203 |
-
response_error_payload = await response.json() # get payload before connection closed
|
204 |
-
except Exception:
|
205 |
-
pass
|
206 |
-
response.raise_for_status()
|
207 |
-
if stream:
|
208 |
-
return _async_yield_from(client, response)
|
209 |
-
else:
|
210 |
-
content = await response.read()
|
211 |
-
await client.close()
|
212 |
-
return content
|
213 |
-
except TimeoutError as error:
|
214 |
-
await client.close()
|
215 |
-
# Convert any `TimeoutError` to a `InferenceTimeoutError`
|
216 |
-
raise InferenceTimeoutError(f"Inference call timed out: {url}") from error
|
217 |
-
except aiohttp.ClientResponseError as error:
|
218 |
-
error.response_error_payload = response_error_payload
|
219 |
-
await client.close()
|
220 |
-
if response.status == 503:
|
221 |
-
# If Model is unavailable, either raise a TimeoutError...
|
222 |
-
if timeout is not None and time.time() - t0 > timeout:
|
223 |
-
raise InferenceTimeoutError(
|
224 |
-
f"Model not loaded on the server: {url}. Please retry with a higher timeout"
|
225 |
-
f" (current: {self.timeout})."
|
226 |
-
) from error
|
227 |
-
# ...or wait 1s and retry
|
228 |
-
logger.info(f"Waiting for model to be loaded on the server: {error}")
|
229 |
-
time.sleep(1)
|
230 |
-
if timeout is not None:
|
231 |
-
timeout = max(self.timeout - (time.time() - t0), 1) # type: ignore
|
232 |
-
continue
|
233 |
-
raise error
|
234 |
-
|
235 |
-
async def audio_classification(
|
236 |
-
self,
|
237 |
-
audio: ContentT,
|
238 |
-
*,
|
239 |
-
model: Optional[str] = None,
|
240 |
-
) -> List[ClassificationOutput]:
|
241 |
-
"""
|
242 |
-
Perform audio classification on the provided audio content.
|
243 |
-
|
244 |
-
Args:
|
245 |
-
audio (Union[str, Path, bytes, BinaryIO]):
|
246 |
-
The audio content to classify. It can be raw audio bytes, a local audio file, or a URL pointing to an
|
247 |
-
audio file.
|
248 |
-
model (`str`, *optional*):
|
249 |
-
The model to use for audio classification. Can be a model ID hosted on the Hugging Face Hub
|
250 |
-
or a URL to a deployed Inference Endpoint. If not provided, the default recommended model for
|
251 |
-
audio classification will be used.
|
252 |
-
|
253 |
-
Returns:
|
254 |
-
`List[Dict]`: The classification output containing the predicted label and its confidence.
|
255 |
-
|
256 |
-
Raises:
|
257 |
-
[`InferenceTimeoutError`]:
|
258 |
-
If the model is unavailable or the request times out.
|
259 |
-
`aiohttp.ClientResponseError`:
|
260 |
-
If the request fails with an HTTP error status code other than HTTP 503.
|
261 |
-
|
262 |
-
Example:
|
263 |
-
```py
|
264 |
-
# Must be run in an async context
|
265 |
-
>>> from huggingface_hub import AsyncInferenceClient
|
266 |
-
>>> client = AsyncInferenceClient()
|
267 |
-
>>> await client.audio_classification("audio.flac")
|
268 |
-
[{'score': 0.4976358711719513, 'label': 'hap'}, {'score': 0.3677836060523987, 'label': 'neu'},...]
|
269 |
-
```
|
270 |
-
"""
|
271 |
-
response = await self.post(data=audio, model=model, task="audio-classification")
|
272 |
-
return _bytes_to_dict(response)
|
273 |
-
|
274 |
-
async def automatic_speech_recognition(
|
275 |
-
self,
|
276 |
-
audio: ContentT,
|
277 |
-
*,
|
278 |
-
model: Optional[str] = None,
|
279 |
-
) -> str:
|
280 |
-
"""
|
281 |
-
Perform automatic speech recognition (ASR or audio-to-text) on the given audio content.
|
282 |
-
|
283 |
-
Args:
|
284 |
-
audio (Union[str, Path, bytes, BinaryIO]):
|
285 |
-
The content to transcribe. It can be raw audio bytes, local audio file, or a URL to an audio file.
|
286 |
-
model (`str`, *optional*):
|
287 |
-
The model to use for ASR. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed
|
288 |
-
Inference Endpoint. If not provided, the default recommended model for ASR will be used.
|
289 |
-
|
290 |
-
Returns:
|
291 |
-
str: The transcribed text.
|
292 |
-
|
293 |
-
Raises:
|
294 |
-
[`InferenceTimeoutError`]:
|
295 |
-
If the model is unavailable or the request times out.
|
296 |
-
`aiohttp.ClientResponseError`:
|
297 |
-
If the request fails with an HTTP error status code other than HTTP 503.
|
298 |
-
|
299 |
-
Example:
|
300 |
-
```py
|
301 |
-
# Must be run in an async context
|
302 |
-
>>> from huggingface_hub import AsyncInferenceClient
|
303 |
-
>>> client = AsyncInferenceClient()
|
304 |
-
>>> await client.automatic_speech_recognition("hello_world.flac")
|
305 |
-
"hello world"
|
306 |
-
```
|
307 |
-
"""
|
308 |
-
response = await self.post(data=audio, model=model, task="automatic-speech-recognition")
|
309 |
-
return _bytes_to_dict(response)["text"]
|
310 |
-
|
311 |
-
async def conversational(
|
312 |
-
self,
|
313 |
-
text: str,
|
314 |
-
generated_responses: Optional[List[str]] = None,
|
315 |
-
past_user_inputs: Optional[List[str]] = None,
|
316 |
-
*,
|
317 |
-
parameters: Optional[Dict[str, Any]] = None,
|
318 |
-
model: Optional[str] = None,
|
319 |
-
) -> ConversationalOutput:
|
320 |
-
"""
|
321 |
-
Generate conversational responses based on the given input text (i.e. chat with the API).
|
322 |
-
|
323 |
-
Args:
|
324 |
-
text (`str`):
|
325 |
-
The last input from the user in the conversation.
|
326 |
-
generated_responses (`List[str]`, *optional*):
|
327 |
-
A list of strings corresponding to the earlier replies from the model. Defaults to None.
|
328 |
-
past_user_inputs (`List[str]`, *optional*):
|
329 |
-
A list of strings corresponding to the earlier replies from the user. Should be the same length as
|
330 |
-
`generated_responses`. Defaults to None.
|
331 |
-
parameters (`Dict[str, Any]`, *optional*):
|
332 |
-
Additional parameters for the conversational task. Defaults to None. For more details about the available
|
333 |
-
parameters, please refer to [this page](https://huggingface.co/docs/api-inference/detailed_parameters#conversational-task)
|
334 |
-
model (`str`, *optional*):
|
335 |
-
The model to use for the conversational task. Can be a model ID hosted on the Hugging Face Hub or a URL to
|
336 |
-
a deployed Inference Endpoint. If not provided, the default recommended conversational model will be used.
|
337 |
-
Defaults to None.
|
338 |
-
|
339 |
-
Returns:
|
340 |
-
`Dict`: The generated conversational output.
|
341 |
-
|
342 |
-
Raises:
|
343 |
-
[`InferenceTimeoutError`]:
|
344 |
-
If the model is unavailable or the request times out.
|
345 |
-
`aiohttp.ClientResponseError`:
|
346 |
-
If the request fails with an HTTP error status code other than HTTP 503.
|
347 |
-
|
348 |
-
Example:
|
349 |
-
```py
|
350 |
-
# Must be run in an async context
|
351 |
-
>>> from huggingface_hub import AsyncInferenceClient
|
352 |
-
>>> client = AsyncInferenceClient()
|
353 |
-
>>> output = await client.conversational("Hi, who are you?")
|
354 |
-
>>> output
|
355 |
-
{'generated_text': 'I am the one who knocks.', 'conversation': {'generated_responses': ['I am the one who knocks.'], 'past_user_inputs': ['Hi, who are you?']}, 'warnings': ['Setting `pad_token_id` to `eos_token_id`:50256 async for open-end generation.']}
|
356 |
-
>>> await client.conversational(
|
357 |
-
... "Wow, that's scary!",
|
358 |
-
... generated_responses=output["conversation"]["generated_responses"],
|
359 |
-
... past_user_inputs=output["conversation"]["past_user_inputs"],
|
360 |
-
... )
|
361 |
-
```
|
362 |
-
"""
|
363 |
-
payload: Dict[str, Any] = {"inputs": {"text": text}}
|
364 |
-
if generated_responses is not None:
|
365 |
-
payload["inputs"]["generated_responses"] = generated_responses
|
366 |
-
if past_user_inputs is not None:
|
367 |
-
payload["inputs"]["past_user_inputs"] = past_user_inputs
|
368 |
-
if parameters is not None:
|
369 |
-
payload["parameters"] = parameters
|
370 |
-
response = await self.post(json=payload, model=model, task="conversational")
|
371 |
-
return _bytes_to_dict(response)
|
372 |
-
|
373 |
-
async def feature_extraction(self, text: str, *, model: Optional[str] = None) -> "np.ndarray":
|
374 |
-
"""
|
375 |
-
Generate embeddings for a given text.
|
376 |
-
|
377 |
-
Args:
|
378 |
-
text (`str`):
|
379 |
-
The text to embed.
|
380 |
-
model (`str`, *optional*):
|
381 |
-
The model to use for the conversational task. Can be a model ID hosted on the Hugging Face Hub or a URL to
|
382 |
-
a deployed Inference Endpoint. If not provided, the default recommended conversational model will be used.
|
383 |
-
Defaults to None.
|
384 |
-
|
385 |
-
Returns:
|
386 |
-
`np.ndarray`: The embedding representing the input text as a float32 numpy array.
|
387 |
-
|
388 |
-
Raises:
|
389 |
-
[`InferenceTimeoutError`]:
|
390 |
-
If the model is unavailable or the request times out.
|
391 |
-
`aiohttp.ClientResponseError`:
|
392 |
-
If the request fails with an HTTP error status code other than HTTP 503.
|
393 |
-
|
394 |
-
Example:
|
395 |
-
```py
|
396 |
-
# Must be run in an async context
|
397 |
-
>>> from huggingface_hub import AsyncInferenceClient
|
398 |
-
>>> client = AsyncInferenceClient()
|
399 |
-
>>> await client.feature_extraction("Hi, who are you?")
|
400 |
-
array([[ 2.424802 , 2.93384 , 1.1750331 , ..., 1.240499, -0.13776633, -0.7889173 ],
|
401 |
-
[-0.42943227, -0.6364878 , -1.693462 , ..., 0.41978157, -2.4336355 , 0.6162071 ],
|
402 |
-
...,
|
403 |
-
[ 0.28552425, -0.928395 , -1.2077185 , ..., 0.76810825, -2.1069427 , 0.6236161 ]], dtype=float32)
|
404 |
-
```
|
405 |
-
"""
|
406 |
-
response = await self.post(json={"inputs": text}, model=model, task="feature-extraction")
|
407 |
-
np = _import_numpy()
|
408 |
-
return np.array(_bytes_to_dict(response)[0], dtype="float32")
|
409 |
-
|
410 |
-
async def image_classification(
|
411 |
-
self,
|
412 |
-
image: ContentT,
|
413 |
-
*,
|
414 |
-
model: Optional[str] = None,
|
415 |
-
) -> List[ClassificationOutput]:
|
416 |
-
"""
|
417 |
-
Perform image classification on the given image using the specified model.
|
418 |
-
|
419 |
-
Args:
|
420 |
-
image (`Union[str, Path, bytes, BinaryIO]`):
|
421 |
-
The image to classify. It can be raw bytes, an image file, or a URL to an online image.
|
422 |
-
model (`str`, *optional*):
|
423 |
-
The model to use for image classification. Can be a model ID hosted on the Hugging Face Hub or a URL to a
|
424 |
-
deployed Inference Endpoint. If not provided, the default recommended model for image classification will be used.
|
425 |
-
|
426 |
-
Returns:
|
427 |
-
`List[Dict]`: a list of dictionaries containing the predicted label and associated probability.
|
428 |
-
|
429 |
-
Raises:
|
430 |
-
[`InferenceTimeoutError`]:
|
431 |
-
If the model is unavailable or the request times out.
|
432 |
-
`aiohttp.ClientResponseError`:
|
433 |
-
If the request fails with an HTTP error status code other than HTTP 503.
|
434 |
-
|
435 |
-
Example:
|
436 |
-
```py
|
437 |
-
# Must be run in an async context
|
438 |
-
>>> from huggingface_hub import AsyncInferenceClient
|
439 |
-
>>> client = AsyncInferenceClient()
|
440 |
-
>>> await client.image_classification("https://upload.wikimedia.org/wikipedia/commons/thumb/4/43/Cute_dog.jpg/320px-Cute_dog.jpg")
|
441 |
-
[{'score': 0.9779096841812134, 'label': 'Blenheim spaniel'}, ...]
|
442 |
-
```
|
443 |
-
"""
|
444 |
-
response = await self.post(data=image, model=model, task="image-classification")
|
445 |
-
return _bytes_to_dict(response)
|
446 |
-
|
447 |
-
async def image_segmentation(
|
448 |
-
self,
|
449 |
-
image: ContentT,
|
450 |
-
*,
|
451 |
-
model: Optional[str] = None,
|
452 |
-
) -> List[ImageSegmentationOutput]:
|
453 |
-
"""
|
454 |
-
Perform image segmentation on the given image using the specified model.
|
455 |
-
|
456 |
-
<Tip warning={true}>
|
457 |
-
|
458 |
-
You must have `PIL` installed if you want to work with images (`pip install Pillow`).
|
459 |
-
|
460 |
-
</Tip>
|
461 |
-
|
462 |
-
Args:
|
463 |
-
image (`Union[str, Path, bytes, BinaryIO]`):
|
464 |
-
The image to segment. It can be raw bytes, an image file, or a URL to an online image.
|
465 |
-
model (`str`, *optional*):
|
466 |
-
The model to use for image segmentation. Can be a model ID hosted on the Hugging Face Hub or a URL to a
|
467 |
-
deployed Inference Endpoint. If not provided, the default recommended model for image segmentation will be used.
|
468 |
-
|
469 |
-
Returns:
|
470 |
-
`List[Dict]`: A list of dictionaries containing the segmented masks and associated attributes.
|
471 |
-
|
472 |
-
Raises:
|
473 |
-
[`InferenceTimeoutError`]:
|
474 |
-
If the model is unavailable or the request times out.
|
475 |
-
`aiohttp.ClientResponseError`:
|
476 |
-
If the request fails with an HTTP error status code other than HTTP 503.
|
477 |
-
|
478 |
-
Example:
|
479 |
-
```py
|
480 |
-
# Must be run in an async context
|
481 |
-
>>> from huggingface_hub import AsyncInferenceClient
|
482 |
-
>>> client = AsyncInferenceClient()
|
483 |
-
>>> await client.image_segmentation("cat.jpg"):
|
484 |
-
[{'score': 0.989008, 'label': 'LABEL_184', 'mask': <PIL.PngImagePlugin.PngImageFile image mode=L size=400x300 at 0x7FDD2B129CC0>}, ...]
|
485 |
-
```
|
486 |
-
"""
|
487 |
-
|
488 |
-
# Segment
|
489 |
-
response = await self.post(data=image, model=model, task="image-segmentation")
|
490 |
-
output = _bytes_to_dict(response)
|
491 |
-
|
492 |
-
# Parse masks as PIL Image
|
493 |
-
if not isinstance(output, list):
|
494 |
-
raise ValueError(f"Server output must be a list. Got {type(output)}: {str(output)[:200]}...")
|
495 |
-
for item in output:
|
496 |
-
item["mask"] = _b64_to_image(item["mask"])
|
497 |
-
return output
|
498 |
-
|
499 |
-
async def image_to_image(
|
500 |
-
self,
|
501 |
-
image: ContentT,
|
502 |
-
prompt: Optional[str] = None,
|
503 |
-
*,
|
504 |
-
negative_prompt: Optional[str] = None,
|
505 |
-
height: Optional[int] = None,
|
506 |
-
width: Optional[int] = None,
|
507 |
-
num_inference_steps: Optional[int] = None,
|
508 |
-
guidance_scale: Optional[float] = None,
|
509 |
-
model: Optional[str] = None,
|
510 |
-
**kwargs,
|
511 |
-
) -> "Image":
|
512 |
-
"""
|
513 |
-
Perform image-to-image translation using a specified model.
|
514 |
-
|
515 |
-
<Tip warning={true}>
|
516 |
-
|
517 |
-
You must have `PIL` installed if you want to work with images (`pip install Pillow`).
|
518 |
-
|
519 |
-
</Tip>
|
520 |
-
|
521 |
-
Args:
|
522 |
-
image (`Union[str, Path, bytes, BinaryIO]`):
|
523 |
-
The input image for translation. It can be raw bytes, an image file, or a URL to an online image.
|
524 |
-
prompt (`str`, *optional*):
|
525 |
-
The text prompt to guide the image generation.
|
526 |
-
negative_prompt (`str`, *optional*):
|
527 |
-
A negative prompt to guide the translation process.
|
528 |
-
height (`int`, *optional*):
|
529 |
-
The height in pixels of the generated image.
|
530 |
-
width (`int`, *optional*):
|
531 |
-
The width in pixels of the generated image.
|
532 |
-
num_inference_steps (`int`, *optional*):
|
533 |
-
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
534 |
-
expense of slower inference.
|
535 |
-
guidance_scale (`float`, *optional*):
|
536 |
-
A higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
|
537 |
-
usually at the expense of lower image quality.
|
538 |
-
model (`str`, *optional*):
|
539 |
-
The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed
|
540 |
-
Inference Endpoint. This parameter overrides the model defined at the instance level. Defaults to None.
|
541 |
-
|
542 |
-
Returns:
|
543 |
-
`Image`: The translated image.
|
544 |
-
|
545 |
-
Raises:
|
546 |
-
[`InferenceTimeoutError`]:
|
547 |
-
If the model is unavailable or the request times out.
|
548 |
-
`aiohttp.ClientResponseError`:
|
549 |
-
If the request fails with an HTTP error status code other than HTTP 503.
|
550 |
-
|
551 |
-
Example:
|
552 |
-
```py
|
553 |
-
# Must be run in an async context
|
554 |
-
>>> from huggingface_hub import AsyncInferenceClient
|
555 |
-
>>> client = AsyncInferenceClient()
|
556 |
-
>>> image = await client.image_to_image("cat.jpg", prompt="turn the cat into a tiger")
|
557 |
-
>>> image.save("tiger.jpg")
|
558 |
-
```
|
559 |
-
"""
|
560 |
-
parameters = {
|
561 |
-
"prompt": prompt,
|
562 |
-
"negative_prompt": negative_prompt,
|
563 |
-
"height": height,
|
564 |
-
"width": width,
|
565 |
-
"num_inference_steps": num_inference_steps,
|
566 |
-
"guidance_scale": guidance_scale,
|
567 |
-
**kwargs,
|
568 |
-
}
|
569 |
-
if all(parameter is None for parameter in parameters.values()):
|
570 |
-
# Either only an image to send => send as raw bytes
|
571 |
-
data = image
|
572 |
-
payload: Optional[Dict[str, Any]] = None
|
573 |
-
else:
|
574 |
-
# Or an image + some parameters => use base64 encoding
|
575 |
-
data = None
|
576 |
-
payload = {"inputs": _b64_encode(image)}
|
577 |
-
for key, value in parameters.items():
|
578 |
-
if value is not None:
|
579 |
-
payload[key] = value
|
580 |
-
|
581 |
-
response = await self.post(json=payload, data=data, model=model, task="image-to-image")
|
582 |
-
return _bytes_to_image(response)
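# Illustrative note (added for clarity; not part of the original source): the branches above mean a
# bare call sends the raw image bytes, while passing any generation parameter switches to a JSON
# payload with the image base64-encoded under "inputs". For example (file name and prompt assumed):
#
#     >>> img = await client.image_to_image("cat.jpg")                            # raw bytes payload
#     >>> img = await client.image_to_image("cat.jpg", prompt="make it a tiger")  # base64 JSON payload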
|
583 |
-
|
584 |
-
async def image_to_text(self, image: ContentT, *, model: Optional[str] = None) -> str:
|
585 |
-
"""
|
586 |
-
Takes an input image and returns text.
|
587 |
-
|
588 |
-
Models can have very different outputs depending on your use case (image captioning, optical character recognition
|
589 |
-
(OCR), Pix2Struct, etc). Please have a look at the model card to learn more about a model's specificities.
|
590 |
-
|
591 |
-
Args:
|
592 |
-
image (`Union[str, Path, bytes, BinaryIO]`):
|
593 |
-
The input image to caption. It can be raw bytes, an image file, or a URL to an online image.
|
594 |
-
model (`str`, *optional*):
|
595 |
-
The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed
|
596 |
-
Inference Endpoint. This parameter overrides the model defined at the instance level. Defaults to None.
|
597 |
-
|
598 |
-
Returns:
|
599 |
-
`str`: The generated text.
|
600 |
-
|
601 |
-
Raises:
|
602 |
-
[`InferenceTimeoutError`]:
|
603 |
-
If the model is unavailable or the request times out.
|
604 |
-
`aiohttp.ClientResponseError`:
|
605 |
-
If the request fails with an HTTP error status code other than HTTP 503.
|
606 |
-
|
607 |
-
Example:
|
608 |
-
```py
|
609 |
-
# Must be run in an async context
|
610 |
-
>>> from huggingface_hub import AsyncInferenceClient
|
611 |
-
>>> client = AsyncInferenceClient()
|
612 |
-
>>> await client.image_to_text("cat.jpg")
|
613 |
-
'a cat standing in a grassy field '
|
614 |
-
>>> await client.image_to_text("https://upload.wikimedia.org/wikipedia/commons/thumb/4/43/Cute_dog.jpg/320px-Cute_dog.jpg")
|
615 |
-
'a dog laying on the grass next to a flower pot '
|
616 |
-
```
|
617 |
-
"""
|
618 |
-
response = await self.post(data=image, model=model, task="image-to-text")
|
619 |
-
return _bytes_to_dict(response)[0]["generated_text"]
|
620 |
-
|
621 |
-
async def sentence_similarity(
|
622 |
-
self, sentence: str, other_sentences: List[str], *, model: Optional[str] = None
|
623 |
-
) -> List[float]:
|
624 |
-
"""
|
625 |
-
Compute the semantic similarity between a sentence and a list of other sentences by comparing their embeddings.
|
626 |
-
|
627 |
-
Args:
|
628 |
-
sentence (`str`):
|
629 |
-
The main sentence to compare to others.
|
630 |
-
other_sentences (`List[str]`):
|
631 |
-
The list of sentences to compare to.
|
632 |
-
model (`str`, *optional*):
|
633 |
-
The model to use for the sentence similarity task. Can be a model ID hosted on the Hugging Face Hub or a URL to
|
634 |
-
a deployed Inference Endpoint. If not provided, the default recommended sentence similarity model will be used.
|
635 |
-
Defaults to None.
|
636 |
-
|
637 |
-
Returns:
|
638 |
-
`List[float]`: The similarity scores computed between the source sentence and each of the other sentences.
|
639 |
-
|
640 |
-
Raises:
|
641 |
-
[`InferenceTimeoutError`]:
|
642 |
-
If the model is unavailable or the request times out.
|
643 |
-
`aiohttp.ClientResponseError`:
|
644 |
-
If the request fails with an HTTP error status code other than HTTP 503.
|
645 |
-
|
646 |
-
Example:
|
647 |
-
```py
|
648 |
-
# Must be run in an async context
|
649 |
-
>>> from huggingface_hub import AsyncInferenceClient
|
650 |
-
>>> client = AsyncInferenceClient()
|
651 |
-
>>> await client.sentence_similarity(
|
652 |
-
... "Machine learning is so easy.",
|
653 |
-
... other_sentences=[
|
654 |
-
... "Deep learning is so straightforward.",
|
655 |
-
... "This is so difficult, like rocket science.",
|
656 |
-
... "I can't believe how much I struggled with this.",
|
657 |
-
... ],
|
658 |
-
... )
|
659 |
-
[0.7785726189613342, 0.45876261591911316, 0.2906220555305481]
|
660 |
-
```
|
661 |
-
"""
|
662 |
-
response = await self.post(
|
663 |
-
json={"inputs": {"source_sentence": sentence, "sentences": other_sentences}},
|
664 |
-
model=model,
|
665 |
-
task="sentence-similarity",
|
666 |
-
)
|
667 |
-
return _bytes_to_dict(response)
|
668 |
-
|
669 |
-
async def summarization(
|
670 |
-
self,
|
671 |
-
text: str,
|
672 |
-
*,
|
673 |
-
parameters: Optional[Dict[str, Any]] = None,
|
674 |
-
model: Optional[str] = None,
|
675 |
-
) -> str:
|
676 |
-
"""
|
677 |
-
Generate a summary of a given text using a specified model.
|
678 |
-
|
679 |
-
Args:
|
680 |
-
text (`str`):
|
681 |
-
The input text to summarize.
|
682 |
-
parameters (`Dict[str, Any]`, *optional*):
|
683 |
-
Additional parameters for summarization. Check out this [page](https://huggingface.co/docs/api-inference/detailed_parameters#summarization-task)
|
684 |
-
for more details.
|
685 |
-
model (`str`, *optional*):
|
686 |
-
The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed
|
687 |
-
Inference Endpoint. This parameter overrides the model defined at the instance level. Defaults to None.
|
688 |
-
|
689 |
-
Returns:
|
690 |
-
`str`: The generated summary text.
|
691 |
-
|
692 |
-
Raises:
|
693 |
-
[`InferenceTimeoutError`]:
|
694 |
-
If the model is unavailable or the request times out.
|
695 |
-
`aiohttp.ClientResponseError`:
|
696 |
-
If the request fails with an HTTP error status code other than HTTP 503.
|
697 |
-
|
698 |
-
Example:
|
699 |
-
```py
|
700 |
-
# Must be run in an async context
|
701 |
-
>>> from huggingface_hub import AsyncInferenceClient
|
702 |
-
>>> client = AsyncInferenceClient()
|
703 |
-
>>> await client.summarization("The Eiffel tower...")
|
704 |
-
'The Eiffel tower is one of the most famous landmarks in the world....'
|
705 |
-
```
|
706 |
-
"""
|
707 |
-
payload: Dict[str, Any] = {"inputs": text}
|
708 |
-
if parameters is not None:
|
709 |
-
payload["parameters"] = parameters
|
710 |
-
response = await self.post(json=payload, model=model, task="summarization")
|
711 |
-
return _bytes_to_dict(response)[0]["summary_text"]
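# Illustrative sketch (added; not in the original source): `parameters` is forwarded verbatim to the
# Inference API, so a call could look like the following, with the parameter names assumed from the
# linked summarization-task documentation:
#
#     >>> await client.summarization("The Eiffel tower ...", parameters={"min_length": 10, "max_length": 40})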
|
712 |
-
|
713 |
-
@overload
|
714 |
-
async def text_generation( # type: ignore
|
715 |
-
self,
|
716 |
-
prompt: str,
|
717 |
-
*,
|
718 |
-
details: Literal[False] = ...,
|
719 |
-
stream: Literal[False] = ...,
|
720 |
-
model: Optional[str] = None,
|
721 |
-
do_sample: bool = False,
|
722 |
-
max_new_tokens: int = 20,
|
723 |
-
best_of: Optional[int] = None,
|
724 |
-
repetition_penalty: Optional[float] = None,
|
725 |
-
return_full_text: bool = False,
|
726 |
-
seed: Optional[int] = None,
|
727 |
-
stop_sequences: Optional[List[str]] = None,
|
728 |
-
temperature: Optional[float] = None,
|
729 |
-
top_k: Optional[int] = None,
|
730 |
-
top_p: Optional[float] = None,
|
731 |
-
truncate: Optional[int] = None,
|
732 |
-
typical_p: Optional[float] = None,
|
733 |
-
watermark: bool = False,
|
734 |
-
) -> str:
|
735 |
-
...
|
736 |
-
|
737 |
-
@overload
|
738 |
-
async def text_generation( # type: ignore
|
739 |
-
self,
|
740 |
-
prompt: str,
|
741 |
-
*,
|
742 |
-
details: Literal[True] = ...,
|
743 |
-
stream: Literal[False] = ...,
|
744 |
-
model: Optional[str] = None,
|
745 |
-
do_sample: bool = False,
|
746 |
-
max_new_tokens: int = 20,
|
747 |
-
best_of: Optional[int] = None,
|
748 |
-
repetition_penalty: Optional[float] = None,
|
749 |
-
return_full_text: bool = False,
|
750 |
-
seed: Optional[int] = None,
|
751 |
-
stop_sequences: Optional[List[str]] = None,
|
752 |
-
temperature: Optional[float] = None,
|
753 |
-
top_k: Optional[int] = None,
|
754 |
-
top_p: Optional[float] = None,
|
755 |
-
truncate: Optional[int] = None,
|
756 |
-
typical_p: Optional[float] = None,
|
757 |
-
watermark: bool = False,
|
758 |
-
) -> TextGenerationResponse:
|
759 |
-
...
|
760 |
-
|
761 |
-
@overload
|
762 |
-
async def text_generation( # type: ignore
|
763 |
-
self,
|
764 |
-
prompt: str,
|
765 |
-
*,
|
766 |
-
details: Literal[False] = ...,
|
767 |
-
stream: Literal[True] = ...,
|
768 |
-
model: Optional[str] = None,
|
769 |
-
do_sample: bool = False,
|
770 |
-
max_new_tokens: int = 20,
|
771 |
-
best_of: Optional[int] = None,
|
772 |
-
repetition_penalty: Optional[float] = None,
|
773 |
-
return_full_text: bool = False,
|
774 |
-
seed: Optional[int] = None,
|
775 |
-
stop_sequences: Optional[List[str]] = None,
|
776 |
-
temperature: Optional[float] = None,
|
777 |
-
top_k: Optional[int] = None,
|
778 |
-
top_p: Optional[float] = None,
|
779 |
-
truncate: Optional[int] = None,
|
780 |
-
typical_p: Optional[float] = None,
|
781 |
-
watermark: bool = False,
|
782 |
-
) -> AsyncIterable[str]:
|
783 |
-
...
|
784 |
-
|
785 |
-
@overload
|
786 |
-
async def text_generation(
|
787 |
-
self,
|
788 |
-
prompt: str,
|
789 |
-
*,
|
790 |
-
details: Literal[True] = ...,
|
791 |
-
stream: Literal[True] = ...,
|
792 |
-
model: Optional[str] = None,
|
793 |
-
do_sample: bool = False,
|
794 |
-
max_new_tokens: int = 20,
|
795 |
-
best_of: Optional[int] = None,
|
796 |
-
repetition_penalty: Optional[float] = None,
|
797 |
-
return_full_text: bool = False,
|
798 |
-
seed: Optional[int] = None,
|
799 |
-
stop_sequences: Optional[List[str]] = None,
|
800 |
-
temperature: Optional[float] = None,
|
801 |
-
top_k: Optional[int] = None,
|
802 |
-
top_p: Optional[float] = None,
|
803 |
-
truncate: Optional[int] = None,
|
804 |
-
typical_p: Optional[float] = None,
|
805 |
-
watermark: bool = False,
|
806 |
-
) -> AsyncIterable[TextGenerationStreamResponse]:
|
807 |
-
...
|
808 |
-
|
809 |
-
async def text_generation(
|
810 |
-
self,
|
811 |
-
prompt: str,
|
812 |
-
*,
|
813 |
-
details: bool = False,
|
814 |
-
stream: bool = False,
|
815 |
-
model: Optional[str] = None,
|
816 |
-
do_sample: bool = False,
|
817 |
-
max_new_tokens: int = 20,
|
818 |
-
best_of: Optional[int] = None,
|
819 |
-
repetition_penalty: Optional[float] = None,
|
820 |
-
return_full_text: bool = False,
|
821 |
-
seed: Optional[int] = None,
|
822 |
-
stop_sequences: Optional[List[str]] = None,
|
823 |
-
temperature: Optional[float] = None,
|
824 |
-
top_k: Optional[int] = None,
|
825 |
-
top_p: Optional[float] = None,
|
826 |
-
truncate: Optional[int] = None,
|
827 |
-
typical_p: Optional[float] = None,
|
828 |
-
watermark: bool = False,
|
829 |
-
decoder_input_details: bool = False,
|
830 |
-
) -> Union[str, TextGenerationResponse, AsyncIterable[str], AsyncIterable[TextGenerationStreamResponse]]:
|
831 |
-
"""
|
832 |
-
Given a prompt, generate the following text.
|
833 |
-
|
834 |
-
It is recommended to have Pydantic installed in order to get inputs validated. This is preferable as it allows
|
835 |
-
early failures.
|
836 |
-
|
837 |
-
The API endpoint is expected to run with the `text-generation-inference` backend (TGI). This backend is the
|
838 |
-
go-to solution to run large language models at scale. However, for some smaller models (e.g. "gpt2") the
|
839 |
-
default `transformers` + `api-inference` solution is still in use. Both approaches have very similar APIs, but
|
840 |
-
not exactly the same. This method is compatible with both approaches but some parameters are only available for
|
841 |
-
`text-generation-inference`. If some parameters are ignored, a warning message is triggered but the process
|
842 |
-
continues correctly.
|
843 |
-
|
844 |
-
To learn more about the TGI project, please refer to https://github.com/huggingface/text-generation-inference.
|
845 |
-
|
846 |
-
Args:
|
847 |
-
prompt (`str`):
|
848 |
-
Input text.
|
849 |
-
details (`bool`, *optional*):
|
850 |
-
By default, text_generation returns a string. Pass `details=True` if you want a detailed output (tokens,
|
851 |
-
probabilities, seed, finish reason, etc.). Only available for models running with the
|
852 |
-
`text-generation-inference` backend.
|
853 |
-
stream (`bool`, *optional*):
|
854 |
-
By default, text_generation returns the full generated text. Pass `stream=True` if you want a stream of
|
855 |
-
tokens to be returned. Only available for models running with the `text-generation-inference`
|
856 |
-
backend.
|
857 |
-
model (`str`, *optional*):
|
858 |
-
The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed
|
859 |
-
Inference Endpoint. This parameter overrides the model defined at the instance level. Defaults to None.
|
860 |
-
do_sample (`bool`):
|
861 |
-
Activate logits sampling
|
862 |
-
max_new_tokens (`int`):
|
863 |
-
Maximum number of generated tokens
|
864 |
-
best_of (`int`):
|
865 |
-
Generate best_of sequences and return the one with the highest token logprobs
|
866 |
-
repetition_penalty (`float`):
|
867 |
-
The parameter for repetition penalty. 1.0 means no penalty. See [this
|
868 |
-
paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
|
869 |
-
return_full_text (`bool`):
|
870 |
-
Whether to prepend the prompt to the generated text
|
871 |
-
seed (`int`):
|
872 |
-
Random sampling seed
|
873 |
-
stop_sequences (`List[str]`):
|
874 |
-
Stop generating tokens if a member of `stop_sequences` is generated
|
875 |
-
temperature (`float`):
|
876 |
-
The value used to modulate the logits distribution.
|
877 |
-
top_k (`int`):
|
878 |
-
The number of highest probability vocabulary tokens to keep for top-k-filtering.
|
879 |
-
top_p (`float`):
|
880 |
-
If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or
|
881 |
-
higher are kept for generation.
|
882 |
-
truncate (`int`):
|
883 |
-
Truncate input tokens to the given size
|
884 |
-
typical_p (`float`):
|
885 |
-
Typical Decoding mass
|
886 |
-
See [Typical Decoding for Natural Language Generation](https://arxiv.org/abs/2202.00666) for more information
|
887 |
-
watermark (`bool`):
|
888 |
-
Watermarking with [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226)
|
889 |
-
decoder_input_details (`bool`):
|
890 |
-
Return the decoder input token logprobs and ids. You must set `details=True` as well for it to be taken
|
891 |
-
into account. Defaults to `False`.
|
892 |
-
|
893 |
-
Returns:
|
894 |
-
`Union[str, TextGenerationResponse, AsyncIterable[str], AsyncIterable[TextGenerationStreamResponse]]`:
|
895 |
-
Generated text returned from the server:
|
896 |
-
- if `stream=False` and `details=False`, the generated text is returned as a `str` (default)
|
897 |
-
- if `stream=True` and `details=False`, the generated text is returned token by token as an `AsyncIterable[str]`
|
898 |
-
- if `stream=False` and `details=True`, the generated text is returned with more details as a [`~huggingface_hub.inference._text_generation.TextGenerationResponse`]
|
899 |
-
- if `details=True` and `stream=True`, the generated text is returned token by token as an async iterable of [`~huggingface_hub.inference._text_generation.TextGenerationStreamResponse`]
|
900 |
-
|
901 |
-
Raises:
|
902 |
-
`ValidationError`:
|
903 |
-
If input values are not valid. No HTTP call is made to the server.
|
904 |
-
[`InferenceTimeoutError`]:
|
905 |
-
If the model is unavailable or the request times out.
|
906 |
-
`aiohttp.ClientResponseError`:
|
907 |
-
If the request fails with an HTTP error status code other than HTTP 503.
|
908 |
-
|
909 |
-
Example:
|
910 |
-
```py
|
911 |
-
# Must be run in an async context
|
912 |
-
>>> from huggingface_hub import AsyncInferenceClient
|
913 |
-
>>> client = AsyncInferenceClient()
|
914 |
-
|
915 |
-
# Case 1: generate text
|
916 |
-
>>> await client.text_generation("The huggingface_hub library is ", max_new_tokens=12)
|
917 |
-
'100% open source and built to be easy to use.'
|
918 |
-
|
919 |
-
# Case 2: iterate over the generated tokens. Useful for large generations.
|
920 |
-
>>> async for token in await client.text_generation("The huggingface_hub library is ", max_new_tokens=12, stream=True):
|
921 |
-
... print(token)
|
922 |
-
100
|
923 |
-
%
|
924 |
-
open
|
925 |
-
source
|
926 |
-
and
|
927 |
-
built
|
928 |
-
to
|
929 |
-
be
|
930 |
-
easy
|
931 |
-
to
|
932 |
-
use
|
933 |
-
.
|
934 |
-
|
935 |
-
# Case 3: get more details about the generation process.
|
936 |
-
>>> await client.text_generation("The huggingface_hub library is ", max_new_tokens=12, details=True)
|
937 |
-
TextGenerationResponse(
|
938 |
-
generated_text='100% open source and built to be easy to use.',
|
939 |
-
details=Details(
|
940 |
-
finish_reason=<FinishReason.Length: 'length'>,
|
941 |
-
generated_tokens=12,
|
942 |
-
seed=None,
|
943 |
-
prefill=[
|
944 |
-
InputToken(id=487, text='The', logprob=None),
|
945 |
-
InputToken(id=53789, text=' hugging', logprob=-13.171875),
|
946 |
-
(...)
|
947 |
-
InputToken(id=204, text=' ', logprob=-7.0390625)
|
948 |
-
],
|
949 |
-
tokens=[
|
950 |
-
Token(id=1425, text='100', logprob=-1.0175781, special=False),
|
951 |
-
Token(id=16, text='%', logprob=-0.0463562, special=False),
|
952 |
-
(...)
|
953 |
-
Token(id=25, text='.', logprob=-0.5703125, special=False)
|
954 |
-
],
|
955 |
-
best_of_sequences=None
|
956 |
-
)
|
957 |
-
)
|
958 |
-
|
959 |
-
# Case 4: iterate over the generated tokens with more details.
|
960 |
-
# Last object is more complete, containing the full generated text and the finish reason.
|
961 |
-
>>> async for details in await client.text_generation("The huggingface_hub library is ", max_new_tokens=12, details=True, stream=True):
|
962 |
-
... print(details)
|
963 |
-
...
|
964 |
-
TextGenerationStreamResponse(token=Token(id=1425, text='100', logprob=-1.0175781, special=False), generated_text=None, details=None)
|
965 |
-
TextGenerationStreamResponse(token=Token(id=16, text='%', logprob=-0.0463562, special=False), generated_text=None, details=None)
|
966 |
-
TextGenerationStreamResponse(token=Token(id=1314, text=' open', logprob=-1.3359375, special=False), generated_text=None, details=None)
|
967 |
-
TextGenerationStreamResponse(token=Token(id=3178, text=' source', logprob=-0.28100586, special=False), generated_text=None, details=None)
|
968 |
-
TextGenerationStreamResponse(token=Token(id=273, text=' and', logprob=-0.5961914, special=False), generated_text=None, details=None)
|
969 |
-
TextGenerationStreamResponse(token=Token(id=3426, text=' built', logprob=-1.9423828, special=False), generated_text=None, details=None)
|
970 |
-
TextGenerationStreamResponse(token=Token(id=271, text=' to', logprob=-1.4121094, special=False), generated_text=None, details=None)
|
971 |
-
TextGenerationStreamResponse(token=Token(id=314, text=' be', logprob=-1.5224609, special=False), generated_text=None, details=None)
|
972 |
-
TextGenerationStreamResponse(token=Token(id=1833, text=' easy', logprob=-2.1132812, special=False), generated_text=None, details=None)
|
973 |
-
TextGenerationStreamResponse(token=Token(id=271, text=' to', logprob=-0.08520508, special=False), generated_text=None, details=None)
|
974 |
-
TextGenerationStreamResponse(token=Token(id=745, text=' use', logprob=-0.39453125, special=False), generated_text=None, details=None)
|
975 |
-
TextGenerationStreamResponse(token=Token(
|
976 |
-
id=25,
|
977 |
-
text='.',
|
978 |
-
logprob=-0.5703125,
|
979 |
-
special=False),
|
980 |
-
generated_text='100% open source and built to be easy to use.',
|
981 |
-
details=StreamDetails(finish_reason=<FinishReason.Length: 'length'>, generated_tokens=12, seed=None)
|
982 |
-
)
|
983 |
-
```
|
984 |
-
"""
|
985 |
-
# NOTE: Text-generation integration is taken from the text-generation-inference project. It has more features
|
986 |
-
# like input/output validation (if Pydantic is installed). See `_text_generation.py` header for more details.
|
987 |
-
|
988 |
-
if decoder_input_details and not details:
|
989 |
-
warnings.warn(
|
990 |
-
"`decoder_input_details=True` has been passed to the server but `details=False` is set meaning that"
|
991 |
-
" the output from the server will be truncated."
|
992 |
-
)
|
993 |
-
decoder_input_details = False
|
994 |
-
|
995 |
-
# Validate parameters
|
996 |
-
parameters = TextGenerationParameters(
|
997 |
-
best_of=best_of,
|
998 |
-
details=details,
|
999 |
-
do_sample=do_sample,
|
1000 |
-
max_new_tokens=max_new_tokens,
|
1001 |
-
repetition_penalty=repetition_penalty,
|
1002 |
-
return_full_text=return_full_text,
|
1003 |
-
seed=seed,
|
1004 |
-
stop=stop_sequences if stop_sequences is not None else [],
|
1005 |
-
temperature=temperature,
|
1006 |
-
top_k=top_k,
|
1007 |
-
top_p=top_p,
|
1008 |
-
truncate=truncate,
|
1009 |
-
typical_p=typical_p,
|
1010 |
-
watermark=watermark,
|
1011 |
-
decoder_input_details=decoder_input_details,
|
1012 |
-
)
|
1013 |
-
request = TextGenerationRequest(inputs=prompt, stream=stream, parameters=parameters)
|
1014 |
-
payload = asdict(request)
|
1015 |
-
|
1016 |
-
# Remove some parameters if not a TGI server
|
1017 |
-
if not _is_tgi_server(model):
|
1018 |
-
ignored_parameters = []
|
1019 |
-
for key in "watermark", "stop", "details", "decoder_input_details":
|
1020 |
-
if payload["parameters"][key] is not None:
|
1021 |
-
ignored_parameters.append(key)
|
1022 |
-
del payload["parameters"][key]
|
1023 |
-
if len(ignored_parameters) > 0:
|
1024 |
-
warnings.warn(
|
1025 |
-
(
|
1026 |
-
"API endpoint/model for text-generation is not served via TGI. Ignoring parameters"
|
1027 |
-
f" {ignored_parameters}."
|
1028 |
-
),
|
1029 |
-
UserWarning,
|
1030 |
-
)
|
1031 |
-
if details:
|
1032 |
-
warnings.warn(
|
1033 |
-
(
|
1034 |
-
"API endpoint/model for text-generation is not served via TGI. Parameter `details=True` will"
|
1035 |
-
" be ignored meaning only the generated text will be returned."
|
1036 |
-
),
|
1037 |
-
UserWarning,
|
1038 |
-
)
|
1039 |
-
details = False
|
1040 |
-
if stream:
|
1041 |
-
raise ValueError(
|
1042 |
-
"API endpoint/model for text-generation is not served via TGI. Cannot return output as a stream."
|
1043 |
-
" Please pass `stream=False` as input."
|
1044 |
-
)
|
1045 |
-
|
1046 |
-
# Handle errors separately for more precise error messages
|
1047 |
-
try:
|
1048 |
-
bytes_output = await self.post(json=payload, model=model, task="text-generation", stream=stream) # type: ignore
|
1049 |
-
except _import_aiohttp().ClientResponseError as e:
|
1050 |
-
error_message = getattr(e, "response_error_payload", {}).get("error", "")
|
1051 |
-
if e.code == 400 and "The following `model_kwargs` are not used by the model" in error_message:
|
1052 |
-
_set_as_non_tgi(model)
|
1053 |
-
return await self.text_generation( # type: ignore
|
1054 |
-
prompt=prompt,
|
1055 |
-
details=details,
|
1056 |
-
stream=stream,
|
1057 |
-
model=model,
|
1058 |
-
do_sample=do_sample,
|
1059 |
-
max_new_tokens=max_new_tokens,
|
1060 |
-
best_of=best_of,
|
1061 |
-
repetition_penalty=repetition_penalty,
|
1062 |
-
return_full_text=return_full_text,
|
1063 |
-
seed=seed,
|
1064 |
-
stop_sequences=stop_sequences,
|
1065 |
-
temperature=temperature,
|
1066 |
-
top_k=top_k,
|
1067 |
-
top_p=top_p,
|
1068 |
-
truncate=truncate,
|
1069 |
-
typical_p=typical_p,
|
1070 |
-
watermark=watermark,
|
1071 |
-
decoder_input_details=decoder_input_details,
|
1072 |
-
)
|
1073 |
-
raise_text_generation_error(e)
|
1074 |
-
|
1075 |
-
# Parse output
|
1076 |
-
if stream:
|
1077 |
-
return _async_stream_text_generation_response(bytes_output, details) # type: ignore
|
1078 |
-
|
1079 |
-
data = _bytes_to_dict(bytes_output)[0]
|
1080 |
-
return TextGenerationResponse(**data) if details else data["generated_text"]
|
1081 |
-
|
1082 |
-
async def text_to_image(
|
1083 |
-
self,
|
1084 |
-
prompt: str,
|
1085 |
-
*,
|
1086 |
-
negative_prompt: Optional[str] = None,
|
1087 |
-
height: Optional[float] = None,
|
1088 |
-
width: Optional[float] = None,
|
1089 |
-
num_inference_steps: Optional[float] = None,
|
1090 |
-
guidance_scale: Optional[float] = None,
|
1091 |
-
model: Optional[str] = None,
|
1092 |
-
**kwargs,
|
1093 |
-
) -> "Image":
|
1094 |
-
"""
|
1095 |
-
Generate an image based on a given text using a specified model.
|
1096 |
-
|
1097 |
-
<Tip warning={true}>
|
1098 |
-
|
1099 |
-
You must have `PIL` installed if you want to work with images (`pip install Pillow`).
|
1100 |
-
|
1101 |
-
</Tip>
|
1102 |
-
|
1103 |
-
Args:
|
1104 |
-
prompt (`str`):
|
1105 |
-
The prompt to generate an image from.
|
1106 |
-
negative_prompt (`str`, *optional*):
|
1107 |
-
An optional negative prompt for the image generation.
|
1108 |
-
height (`float`, *optional*):
|
1109 |
-
The height in pixels of the image to generate.
|
1110 |
-
width (`float`, *optional*):
|
1111 |
-
The width in pixels of the image to generate.
|
1112 |
-
num_inference_steps (`int`, *optional*):
|
1113 |
-
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
1114 |
-
expense of slower inference.
|
1115 |
-
guidance_scale (`float`, *optional*):
|
1116 |
-
A higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
|
1117 |
-
usually at the expense of lower image quality.
|
1118 |
-
model (`str`, *optional*):
|
1119 |
-
The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed
|
1120 |
-
Inference Endpoint. This parameter overrides the model defined at the instance level. Defaults to None.
|
1121 |
-
|
1122 |
-
Returns:
|
1123 |
-
`Image`: The generated image.
|
1124 |
-
|
1125 |
-
Raises:
|
1126 |
-
[`InferenceTimeoutError`]:
|
1127 |
-
If the model is unavailable or the request times out.
|
1128 |
-
`aiohttp.ClientResponseError`:
|
1129 |
-
If the request fails with an HTTP error status code other than HTTP 503.
|
1130 |
-
|
1131 |
-
Example:
|
1132 |
-
```py
|
1133 |
-
# Must be run in an async context
|
1134 |
-
>>> from huggingface_hub import AsyncInferenceClient
|
1135 |
-
>>> client = AsyncInferenceClient()
|
1136 |
-
|
1137 |
-
>>> image = await client.text_to_image("An astronaut riding a horse on the moon.")
|
1138 |
-
>>> image.save("astronaut.png")
|
1139 |
-
|
1140 |
-
>>> image = await client.text_to_image(
|
1141 |
-
... "An astronaut riding a horse on the moon.",
|
1142 |
-
... negative_prompt="low resolution, blurry",
|
1143 |
-
... model="stabilityai/stable-diffusion-2-1",
|
1144 |
-
... )
|
1145 |
-
>>> image.save("better_astronaut.png")
|
1146 |
-
```
|
1147 |
-
"""
|
1148 |
-
parameters = {
|
1149 |
-
"inputs": prompt,
|
1150 |
-
"negative_prompt": negative_prompt,
|
1151 |
-
"height": height,
|
1152 |
-
"width": width,
|
1153 |
-
"num_inference_steps": num_inference_steps,
|
1154 |
-
"guidance_scale": guidance_scale,
|
1155 |
-
**kwargs,
|
1156 |
-
}
|
1157 |
-
payload = {}
|
1158 |
-
for key, value in parameters.items():
|
1159 |
-
if value is not None:
|
1160 |
-
payload[key] = value
|
1161 |
-
response = await self.post(json=payload, model=model, task="text-to-image")
|
1162 |
-
return _bytes_to_image(response)
|
1163 |
-
|
1164 |
-
async def text_to_speech(self, text: str, *, model: Optional[str] = None) -> bytes:
|
1165 |
-
"""
|
1166 |
-
Synthesize audio of a voice pronouncing a given text.
|
1167 |
-
|
1168 |
-
Args:
|
1169 |
-
text (`str`):
|
1170 |
-
The text to synthesize.
|
1171 |
-
model (`str`, *optional*):
|
1172 |
-
The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed
|
1173 |
-
Inference Endpoint. This parameter overrides the model defined at the instance level. Defaults to None.
|
1174 |
-
|
1175 |
-
Returns:
|
1176 |
-
`bytes`: The generated audio.
|
1177 |
-
|
1178 |
-
Raises:
|
1179 |
-
[`InferenceTimeoutError`]:
|
1180 |
-
If the model is unavailable or the request times out.
|
1181 |
-
`aiohttp.ClientResponseError`:
|
1182 |
-
If the request fails with an HTTP error status code other than HTTP 503.
|
1183 |
-
|
1184 |
-
Example:
|
1185 |
-
```py
|
1186 |
-
# Must be run in an async context
|
1187 |
-
>>> from pathlib import Path
|
1188 |
-
>>> from huggingface_hub import AsyncInferenceClient
|
1189 |
-
>>> client = AsyncInferenceClient()
|
1190 |
-
|
1191 |
-
>>> audio = await client.text_to_speech("Hello world")
|
1192 |
-
>>> Path("hello_world.flac").write_bytes(audio)
|
1193 |
-
```
|
1194 |
-
"""
|
1195 |
-
return await self.post(json={"inputs": text}, model=model, task="text-to-speech")
|
1196 |
-
|
1197 |
-
async def zero_shot_image_classification(
|
1198 |
-
self, image: ContentT, labels: List[str], *, model: Optional[str] = None
|
1199 |
-
) -> List[ClassificationOutput]:
|
1200 |
-
"""
|
1201 |
-
Provide input image and text labels to predict text labels for the image.
|
1202 |
-
|
1203 |
-
Args:
|
1204 |
-
image (`Union[str, Path, bytes, BinaryIO]`):
|
1205 |
-
The input image to caption. It can be raw bytes, an image file, or a URL to an online image.
|
1206 |
-
labels (`List[str]`):
|
1207 |
-
List of candidate labels as strings. `len(labels)` must be greater than 1.
|
1208 |
-
model (`str`, *optional*):
|
1209 |
-
The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed
|
1210 |
-
Inference Endpoint. This parameter overrides the model defined at the instance level. Defaults to None.
|
1211 |
-
|
1212 |
-
Returns:
|
1213 |
-
`List[Dict]`: List of classification outputs containing the predicted labels and their confidence.
|
1214 |
-
|
1215 |
-
Raises:
|
1216 |
-
[`InferenceTimeoutError`]:
|
1217 |
-
If the model is unavailable or the request times out.
|
1218 |
-
`aiohttp.ClientResponseError`:
|
1219 |
-
If the request fails with an HTTP error status code other than HTTP 503.
|
1220 |
-
|
1221 |
-
Example:
|
1222 |
-
```py
|
1223 |
-
# Must be run in an async context
|
1224 |
-
>>> from huggingface_hub import AsyncInferenceClient
|
1225 |
-
>>> client = AsyncInferenceClient()
|
1226 |
-
|
1227 |
-
>>> await client.zero_shot_image_classification(
|
1228 |
-
... "https://upload.wikimedia.org/wikipedia/commons/thumb/4/43/Cute_dog.jpg/320px-Cute_dog.jpg",
|
1229 |
-
... labels=["dog", "cat", "horse"],
|
1230 |
-
... )
|
1231 |
-
[{"label": "dog", "score": 0.956}, ...]
|
1232 |
-
```
|
1233 |
-
"""
|
1234 |
-
|
1235 |
-
# Raise ValueError if fewer than 2 labels are provided
|
1236 |
-
if len(labels) < 2:
|
1237 |
-
raise ValueError("You must specify at least 2 classes to compare. Please specify more than 1 class.")
|
1238 |
-
|
1239 |
-
response = await self.post(
|
1240 |
-
json={"image": _b64_encode(image), "parameters": {"candidate_labels": ",".join(labels)}},
|
1241 |
-
model=model,
|
1242 |
-
task="zero-shot-image-classification",
|
1243 |
-
)
|
1244 |
-
return _bytes_to_dict(response)
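# Illustrative note (added; not in the original source): the candidate labels are sent as a single
# comma-joined string above, so labels that themselves contain commas would likely be misread by the
# server. A plain call with simple labels, as in the docstring example, avoids the issue:
#
#     >>> await client.zero_shot_image_classification("dog.jpg", labels=["dog", "cat", "horse"])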
|
1245 |
-
|
1246 |
-
def _resolve_url(self, model: Optional[str] = None, task: Optional[str] = None) -> str:
|
1247 |
-
model = model or self.model
|
1248 |
-
|
1249 |
-
# If model is already a URL, ignore `task` and return directly
|
1250 |
-
if model is not None and (model.startswith("http://") or model.startswith("https://")):
|
1251 |
-
return model
|
1252 |
-
|
1253 |
-
# If no model but task is set => fetch the recommended one for this task
|
1254 |
-
if model is None:
|
1255 |
-
if task is None:
|
1256 |
-
raise ValueError(
|
1257 |
-
"You must specify at least a model (repo_id or URL) or a task, either when instantiating"
|
1258 |
-
" `InferenceClient` or when making a request."
|
1259 |
-
)
|
1260 |
-
model = _get_recommended_model(task)
|
1261 |
-
|
1262 |
-
# Compute InferenceAPI url
|
1263 |
-
return (
|
1264 |
-
# Feature-extraction and sentence-similarity are the only cases where we handle models with several tasks.
|
1265 |
-
f"{INFERENCE_ENDPOINT}/pipeline/{task}/{model}"
|
1266 |
-
if task in ("feature-extraction", "sentence-similarity")
|
1267 |
-
# Otherwise, we use the default endpoint
|
1268 |
-
else f"{INFERENCE_ENDPOINT}/models/{model}"
|
1269 |
-
)
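# A hedged sketch (added; not in the original source) of the URLs the logic above produces, assuming
# INFERENCE_ENDPOINT is "https://api-inference.huggingface.co":
#
#     client._resolve_url(model="gpt2", task="text-generation")
#     # -> "https://api-inference.huggingface.co/models/gpt2"
#     client._resolve_url(model="sentence-transformers/all-MiniLM-L6-v2", task="sentence-similarity")
#     # -> "https://api-inference.huggingface.co/pipeline/sentence-similarity/sentence-transformers/all-MiniLM-L6-v2"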
|
spaces/DaleChen/AutoGPT/autogpt/agent/__init__.py
DELETED
@@ -1,4 +0,0 @@
|
|
1 |
-
from autogpt.agent.agent import Agent
|
2 |
-
from autogpt.agent.agent_manager import AgentManager
|
3 |
-
|
4 |
-
__all__ = ["Agent", "AgentManager"]
|
spaces/Datasculptor/MusicGen/tests/modules/test_conv.py
DELETED
@@ -1,203 +0,0 @@
|
|
1 |
-
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
2 |
-
# All rights reserved.
|
3 |
-
#
|
4 |
-
# This source code is licensed under the license found in the
|
5 |
-
# LICENSE file in the root directory of this source tree.
|
6 |
-
|
7 |
-
from itertools import product
|
8 |
-
import math
|
9 |
-
import random
|
10 |
-
|
11 |
-
import pytest
|
12 |
-
import torch
|
13 |
-
from torch import nn
|
14 |
-
|
15 |
-
from audiocraft.modules import (
|
16 |
-
NormConv1d,
|
17 |
-
NormConvTranspose1d,
|
18 |
-
StreamableConv1d,
|
19 |
-
StreamableConvTranspose1d,
|
20 |
-
pad1d,
|
21 |
-
unpad1d,
|
22 |
-
)
|
23 |
-
|
24 |
-
|
25 |
-
def test_get_extra_padding_for_conv1d():
|
26 |
-
# TODO: Implement me!
|
27 |
-
pass
|
28 |
-
|
29 |
-
|
30 |
-
def test_pad1d_zeros():
|
31 |
-
x = torch.randn(1, 1, 20)
|
32 |
-
|
33 |
-
xp1 = pad1d(x, (0, 5), mode='constant', value=0.)
|
34 |
-
assert xp1.shape[-1] == 25
|
35 |
-
xp2 = pad1d(x, (5, 5), mode='constant', value=0.)
|
36 |
-
assert xp2.shape[-1] == 30
|
37 |
-
xp3 = pad1d(x, (0, 0), mode='constant', value=0.)
|
38 |
-
assert xp3.shape[-1] == 20
|
39 |
-
xp4 = pad1d(x, (10, 30), mode='constant', value=0.)
|
40 |
-
assert xp4.shape[-1] == 60
|
41 |
-
|
42 |
-
with pytest.raises(AssertionError):
|
43 |
-
pad1d(x, (-1, 0), mode='constant', value=0.)
|
44 |
-
|
45 |
-
with pytest.raises(AssertionError):
|
46 |
-
pad1d(x, (0, -1), mode='constant', value=0.)
|
47 |
-
|
48 |
-
with pytest.raises(AssertionError):
|
49 |
-
pad1d(x, (-1, -1), mode='constant', value=0.)
|
50 |
-
|
51 |
-
|
52 |
-
def test_pad1d_reflect():
|
53 |
-
x = torch.randn(1, 1, 20)
|
54 |
-
|
55 |
-
xp1 = pad1d(x, (0, 5), mode='reflect', value=0.)
|
56 |
-
assert xp1.shape[-1] == 25
|
57 |
-
xp2 = pad1d(x, (5, 5), mode='reflect', value=0.)
|
58 |
-
assert xp2.shape[-1] == 30
|
59 |
-
xp3 = pad1d(x, (0, 0), mode='reflect', value=0.)
|
60 |
-
assert xp3.shape[-1] == 20
|
61 |
-
xp4 = pad1d(x, (10, 30), mode='reflect', value=0.)
|
62 |
-
assert xp4.shape[-1] == 60
|
63 |
-
|
64 |
-
with pytest.raises(AssertionError):
|
65 |
-
pad1d(x, (-1, 0), mode='reflect', value=0.)
|
66 |
-
|
67 |
-
with pytest.raises(AssertionError):
|
68 |
-
pad1d(x, (0, -1), mode='reflect', value=0.)
|
69 |
-
|
70 |
-
with pytest.raises(AssertionError):
|
71 |
-
pad1d(x, (-1, -1), mode='reflect', value=0.)
|
72 |
-
|
73 |
-
|
74 |
-
def test_unpad1d():
|
75 |
-
x = torch.randn(1, 1, 20)
|
76 |
-
|
77 |
-
u1 = unpad1d(x, (5, 5))
|
78 |
-
assert u1.shape[-1] == 10
|
79 |
-
u2 = unpad1d(x, (0, 5))
|
80 |
-
assert u2.shape[-1] == 15
|
81 |
-
u3 = unpad1d(x, (5, 0))
|
82 |
-
assert u3.shape[-1] == 15
|
83 |
-
u4 = unpad1d(x, (0, 0))
|
84 |
-
assert u4.shape[-1] == x.shape[-1]
|
85 |
-
|
86 |
-
with pytest.raises(AssertionError):
|
87 |
-
unpad1d(x, (-1, 0))
|
88 |
-
|
89 |
-
with pytest.raises(AssertionError):
|
90 |
-
unpad1d(x, (0, -1))
|
91 |
-
|
92 |
-
with pytest.raises(AssertionError):
|
93 |
-
unpad1d(x, (-1, -1))
|
94 |
-
|
95 |
-
|
96 |
-
class TestNormConv1d:
|
97 |
-
|
98 |
-
def test_norm_conv1d_modules(self):
|
99 |
-
N, C, T = 2, 2, random.randrange(1, 100_000)
|
100 |
-
t0 = torch.randn(N, C, T)
|
101 |
-
|
102 |
-
C_out, kernel_size, stride = 1, 4, 1
|
103 |
-
expected_out_length = int((T - kernel_size) / stride + 1)
|
104 |
-
wn_conv = NormConv1d(C, 1, kernel_size=4, norm='weight_norm')
|
105 |
-
gn_conv = NormConv1d(C, 1, kernel_size=4, norm='time_group_norm')
|
106 |
-
nn_conv = NormConv1d(C, 1, kernel_size=4, norm='none')
|
107 |
-
|
108 |
-
assert isinstance(wn_conv.norm, nn.Identity)
|
109 |
-
assert isinstance(wn_conv.conv, nn.Conv1d)
|
110 |
-
|
111 |
-
assert isinstance(gn_conv.norm, nn.GroupNorm)
|
112 |
-
assert isinstance(gn_conv.conv, nn.Conv1d)
|
113 |
-
|
114 |
-
assert isinstance(nn_conv.norm, nn.Identity)
|
115 |
-
assert isinstance(nn_conv.conv, nn.Conv1d)
|
116 |
-
|
117 |
-
for conv_layer in [wn_conv, gn_conv, nn_conv]:
|
118 |
-
out = conv_layer(t0)
|
119 |
-
assert isinstance(out, torch.Tensor)
|
120 |
-
assert list(out.shape) == [N, C_out, expected_out_length]
|
121 |
-
|
122 |
-
|
123 |
-
class TestNormConvTranspose1d:
|
124 |
-
|
125 |
-
def test_normalizations(self):
|
126 |
-
N, C, T = 2, 2, random.randrange(1, 100_000)
|
127 |
-
t0 = torch.randn(N, C, T)
|
128 |
-
|
129 |
-
C_out, kernel_size, stride = 1, 4, 1
|
130 |
-
expected_out_length = (T - 1) * stride + (kernel_size - 1) + 1
|
131 |
-
|
132 |
-
wn_convtr = NormConvTranspose1d(C, C_out, kernel_size=kernel_size, stride=stride, norm='weight_norm')
|
133 |
-
gn_convtr = NormConvTranspose1d(C, C_out, kernel_size=kernel_size, stride=stride, norm='time_group_norm')
|
134 |
-
nn_convtr = NormConvTranspose1d(C, C_out, kernel_size=kernel_size, stride=stride, norm='none')
|
135 |
-
|
136 |
-
assert isinstance(wn_convtr.norm, nn.Identity)
|
137 |
-
assert isinstance(wn_convtr.convtr, nn.ConvTranspose1d)
|
138 |
-
|
139 |
-
assert isinstance(gn_convtr.norm, nn.GroupNorm)
|
140 |
-
assert isinstance(gn_convtr.convtr, nn.ConvTranspose1d)
|
141 |
-
|
142 |
-
assert isinstance(nn_convtr.norm, nn.Identity)
|
143 |
-
assert isinstance(nn_convtr.convtr, nn.ConvTranspose1d)
|
144 |
-
|
145 |
-
for convtr_layer in [wn_convtr, gn_convtr, nn_convtr]:
|
146 |
-
out = convtr_layer(t0)
|
147 |
-
assert isinstance(out, torch.Tensor)
|
148 |
-
assert list(out.shape) == [N, C_out, expected_out_length]
|
149 |
-
|
150 |
-
|
151 |
-
class TestStreamableConv1d:
|
152 |
-
|
153 |
-
def get_streamable_conv1d_output_length(self, length, kernel_size, stride, dilation):
|
154 |
-
# StreamableConv1d internally pads to make sure that the last window is full
|
155 |
-
padding_total = (kernel_size - 1) * dilation - (stride - 1)
|
156 |
-
n_frames = (length - kernel_size + padding_total) / stride + 1
|
157 |
-
ideal_length = (math.ceil(n_frames) - 1) * stride + (kernel_size - padding_total)
|
158 |
-
return ideal_length // stride
|
159 |
-
|
160 |
-
def test_streamable_conv1d(self):
|
161 |
-
N, C, T = 2, 2, random.randrange(1, 100_000)
|
162 |
-
t0 = torch.randn(N, C, T)
|
163 |
-
C_out = 1
|
164 |
-
|
165 |
-
# conv params are [(kernel_size, stride, dilation)]
|
166 |
-
conv_params = [(4, 1, 1), (4, 2, 1), (3, 1, 3), (10, 5, 1), (3, 2, 3)]
|
167 |
-
for causal, (kernel_size, stride, dilation) in product([False, True], conv_params):
|
168 |
-
expected_out_length = self.get_streamable_conv1d_output_length(T, kernel_size, stride, dilation)
|
169 |
-
sconv = StreamableConv1d(C, C_out, kernel_size=kernel_size, stride=stride, dilation=dilation, causal=causal)
|
170 |
-
out = sconv(t0)
|
171 |
-
assert isinstance(out, torch.Tensor)
|
172 |
-
print(list(out.shape), [N, C_out, expected_out_length])
|
173 |
-
assert list(out.shape) == [N, C_out, expected_out_length]
|
174 |
-
|
175 |
-
|
176 |
-
class TestStreamableConvTranspose1d:
|
177 |
-
|
178 |
-
def get_streamable_convtr1d_output_length(self, length, kernel_size, stride):
|
179 |
-
padding_total = (kernel_size - stride)
|
180 |
-
return (length - 1) * stride - padding_total + (kernel_size - 1) + 1
|
181 |
-
|
182 |
-
def test_streamable_convtr1d(self):
|
183 |
-
N, C, T = 2, 2, random.randrange(1, 100_000)
|
184 |
-
t0 = torch.randn(N, C, T)
|
185 |
-
|
186 |
-
C_out = 1
|
187 |
-
|
188 |
-
with pytest.raises(AssertionError):
|
189 |
-
StreamableConvTranspose1d(C, C_out, kernel_size=4, causal=False, trim_right_ratio=0.5)
|
190 |
-
StreamableConvTranspose1d(C, C_out, kernel_size=4, causal=True, trim_right_ratio=-1.)
|
191 |
-
StreamableConvTranspose1d(C, C_out, kernel_size=4, causal=True, trim_right_ratio=2)
|
192 |
-
|
193 |
-
# causal params are [(causal, trim_right)]
|
194 |
-
causal_params = [(False, 1.0), (True, 1.0), (True, 0.5), (True, 0.0)]
|
195 |
-
# conv params are [(kernel_size, stride)]
|
196 |
-
conv_params = [(4, 1), (4, 2), (3, 1), (10, 5)]
|
197 |
-
for ((causal, trim_right_ratio), (kernel_size, stride)) in product(causal_params, conv_params):
|
198 |
-
expected_out_length = self.get_streamable_convtr1d_output_length(T, kernel_size, stride)
|
199 |
-
sconvtr = StreamableConvTranspose1d(C, C_out, kernel_size=kernel_size, stride=stride,
|
200 |
-
causal=causal, trim_right_ratio=trim_right_ratio)
|
201 |
-
out = sconvtr(t0)
|
202 |
-
assert isinstance(out, torch.Tensor)
|
203 |
-
assert list(out.shape) == [N, C_out, expected_out_length]
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Datatrooper/zero-shot-image-classification/app.py
DELETED
@@ -1,28 +0,0 @@
|
|
1 |
-
|
2 |
-
import gradio as gr
|
3 |
-
from transformers import pipeline
|
4 |
-
import numpy as np
|
5 |
-
from PIL import Image
|
6 |
-
|
7 |
-
|
8 |
-
pipe = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
|
9 |
-
images="dog.jpg"
|
10 |
-
|
11 |
-
def shot(image, labels_text):
|
12 |
-
PIL_image = Image.fromarray(np.uint8(image)).convert('RGB')
|
13 |
-
labels = labels_text.split(",")
|
14 |
-
res = pipe(images=PIL_image,
|
15 |
-
candidate_labels=labels,
|
16 |
-
hypothesis_template= "This is a photo of a {}")
|
17 |
-
return {dic["label"]: dic["score"] for dic in res}
|
18 |
-
|
19 |
-
iface = gr.Interface(shot,
|
20 |
-
["image", "text"],
|
21 |
-
"label",
|
22 |
-
examples=[["dog.jpg", "dog,cat,bird"],
|
23 |
-
["germany.jpg", "germany,belgium,colombia"],
|
24 |
-
["colombia.jpg", "germany,belgium,colombia"]],
|
25 |
-
description="Add a picture and a list of labels separated by commas",
|
26 |
-
title="Zero-shot Image Classification")
|
27 |
-
|
28 |
-
iface.launch()
|
|
|
|
|
|
|
|
|
spaces/Dinoking/Guccio-AI-Designer/models/stylegan/stylegan_tf/dnnlib/tflib/autosummary.py
DELETED
@@ -1,184 +0,0 @@
|
|
1 |
-
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
|
2 |
-
#
|
3 |
-
# This work is licensed under the Creative Commons Attribution-NonCommercial
|
4 |
-
# 4.0 International License. To view a copy of this license, visit
|
5 |
-
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
|
6 |
-
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
|
7 |
-
|
8 |
-
"""Helper for adding automatically tracked values to Tensorboard.
|
9 |
-
|
10 |
-
Autosummary creates an identity op that internally keeps track of the input
|
11 |
-
values and automatically shows up in TensorBoard. The reported value
|
12 |
-
represents an average over input components. The average is accumulated
|
13 |
-
constantly over time and flushed when save_summaries() is called.
|
14 |
-
|
15 |
-
Notes:
|
16 |
-
- The output tensor must be used as an input for something else in the
|
17 |
-
graph. Otherwise, the autosummary op will not get executed, and the average
|
18 |
-
value will not get accumulated.
|
19 |
-
- It is perfectly fine to include autosummaries with the same name in
|
20 |
-
several places throughout the graph, even if they are executed concurrently.
|
21 |
-
- It is ok to also pass in a python scalar or numpy array. In this case, it
|
22 |
-
is added to the average immediately.
|
23 |
-
"""
|
24 |
-
|
25 |
-
from collections import OrderedDict
|
26 |
-
import numpy as np
|
27 |
-
import tensorflow as tf
|
28 |
-
from tensorboard import summary as summary_lib
|
29 |
-
from tensorboard.plugins.custom_scalar import layout_pb2
|
30 |
-
|
31 |
-
from . import tfutil
|
32 |
-
from .tfutil import TfExpression
|
33 |
-
from .tfutil import TfExpressionEx
|
34 |
-
|
35 |
-
_dtype = tf.float64
|
36 |
-
_vars = OrderedDict() # name => [var, ...]
|
37 |
-
_immediate = OrderedDict() # name => update_op, update_value
|
38 |
-
_finalized = False
|
39 |
-
_merge_op = None
|
40 |
-
|
41 |
-
|
42 |
-
def _create_var(name: str, value_expr: TfExpression) -> TfExpression:
|
43 |
-
"""Internal helper for creating autosummary accumulators."""
|
44 |
-
assert not _finalized
|
45 |
-
name_id = name.replace("/", "_")
|
46 |
-
v = tf.cast(value_expr, _dtype)
|
47 |
-
|
48 |
-
if v.shape.is_fully_defined():
|
49 |
-
size = np.prod(tfutil.shape_to_list(v.shape))
|
50 |
-
size_expr = tf.constant(size, dtype=_dtype)
|
51 |
-
else:
|
52 |
-
size = None
|
53 |
-
size_expr = tf.reduce_prod(tf.cast(tf.shape(v), _dtype))
|
54 |
-
|
55 |
-
if size == 1:
|
56 |
-
if v.shape.ndims != 0:
|
57 |
-
v = tf.reshape(v, [])
|
58 |
-
v = [size_expr, v, tf.square(v)]
|
59 |
-
else:
|
60 |
-
v = [size_expr, tf.reduce_sum(v), tf.reduce_sum(tf.square(v))]
|
61 |
-
v = tf.cond(tf.is_finite(v[1]), lambda: tf.stack(v), lambda: tf.zeros(3, dtype=_dtype))
|
62 |
-
|
63 |
-
with tfutil.absolute_name_scope("Autosummary/" + name_id), tf.control_dependencies(None):
|
64 |
-
var = tf.Variable(tf.zeros(3, dtype=_dtype), trainable=False) # [sum(1), sum(x), sum(x**2)]
|
65 |
-
update_op = tf.cond(tf.is_variable_initialized(var), lambda: tf.assign_add(var, v), lambda: tf.assign(var, v))
|
66 |
-
|
67 |
-
if name in _vars:
|
68 |
-
_vars[name].append(var)
|
69 |
-
else:
|
70 |
-
_vars[name] = [var]
|
71 |
-
return update_op
|
72 |
-
|
73 |
-
|
74 |
-
def autosummary(name: str, value: TfExpressionEx, passthru: TfExpressionEx = None) -> TfExpressionEx:
|
75 |
-
"""Create a new autosummary.
|
76 |
-
|
77 |
-
Args:
|
78 |
-
name: Name to use in TensorBoard
|
79 |
-
value: TensorFlow expression or python value to track
|
80 |
-
-        passthru: Optionally return this TF node without modifications but tack an autosummary update side-effect to this node.
-
-    Example use of the passthru mechanism:
-
-    n = autosummary('l2loss', loss, passthru=n)
-
-    This is a shorthand for the following code:
-
-    with tf.control_dependencies([autosummary('l2loss', loss)]):
-        n = tf.identity(n)
-    """
-    tfutil.assert_tf_initialized()
-    name_id = name.replace("/", "_")
-
-    if tfutil.is_tf_expression(value):
-        with tf.name_scope("summary_" + name_id), tf.device(value.device):
-            update_op = _create_var(name, value)
-            with tf.control_dependencies([update_op]):
-                return tf.identity(value if passthru is None else passthru)
-
-    else: # python scalar or numpy array
-        if name not in _immediate:
-            with tfutil.absolute_name_scope("Autosummary/" + name_id), tf.device(None), tf.control_dependencies(None):
-                update_value = tf.placeholder(_dtype)
-                update_op = _create_var(name, update_value)
-                _immediate[name] = update_op, update_value
-
-        update_op, update_value = _immediate[name]
-        tfutil.run(update_op, {update_value: value})
-        return value if passthru is None else passthru
-
-
-def finalize_autosummaries() -> None:
-    """Create the necessary ops to include autosummaries in TensorBoard report.
-    Note: This should be done only once per graph.
-    """
-    global _finalized
-    tfutil.assert_tf_initialized()
-
-    if _finalized:
-        return None
-
-    _finalized = True
-    tfutil.init_uninitialized_vars([var for vars_list in _vars.values() for var in vars_list])
-
-    # Create summary ops.
-    with tf.device(None), tf.control_dependencies(None):
-        for name, vars_list in _vars.items():
-            name_id = name.replace("/", "_")
-            with tfutil.absolute_name_scope("Autosummary/" + name_id):
-                moments = tf.add_n(vars_list)
-                moments /= moments[0]
-                with tf.control_dependencies([moments]): # read before resetting
-                    reset_ops = [tf.assign(var, tf.zeros(3, dtype=_dtype)) for var in vars_list]
-                    with tf.name_scope(None), tf.control_dependencies(reset_ops): # reset before reporting
-                        mean = moments[1]
-                        std = tf.sqrt(moments[2] - tf.square(moments[1]))
-                        tf.summary.scalar(name, mean)
-                        tf.summary.scalar("xCustomScalars/" + name + "/margin_lo", mean - std)
-                        tf.summary.scalar("xCustomScalars/" + name + "/margin_hi", mean + std)
-
-    # Group by category and chart name.
-    cat_dict = OrderedDict()
-    for series_name in sorted(_vars.keys()):
-        p = series_name.split("/")
-        cat = p[0] if len(p) >= 2 else ""
-        chart = "/".join(p[1:-1]) if len(p) >= 3 else p[-1]
-        if cat not in cat_dict:
-            cat_dict[cat] = OrderedDict()
-        if chart not in cat_dict[cat]:
-            cat_dict[cat][chart] = []
-        cat_dict[cat][chart].append(series_name)
-
-    # Setup custom_scalar layout.
-    categories = []
-    for cat_name, chart_dict in cat_dict.items():
-        charts = []
-        for chart_name, series_names in chart_dict.items():
-            series = []
-            for series_name in series_names:
-                series.append(layout_pb2.MarginChartContent.Series(
-                    value=series_name,
-                    lower="xCustomScalars/" + series_name + "/margin_lo",
-                    upper="xCustomScalars/" + series_name + "/margin_hi"))
-            margin = layout_pb2.MarginChartContent(series=series)
-            charts.append(layout_pb2.Chart(title=chart_name, margin=margin))
-        categories.append(layout_pb2.Category(title=cat_name, chart=charts))
-    layout = summary_lib.custom_scalar_pb(layout_pb2.Layout(category=categories))
-    return layout
-
-def save_summaries(file_writer, global_step=None):
-    """Call FileWriter.add_summary() with all summaries in the default graph,
-    automatically finalizing and merging them on the first call.
-    """
-    global _merge_op
-    tfutil.assert_tf_initialized()
-
-    if _merge_op is None:
-        layout = finalize_autosummaries()
-        if layout is not None:
-            file_writer.add_summary(layout)
-        with tf.device(None), tf.control_dependencies(None):
-            _merge_op = tf.summary.merge_all()
-
-    file_writer.add_summary(_merge_op.eval(), global_step)
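
Reading the two functions above: each autosummary name apparently accumulates a 3-element variable that behaves like [count, sum(x), sum(x^2)], and `finalize_autosummaries()` turns that into a mean and a ±std band for TensorBoard. A minimal NumPy sketch of that bookkeeping (illustrative only; names such as `_moments`, `autosummary_sketch`, and `finalize_sketch` are stand-ins for the TF variables created by `_create_var`, which is not shown in this diff):

```
import numpy as np

_moments = {}  # name -> [count, sum(x), sum(x^2)], mirroring the 3-element TF vars

def autosummary_sketch(name, value):
    # Accumulate running moments for `value` under `name` (passthru-style: value is returned unchanged).
    v = np.asarray(value, dtype=np.float64)
    acc = _moments.setdefault(name, np.zeros(3))
    acc += [v.size, v.sum(), np.square(v).sum()]
    return value

def finalize_sketch(name):
    # Recover mean/std the same way finalize_autosummaries() does, then reset the accumulator.
    count, sum_x, sum_x2 = _moments[name]
    mean = sum_x / count                       # moments[1] after dividing by moments[0]
    std = np.sqrt(sum_x2 / count - mean ** 2)  # sqrt(moments[2] - moments[1]^2)
    _moments[name] = np.zeros(3)               # "reset before reporting"
    return mean, std

for _ in range(5):
    autosummary_sketch("Loss/l2", np.random.rand(8))
print(finalize_sketch("Loss/l2"))
```

The division by `moments[0]` in the graph code is exactly the count normalization done in `finalize_sketch` here.
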
spaces/DonDoesStuff/sd_xl_base_0.9/style.css
DELETED
@@ -1,84 +0,0 @@
-#col-container {
-    max-width: 800px;
-    margin-left: auto;
-    margin-right: auto;
-}
-a {
-    color: inherit;
-    text-decoration: underline;
-}
-.gradio-container {
-    font-family: 'IBM Plex Sans', sans-serif;
-}
-.gr-button {
-    color: white;
-    border-color: #9d66e5;
-    background: #9d66e5;
-}
-input[type='range'] {
-    accent-color: #9d66e5;
-}
-.dark input[type='range'] {
-    accent-color: #dfdfdf;
-}
-.container {
-    max-width: 800px;
-    margin: auto;
-    padding-top: 1.5rem;
-}
-#gallery {
-    min-height: 22rem;
-    margin-bottom: 15px;
-    margin-left: auto;
-    margin-right: auto;
-    border-bottom-right-radius: .5rem !important;
-    border-bottom-left-radius: .5rem !important;
-}
-#gallery>div>.h-full {
-    min-height: 20rem;
-}
-.details:hover {
-    text-decoration: underline;
-}
-.gr-button {
-    white-space: nowrap;
-}
-.gr-button:focus {
-    border-color: rgb(147 197 253 / var(--tw-border-opacity));
-    outline: none;
-    box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000);
-    --tw-border-opacity: 1;
-    --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);
-    --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(3px var(--tw-ring-offset-width)) var(--tw-ring-color);
-    --tw-ring-color: rgb(191 219 254 / var(--tw-ring-opacity));
-    --tw-ring-opacity: .5;
-}
-#advanced-options {
-    margin-bottom: 20px;
-}
-.footer {
-    margin-bottom: 45px;
-    margin-top: 35px;
-    text-align: center;
-    border-bottom: 1px solid #e5e5e5;
-}
-.footer>p {
-    font-size: .8rem;
-    display: inline-block;
-    padding: 0 10px;
-    transform: translateY(10px);
-    background: white;
-}
-.dark .logo{ filter: invert(1); }
-.dark .footer {
-    border-color: #303030;
-}
-.dark .footer>p {
-    background: #0b0f19;
-}
-.acknowledgments h4{
-    margin: 1.25em 0 .25em 0;
-    font-weight: bold;
-    font-size: 115%;
-}
-
spaces/Duskfallcrew/shindi-realistic-skin-style/README.md
DELETED
@@ -1,13 +0,0 @@
----
-title: Shindi Realistic Skin Style
-emoji: 🐠
-colorFrom: purple
-colorTo: gray
-sdk: gradio
-sdk_version: 3.16.2
-app_file: app.py
-pinned: false
-license: openrail
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference

spaces/ECCV2022/bytetrack/tutorials/ctracker/mot_online/matching.py
DELETED
@@ -1,198 +0,0 @@
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import lap
-import numpy as np
-import scipy
-from cython_bbox import bbox_overlaps as bbox_ious
-from scipy.spatial.distance import cdist
-
-chi2inv95 = {
-    1: 3.8415,
-    2: 5.9915,
-    3: 7.8147,
-    4: 9.4877,
-    5: 11.070,
-    6: 12.592,
-    7: 14.067,
-    8: 15.507,
-    9: 16.919}
-
-def merge_matches(m1, m2, shape):
-    O,P,Q = shape
-    m1 = np.asarray(m1)
-    m2 = np.asarray(m2)
-
-    M1 = scipy.sparse.coo_matrix((np.ones(len(m1)), (m1[:, 0], m1[:, 1])), shape=(O, P))
-    M2 = scipy.sparse.coo_matrix((np.ones(len(m2)), (m2[:, 0], m2[:, 1])), shape=(P, Q))
-
-    mask = M1*M2
-    match = mask.nonzero()
-    match = list(zip(match[0], match[1]))
-    unmatched_O = tuple(set(range(O)) - set([i for i, j in match]))
-    unmatched_Q = tuple(set(range(Q)) - set([j for i, j in match]))
-
-    return match, unmatched_O, unmatched_Q
-
-
-def _indices_to_matches(cost_matrix, indices, thresh):
-    matched_cost = cost_matrix[tuple(zip(*indices))]
-    matched_mask = (matched_cost <= thresh)
-
-    matches = indices[matched_mask]
-    unmatched_a = tuple(set(range(cost_matrix.shape[0])) - set(matches[:, 0]))
-    unmatched_b = tuple(set(range(cost_matrix.shape[1])) - set(matches[:, 1]))
-
-    return matches, unmatched_a, unmatched_b
-
-
-def linear_assignment(cost_matrix, thresh):
-    if cost_matrix.size == 0:
-        return np.empty((0, 2), dtype=int), tuple(range(cost_matrix.shape[0])), tuple(range(cost_matrix.shape[1]))
-    matches, unmatched_a, unmatched_b = [], [], []
-    cost, x, y = lap.lapjv(cost_matrix, extend_cost=True, cost_limit=thresh)
-    for ix, mx in enumerate(x):
-        if mx >= 0:
-            matches.append([ix, mx])
-    unmatched_a = np.where(x < 0)[0]
-    unmatched_b = np.where(y < 0)[0]
-    matches = np.asarray(matches)
-    return matches, unmatched_a, unmatched_b
-
-
-def ious(atlbrs, btlbrs):
-    """
-    Compute cost based on IoU
-    :type atlbrs: list[tlbr] | np.ndarray
-    :type btlbrs: list[tlbr] | np.ndarray
-
-    :rtype ious np.ndarray
-    """
-    ious = np.zeros((len(atlbrs), len(btlbrs)), dtype=np.float)
-    if ious.size == 0:
-        return ious
-
-    ious = bbox_ious(
-        np.ascontiguousarray(atlbrs, dtype=np.float),
-        np.ascontiguousarray(btlbrs, dtype=np.float)
-    )
-
-    return ious
-
-
-def iou_distance(atracks, btracks):
-    """
-    Compute cost based on IoU
-    :type atracks: list[STrack]
-    :type btracks: list[STrack]
-
-    :rtype cost_matrix np.ndarray
-    """
-
-    if (len(atracks)>0 and isinstance(atracks[0], np.ndarray)) or (len(btracks) > 0 and isinstance(btracks[0], np.ndarray)):
-        atlbrs = atracks
-        btlbrs = btracks
-    else:
-        atlbrs = [track.tlbr for track in atracks]
-        btlbrs = [track.tlbr for track in btracks]
-    _ious = ious(atlbrs, btlbrs)
-    cost_matrix = 1 - _ious
-
-    return cost_matrix
-
-def embedding_distance(tracks, detections, metric='cosine'):
-    """
-    :param tracks: list[STrack]
-    :param detections: list[BaseTrack]
-    :param metric:
-    :return: cost_matrix np.ndarray
-    """
-
-    cost_matrix = np.zeros((len(tracks), len(detections)), dtype=np.float)
-    if cost_matrix.size == 0:
-        return cost_matrix
-    det_features = np.asarray([track.curr_feat for track in detections], dtype=np.float)
-    #for i, track in enumerate(tracks):
-        #cost_matrix[i, :] = np.maximum(0.0, cdist(track.smooth_feat.reshape(1,-1), det_features, metric))
-    track_features = np.asarray([track.smooth_feat for track in tracks], dtype=np.float)
-    cost_matrix = np.maximum(0.0, cdist(track_features, det_features, metric)) # Normalized features
-    return cost_matrix
-
-def embedding_distance2(tracks, detections, metric='cosine'):
-    """
-    :param tracks: list[STrack]
-    :param detections: list[BaseTrack]
-    :param metric:
-    :return: cost_matrix np.ndarray
-    """
-
-    cost_matrix = np.zeros((len(tracks), len(detections)), dtype=np.float)
-    if cost_matrix.size == 0:
-        return cost_matrix
-    det_features = np.asarray([track.curr_feat for track in detections], dtype=np.float)
-    #for i, track in enumerate(tracks):
-        #cost_matrix[i, :] = np.maximum(0.0, cdist(track.smooth_feat.reshape(1,-1), det_features, metric))
-    track_features = np.asarray([track.smooth_feat for track in tracks], dtype=np.float)
-    cost_matrix = np.maximum(0.0, cdist(track_features, det_features, metric)) # Normalized features
-    track_features = np.asarray([track.features[0] for track in tracks], dtype=np.float)
-    cost_matrix2 = np.maximum(0.0, cdist(track_features, det_features, metric)) # Normalized features
-    track_features = np.asarray([track.features[len(track.features)-1] for track in tracks], dtype=np.float)
-    cost_matrix3 = np.maximum(0.0, cdist(track_features, det_features, metric)) # Normalized features
-    for row in range(len(cost_matrix)):
-        cost_matrix[row] = (cost_matrix[row]+cost_matrix2[row]+cost_matrix3[row])/3
-    return cost_matrix
-
-
-def vis_id_feature_A_distance(tracks, detections, metric='cosine'):
-    track_features = []
-    det_features = []
-    leg1 = len(tracks)
-    leg2 = len(detections)
-    cost_matrix = np.zeros((leg1, leg2), dtype=np.float)
-    cost_matrix_det = np.zeros((leg1, leg2), dtype=np.float)
-    cost_matrix_track = np.zeros((leg1, leg2), dtype=np.float)
-    det_features = np.asarray([track.curr_feat for track in detections], dtype=np.float)
-    track_features = np.asarray([track.smooth_feat for track in tracks], dtype=np.float)
-    if leg2 != 0:
-        cost_matrix_det = np.maximum(0.0, cdist(det_features, det_features, metric))
-    if leg1 != 0:
-        cost_matrix_track = np.maximum(0.0, cdist(track_features, track_features, metric))
-    if cost_matrix.size == 0:
-        return track_features, det_features, cost_matrix, cost_matrix_det, cost_matrix_track
-    cost_matrix = np.maximum(0.0, cdist(track_features, det_features, metric))
-    if leg1 > 10:
-        leg1 = 10
-        tracks = tracks[:10]
-    if leg2 > 10:
-        leg2 = 10
-        detections = detections[:10]
-    det_features = np.asarray([track.curr_feat for track in detections], dtype=np.float)
-    track_features = np.asarray([track.smooth_feat for track in tracks], dtype=np.float)
-    return track_features, det_features, cost_matrix, cost_matrix_det, cost_matrix_track
-
-def gate_cost_matrix(kf, cost_matrix, tracks, detections, only_position=False):
-    if cost_matrix.size == 0:
-        return cost_matrix
-    gating_dim = 2 if only_position else 4
-    gating_threshold = chi2inv95[gating_dim]
-    measurements = np.asarray([det.to_xyah() for det in detections])
-    for row, track in enumerate(tracks):
-        gating_distance = kf.gating_distance(
-            track.mean, track.covariance, measurements, only_position)
-        cost_matrix[row, gating_distance > gating_threshold] = np.inf
-    return cost_matrix
-
-
-def fuse_motion(kf, cost_matrix, tracks, detections, only_position=False, lambda_=0.98):
-    if cost_matrix.size == 0:
-        return cost_matrix
-    gating_dim = 2 if only_position else 4
-    gating_threshold = chi2inv95[gating_dim]
-    measurements = np.asarray([det.to_xyah() for det in detections])
-    for row, track in enumerate(tracks):
-        gating_distance = kf.gating_distance(
-            track.mean, track.covariance, measurements, only_position, metric='maha')
-        cost_matrix[row, gating_distance > gating_threshold] = np.inf
-        cost_matrix[row] = lambda_ * cost_matrix[row] + (1 - lambda_) * gating_distance
-    return cost_matrix
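
The heart of this deleted module is `linear_assignment`, which wraps `lap.lapjv(extend_cost=True, cost_limit=thresh)` so that track/detection pairs whose cost exceeds `thresh` stay unmatched. A rough, self-contained sketch of the same behaviour using SciPy's Hungarian solver instead of `lap` (it runs without the `lap`/`cython_bbox` dependencies; the thresholding logic is the point, not the exact solver):

```
import numpy as np
from scipy.optimize import linear_sum_assignment

def linear_assignment_sketch(cost_matrix, thresh):
    # Hungarian matching with a cost threshold, mimicking lap.lapjv(..., cost_limit=thresh).
    if cost_matrix.size == 0:
        return np.empty((0, 2), dtype=int), tuple(range(cost_matrix.shape[0])), tuple(range(cost_matrix.shape[1]))
    rows, cols = linear_sum_assignment(cost_matrix)
    matches = [(r, c) for r, c in zip(rows, cols) if cost_matrix[r, c] <= thresh]
    matched_a = {r for r, _ in matches}
    matched_b = {c for _, c in matches}
    unmatched_a = tuple(i for i in range(cost_matrix.shape[0]) if i not in matched_a)
    unmatched_b = tuple(j for j in range(cost_matrix.shape[1]) if j not in matched_b)
    return np.asarray(matches, dtype=int), unmatched_a, unmatched_b

# Two tracks vs. three detections; entries are 1 - IoU, so low cost = good match.
cost = np.array([[0.1, 0.9, 0.8],
                 [0.7, 0.2, 0.95]])
print(linear_assignment_sketch(cost, thresh=0.5))
# -> both tracks matched (0-0 and 1-1); detection 2 is left unmatched
```

The same cost matrices produced by `iou_distance` or `embedding_distance` above can be fed straight into such a matcher.
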
spaces/EXPOSUREEE/Ai-Image-Enhancer/inference_realesrgan.py
DELETED
@@ -1,128 +0,0 @@
-import argparse
-import cv2
-import glob
-import os
-from basicsr.archs.rrdbnet_arch import RRDBNet
-
-from realesrgan import RealESRGANer
-from realesrgan.archs.srvgg_arch import SRVGGNetCompact
-
-
-def main():
-    """Inference demo for Real-ESRGAN.
-    """
-    parser = argparse.ArgumentParser()
-    parser.add_argument('-i', '--input', type=str, default='inputs', help='Input image or folder')
-    parser.add_argument(
-        '-n',
-        '--model_name',
-        type=str,
-        default='RealESRGAN_x4plus',
-        help=('Model names: RealESRGAN_x4plus | RealESRNet_x4plus | RealESRGAN_x4plus_anime_6B | RealESRGAN_x2plus'
-              'RealESRGANv2-anime-xsx2 | RealESRGANv2-animevideo-xsx2-nousm | RealESRGANv2-animevideo-xsx2'
-              'RealESRGANv2-anime-xsx4 | RealESRGANv2-animevideo-xsx4-nousm | RealESRGANv2-animevideo-xsx4'))
-    parser.add_argument('-o', '--output', type=str, default='results', help='Output folder')
-    parser.add_argument('-s', '--outscale', type=float, default=4, help='The final upsampling scale of the image')
-    parser.add_argument('--suffix', type=str, default='out', help='Suffix of the restored image')
-    parser.add_argument('-t', '--tile', type=int, default=0, help='Tile size, 0 for no tile during testing')
-    parser.add_argument('--tile_pad', type=int, default=10, help='Tile padding')
-    parser.add_argument('--pre_pad', type=int, default=0, help='Pre padding size at each border')
-    parser.add_argument('--face_enhance', action='store_true', help='Use GFPGAN to enhance face')
-    parser.add_argument('--half', action='store_true', help='Use half precision during inference')
-    parser.add_argument(
-        '--alpha_upsampler',
-        type=str,
-        default='realesrgan',
-        help='The upsampler for the alpha channels. Options: realesrgan | bicubic')
-    parser.add_argument(
-        '--ext',
-        type=str,
-        default='auto',
-        help='Image extension. Options: auto | jpg | png, auto means using the same extension as inputs')
-    args = parser.parse_args()
-
-    # determine models according to model names
-    args.model_name = args.model_name.split('.')[0]
-    if args.model_name in ['RealESRGAN_x4plus', 'RealESRNet_x4plus']: # x4 RRDBNet model
-        model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4)
-        netscale = 4
-    elif args.model_name in ['RealESRGAN_x4plus_anime_6B']: # x4 RRDBNet model with 6 blocks
-        model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=6, num_grow_ch=32, scale=4)
-        netscale = 4
-    elif args.model_name in ['RealESRGAN_x2plus']: # x2 RRDBNet model
-        model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=2)
-        netscale = 2
-    elif args.model_name in [
-            'RealESRGANv2-anime-xsx2', 'RealESRGANv2-animevideo-xsx2-nousm', 'RealESRGANv2-animevideo-xsx2'
-    ]: # x2 VGG-style model (XS size)
-        model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=2, act_type='prelu')
-        netscale = 2
-    elif args.model_name in [
-            'RealESRGANv2-anime-xsx4', 'RealESRGANv2-animevideo-xsx4-nousm', 'RealESRGANv2-animevideo-xsx4'
-    ]: # x4 VGG-style model (XS size)
-        model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=4, act_type='prelu')
-        netscale = 4
-
-    # determine model paths
-    model_path = os.path.join('.', args.model_name + '.pth')
-    if not os.path.isfile(model_path):
-        model_path = os.path.join('.', args.model_name + '.pth')
-        if not os.path.isfile(model_path):
-            raise ValueError(f'Model {args.model_name} does not exist.')
-
-    # restorer
-    upsampler = RealESRGANer(
-        scale=netscale,
-        model_path=model_path,
-        model=model,
-        tile=args.tile,
-        tile_pad=args.tile_pad,
-        pre_pad=args.pre_pad,
-        half=args.half)
-
-    if args.face_enhance: # Use GFPGAN for face enhancement
-        from gfpgan import GFPGANer
-        face_enhancer = GFPGANer(
-            model_path='https://github.com/TencentARC/GFPGAN/releases/download/v0.2.0/GFPGANCleanv1-NoCE-C2.pth',
-            upscale=args.outscale,
-            arch='clean',
-            channel_multiplier=2,
-            bg_upsampler=upsampler)
-    os.makedirs(args.output, exist_ok=True)
-
-    if os.path.isfile(args.input):
-        paths = [args.input]
-    else:
-        paths = sorted(glob.glob(os.path.join(args.input, '*')))
-
-    for idx, path in enumerate(paths):
-        imgname, extension = os.path.splitext(os.path.basename(path))
-        print('Testing', idx, imgname)
-
-        img = cv2.imread(path, cv2.IMREAD_UNCHANGED)
-        if len(img.shape) == 3 and img.shape[2] == 4:
-            img_mode = 'RGBA'
-        else:
-            img_mode = None
-
-        try:
-            if args.face_enhance:
-                _, _, output = face_enhancer.enhance(img, has_aligned=False, only_center_face=False, paste_back=True)
-            else:
-                output, _ = upsampler.enhance(img, outscale=args.outscale)
-        except RuntimeError as error:
-            print('Error', error)
-            print('If you encounter CUDA out of memory, try to set --tile with a smaller number.')
-        else:
-            if args.ext == 'auto':
-                extension = extension[1:]
-            else:
-                extension = args.ext
-            if img_mode == 'RGBA': # RGBA images should be saved in png format
-                extension = 'png'
-            save_path = os.path.join(args.output, f'{imgname}_{args.suffix}.{extension}')
-            cv2.imwrite(save_path, output)
-
-
-if __name__ == '__main__':
-    main()
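
Stripped of argument parsing, the script above boils down to: build the network that matches the chosen checkpoint, wrap it in `RealESRGANer`, and call `enhance()` per image. A minimal sketch of that flow, assuming a `RealESRGAN_x4plus.pth` checkpoint in the working directory (the file paths are illustrative):

```
import cv2
from basicsr.archs.rrdbnet_arch import RRDBNet
from realesrgan import RealESRGANer

# x4 RRDBNet backbone, matching the 'RealESRGAN_x4plus' branch above.
model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4)

upsampler = RealESRGANer(
    scale=4,
    model_path='RealESRGAN_x4plus.pth',  # assumed to sit in the working directory
    model=model,
    tile=0,        # try e.g. 400 if you hit CUDA out-of-memory
    tile_pad=10,
    pre_pad=0,
    half=False)

img = cv2.imread('inputs/example.jpg', cv2.IMREAD_UNCHANGED)  # illustrative input path
output, _ = upsampler.enhance(img, outscale=4)
cv2.imwrite('results/example_out.png', output)
```
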
spaces/Epitech/hand-sign-detection/app.py
DELETED
@@ -1,45 +0,0 @@
-import cv2
-import gradio as gr
-import math
-import numpy as np
-from cvzone.ClassificationModule import Classifier
-from cvzone.HandTrackingModule import HandDetector
-
-bgSize = 96
-classifier = Classifier("keras_model.h5", "labels.txt")
-detector = HandDetector(maxHands=1)
-labels = ["Look", "Drink", "Eat", "Ok"]
-offset = 20
-
-def segment(image):
-    hands, frame = detector.findHands(image)
-    try:
-        if hands:
-            hand = hands[0]
-            x, y, w, h = hand['bbox']
-            croppedHand = np.ones((bgSize, bgSize, 3), np.uint8) * 12
-            imgCrop = frame[y - offset:y + h +
-                            offset, x - offset:x + w + offset]
-            aspectRatio = h / w
-            if aspectRatio > 1:
-                constant = bgSize / h
-                wComputed = math.floor(constant * w)
-                bgResize = cv2.resize(imgCrop, (wComputed, bgSize))
-                bgResizeShape = bgResize.shape
-                wGap = math.floor((bgSize-wComputed)/2)
-                croppedHand[:bgResizeShape[0],
-                            wGap:wGap + wComputed] = bgResize
-            else:
-                constant = bgSize / w
-                hComputed = math.floor(constant * h)
-                bgResize = cv2.resize(imgCrop, (bgSize, hComputed))
-                bgResizeShape = bgResize.shape
-                hGap = math.floor((bgSize - hComputed) / 2)
-                croppedHand[hGap: hComputed + hGap, :] = bgResize
-            _, index = classifier.getPrediction(croppedHand, draw=False)
-            return labels[index]
-    except Exception as e:
-        print(e)
-    return 'No sign detected'
-
-gr.interface.Interface(fn=segment, live=True, inputs=gr.Image(source='webcam', streaming=True), outputs="text").launch()
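
Before classification, `segment()` letterboxes the hand crop: the longer side is scaled to `bgSize` and the shorter side is centred on a square canvas. That resize-and-pad step is worth seeing in isolation; a small sketch with a hypothetical `letterbox_square` helper (no webcam, detector, or classifier required):

```
import math
import cv2
import numpy as np

def letterbox_square(crop, bg_size=96, fill=12):
    # Resize `crop` so its longer side equals bg_size, then centre it on a square canvas.
    canvas = np.ones((bg_size, bg_size, 3), np.uint8) * fill
    h, w = crop.shape[:2]
    if h / w > 1:  # taller than wide: fix the height, pad the width
        new_w = math.floor(bg_size / h * w)
        resized = cv2.resize(crop, (new_w, bg_size))
        gap = math.floor((bg_size - new_w) / 2)
        canvas[:, gap:gap + new_w] = resized
    else:          # wider than tall: fix the width, pad the height
        new_h = math.floor(bg_size / w * h)
        resized = cv2.resize(crop, (bg_size, new_h))
        gap = math.floor((bg_size - new_h) / 2)
        canvas[gap:gap + new_h, :] = resized
    return canvas

dummy_crop = (np.random.rand(120, 60, 3) * 255).astype(np.uint8)  # a tall fake hand crop
print(letterbox_square(dummy_crop).shape)  # (96, 96, 3)
```
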
spaces/EuroPython2022/Leaderboard/app.py
DELETED
@@ -1,39 +0,0 @@
-import os
-import requests
-import pandas as pd
-import gradio as gr
-from huggingface_hub.hf_api import SpaceInfo
-from pathlib import Path
-
-
-path = f"https://huggingface.co/api/spaces"
-
-
-def get_europython_spaces():
-    r = requests.get(path)
-    d = r.json()
-    spaces = [SpaceInfo(**x) for x in d]
-    blocks_spaces = {}
-    for i in range(0,len(spaces)):
-        if spaces[i].id.split('/')[0] == 'EuroPython2022' and hasattr(spaces[i], 'likes') and spaces[i].id != 'EuroPython2022/Leaderboard' and spaces[i].id != 'EuroPython2022/README':
-            blocks_spaces[spaces[i].id]=spaces[i].likes
-    df = pd.DataFrame(
-        [{"Spaces_Name": Spaces, "likes": likes} for Spaces,likes in blocks_spaces.items()])
-    df = df.sort_values(by=['likes'],ascending=False)
-    return df
-
-
-block = gr.Blocks()
-
-with block:
-    gr.Markdown("""Leaderboard for the most popular EuroPython 2022 Spaces. To learn more and join, see <a href="https://huggingface.co/EuroPython2022" target="_blank" style="text-decoration: underline">EuroPython 2022 Event</a>""")
-    with gr.Tabs():
-        with gr.TabItem("EuroPython 2022 Leaderboard"):
-            with gr.Row():
-                data = gr.outputs.Dataframe(type="pandas")
-            with gr.Row():
-                data_run = gr.Button("Refresh")
-            data_run.click(get_europython_spaces, inputs=None, outputs=data)
-
-    block.load(get_europython_spaces, inputs=None, outputs=data)
-block.launch()
spaces/EuroPython2022/clickbaitonator/fudge/README.md
DELETED
@@ -1,155 +0,0 @@
-# FUDGE: Controlled Text Generation With Future Discriminators
-
-This repo contains code corresponding to the paper FUDGE: Controlled Text Generation With Future Discriminators (https://arxiv.org/abs/2104.05218) by Kevin Yang and Dan Klein, published at NAACL 2021.
-
-You can also find a video presentation at http://somup.com/crhlVPFKN7 and the corresponding slides in `slides.pptx`.
-
-## Setup/Installation
-
-We tested on Python 3.8.5 but earlier versions of Python 3 are almost certainly fine. To get the required packages (other versions likely to work too):
-
-```
-pip install -r requirements.txt
-```
-
-Additionally, to get our pre-trained predictor checkpoints and training data, run:
-
-```
-wget https://naacl2021-fudge-files.s3.amazonaws.com/large_files.zip
-```
-
-and extract the zip to the top-level `lm-prediction/` folder. (There should be three folders, `ckpt/`, `train_data/`, and `topic_human_evals/`. The zip is 7GB.) Note: the zip does not seem to work for some people; if this is the case, you can get the files directly from https://drive.google.com/drive/folders/1GZfOGqpQxDmIfD2RvuhUQla9eX2OHUXU?usp=sharing (13GB).
-
-`ckpt/` contains predictor checkpoints for each task if you are just interested in running inference. (Note that for the paper results, we used predictors trained with an older version of the code, but the new checkpoints get similar results, so you are OK to use the new predictors provided here if e.g. you just want to use FUDGE as a baseline. You can just run the evaluation commands provided below; it should take maybe 5-60 minutes depending on the task and your compute, assuming you have a GPU.)
-
-`train_data/` contains our GPT2-generated training data for the poetry and topic tasks' predictors. See https://github.com/raosudha89/GYAFC-corpus for instructions on gaining access to the GYAFC data used for the machine translation formality task; replace our dummy folders with the corresponding folders/files if you want to train our formality predictor.
-
-## Clickbait
-To generate outputs, run:
-
-```
-python -u evaluate_clickbait.py --ckpt ckpt/topic/future_word_predictor/model.pth.tar --dataset_info ckpt/topic/future_word_predictor/dataset_info --in_file topic_data/topic_prefixes.txt --condition_lambda 4.0 --verbose --precondition_topk 200 --length_cutoff 80 --device cpu
-
-python -u evaluate_clickbait.py --ckpt ckpt/formality/predictor_gyafc_entertainment_music/model.pth.tar --dataset_info ckpt/formality/predictor_gyafc_entertainment_music/dataset_info --in_file formality_data/fisher_test_oracle.es
-
-python -u evaluate_clickbait.py --ckpt ckpt/topic/future_word_predictor/model.pth.tar --dataset_info ckpt/topic/future_word_predictor/dataset_info --in_file topic_data/topic_prefixes.txt --condition_lambda 4.0 --verbose --precondition_topk 200 --sample_size 3 --max_sample_batch 1 --length_cutoff 80 --log_file clickbait_preds.log
-```
-
-Then evaluate metrics using:
-
-```
-python eval_topic_metrics.py --log_file topic_preds.log --tw_dir topic_data/test_wordlists
-```
-
-
-## Poetry Couplet Completion
-
-### Evaluation
-
-To generate outputs, run:
-
-```
-python -u evaluate_poetry.py --iambic_ckpt ckpt/poetry/iambic_predictor/model.pth.tar --rhyme_ckpt ckpt/poetry/rhyme_predictor/model.pth.tar --newline_ckpt ckpt/poetry/newline_predictor/model.pth.tar --dataset_info ckpt/poetry/rhyme_predictor/dataset_info --rhyme_info ckpt/poetry/rhyme_predictor/rhyme_info --prefix_file poetry_data/couplet_prefixes.txt --precondition_topk 200 > poetry_preds.log
-```
-
-Then evaluate metrics using:
-
-```
-python eval_poetry_metrics.py --pred_file poetry_preds.log --prefix_file poetry_data/couplet_prefixes.txt
-```
-
-### Training your own predictors
-
-Example commands for all three predictors used in the poetry task below. (You probably don't need so many epochs for iambic and rhyme; in any case the commands will save intermediate ckpts, so you can just stop them early if needed by inspecting the log.)
-
-Iambic predictor:
-
-```
-python -u main.py --task iambic --data_dir train_data/gpt2_generations --save_dir ckpt/poetry/iambic_retrain_predictor --num_workers 20 --batch_size 128 --epoch_max_len 100000 --validation_freq 10 --lr 2e-4 --epochs 1500 > iambic_retrain_predictor.log
-```
-
-Rhyme predictor:
-
-```
-python -u main.py --task rhyme --data_dir train_data/gpt2_generations --save_dir ckpt/poetry/rhyme_retrain_predictor --num_workers 20 --batch_size 128 --epoch_max_len 100000 --validation_freq 10 --lr 2e-4 --epochs 1500 > rhyme_retrain_predictor.log
-```
-
-End of sentence predictor (referred to as "newline" in the code; 50 epochs is more than enough for this one):
-
-```
-python -u main.py --task newline --data_dir train_data/gpt2_generations --save_dir ckpt/poetry/newline_retrain_predictor --num_workers 20 --batch_size 128 --epoch_max_len 100000 --validation_freq 10 --lr 2e-4 --epochs 50 > newline_retrain_predictor.log
-```
-
-The same evaluation commands as before will work; just modify the paths in the command to point to `model_best.pth.tar`, `dataset_info`, and `rhyme_info` from your newly trained ckpt folders.
-
-## Topic Control
-
-### Evaluation
-
-To generate outputs, run:
-
-```
-python -u evaluate_topic.py --ckpt ckpt/topic/future_word_predictor/model.pth.tar --dataset_info ckpt/topic/future_word_predictor/dataset_info --prefix_file topic_data/topic_prefixes.txt --wordlist_dir topic_data/wordlists --condition_lambda 4.0 --verbose --precondition_topk 200 --topk 10 --sample_size 3 --max_sample_batch 1 --length_cutoff 80 --log_file topic_preds.log
-```
-
-Then evaluate metrics using:
-
-```
-python eval_topic_metrics.py --log_file topic_preds.log --tw_dir topic_data/test_wordlists
-```
-
-You can also find our original generations and baselines in `topic_human_evals/`.
-
-### Training your own predictors
-
-Example command below.
-
-```
-python -u main.py --task topic --data_dir train_data/gpt2_generations --save_dir ckpt/topic/future_word_retrain_predictor --num_workers 20 --batch_size 128 --epoch_max_len 100000 --validation_freq 10 --lr 2e-4 --epochs 500 --glove_file train_data/glove.840B.300d.txt > future_word_retrain_predictor.log
-```
-
-The same evaluation commands as before will work; just modify the paths in the command to point to `model_best.pth.tar`, `dataset_info`, and `rhyme_info` from your newly trained ckpt folders.
-
-## Machine Translation Formality
-
-### Evaluation
-
-To generate outputs, run:
-
-```
-python -u evaluate_formality.py --ckpt ckpt/formality/predictor_gyafc_entertainment_music/model.pth.tar --dataset_info ckpt/formality/predictor_gyafc_entertainment_music/dataset_info --in_file formality_data/fisher_test_oracle.es --model_path ckpt/formality/marian_finetune_fisher > formality_preds.log
-```
-
-The above command generates predictions using the Marian model finetuned on the Fisher dataset; remove the `--model_path` argument to get predictions with the un-finetuned Marian model from HuggingFace (referred to as 0-shot in the paper).
-
-Then evaluate metrics using:
-
-```
-python eval_formality_metrics.py --pred formality_preds.log --ref formality_data/test.noid.cleaned_0 formality_data/test.noid.cleaned_1 --ckpt ckpt/formality/test_evaluator_gyafc_family_relationships/model.pth.tar --dataset_info ckpt/formality/test_evaluator_gyafc_family_relationships/dataset_info
-```
-
-### Training your own predictors
-
-Example command below. (Reminder: you need to go get the GYAFC dataset following the instructions in https://github.com/raosudha89/GYAFC-corpus.)
-
-```
-python -u main.py --task formality --data_dir train_data/GYAFC_Corpus/Entertainment_Music --save_dir ckpt/formality/formality_retrain_predictor --num_workers 20 --batch_size 32 --epoch_max_len 1000000 --validation_freq 1 --lr 2e-5 --epochs 20 > formality_retrain_predictor.log
-```
-
-(The test-time formality evaluator is trained in the same way, just using the Family/Relationships half of the GYAFC dataset.)
-
-The same evaluation commands as before will work; just modify the paths in the command to point to `model_best.pth.tar`, `dataset_info`, and `rhyme_info` from your newly trained ckpt folders.
-
-## Running FUDGE on your own data
-
-The code has been refactored so that the iambic (poetry), rhyme (poetry), newline (poetry), future word (topic), and formality (machine translation) tasks are controlled by the `--task` flag to `main.py`. You should add your task as another option here, then modify the data processing in `data.py` and the model in `model.py` as needed for your task. (In `data.py` you probably won't need all the entries of the tuple that is expected of the loader; you can just put dummy entries in the ones you don't need.) You might also need to modify the loss computation in the `train` and `validate` functions in `main.py`. You'll probably want to write new evaluation scripts, though the existing poetry/topic/formality ones are hopefully helpful as references.
-
-Alternatively, the general FUDGE framework is pretty simple, so you could always try reimplementing things yourself. A few additional details based on questions I've received:
-
-(1) The formality task setup is likely closest to what you want if you're just trying to run the simplest form of FUDGE (take a language model, and use a classifier to optimize toward a single attribute), although you may need to swap out the Marian translation model/tokenizer we use.
-
-(2) When you construct your training data, if you have an example in your data e.g. "This movie is great!" for positive sentiment, you want to learn on all the pairs (This, +), (This movie, +), (This movie is, +), etc., as that's one of the main points of our approach.
-
-(3) For computational efficiency, we first filter the base model's next token probabilities down to the top 200 (Sec. 3.1 in the paper), before adding the classifier logits. This way you only need to evaluate your classifier on 200 continuations. Then afterward, you filter down again to whatever top-k/greedy/nucleus sampling you're using for evaluation (we use top-k with k=10 for poetry and topic, greedy for formality).
-
-(4) You can use a pretrained LM backbone instead of a simple LSTM backbone for the predictor as well. This should work better when your dataset is smaller.
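
Points (2) and (3) of that README describe FUDGE's two moving parts: a predictor trained on every prefix of each labelled example, and a decoding step that re-ranks the base model's top candidates with that predictor. A hedged sketch of one such decoding step in plain PyTorch (`fudge_step`, `predictor`, and the toy models below are illustrative stand-ins, not the repo's actual classes, and the `cond_lambda` weighting mirrors the `--condition_lambda` flag in the commands above):

```
import torch
import torch.nn.functional as F

def fudge_step(lm_logits, prefix_ids, predictor, cond_lambda=4.0, topk=200, final_k=10):
    # One FUDGE decoding step: rerank the base LM's top candidates with an attribute predictor.
    # lm_logits: (vocab,) next-token logits for the current prefix.
    # predictor: callable mapping candidate prefixes of shape (B, T+1) to log P(attribute | prefix), shape (B,).
    base_logprobs = F.log_softmax(lm_logits, dim=-1)
    top_logprobs, top_ids = base_logprobs.topk(topk)                 # point (3): prefilter to the top 200

    expanded = prefix_ids.unsqueeze(0).repeat(topk, 1)               # (topk, T)
    candidates = torch.cat([expanded, top_ids.unsqueeze(1)], dim=1)  # (topk, T+1): prefix + candidate token
    attr_logprobs = predictor(candidates)                            # evaluate the future discriminator

    combined = top_logprobs + cond_lambda * attr_logprobs            # add the (weighted) classifier log-probs
    best = combined.topk(final_k)                                    # then the usual top-k sampling
    pick = best.indices[torch.multinomial(F.softmax(best.values, dim=-1), 1)]
    return top_ids[pick].item()

# Toy usage with random stand-ins, just to show the shapes involved.
vocab = 1000
prefix = torch.tensor([5, 17, 42])
fake_lm_logits = torch.randn(vocab)
fake_predictor = lambda cands: torch.log(torch.sigmoid(torch.randn(cands.shape[0])))
print(fudge_step(fake_lm_logits, prefix, fake_predictor))
```
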