Commit 057a09e
Parent(s): 9bb6253
Update parquet files (step 100 of 249)
This view is limited to 50 files because it contains too many changes. See raw diff.
- names.txt +0 -0
- spaces/17TheWord/RealESRGAN/realesrgan/models/realesrgan_model.py +0 -258
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download LINK Free FieldIT (CRM) Current Version.md +0 -26
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Excel 2016 Test Questions And Answers Pdf.md +0 -16
- spaces/1gistliPinn/ChatGPT4/Examples/Addictive Drums Authorization Code 111 14.md +0 -109
- spaces/1gistliPinn/ChatGPT4/Examples/DFX Audio Enhancer 13.008 - Repack KpoJIuK .rar.md +0 -6
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Bullet Echo Mod Apk and Enjoy Free Shopping and Epic Battles.md +0 -94
- spaces/1phancelerku/anime-remove-background/Burger Please Mod Apk The Ultimate Fun Game with Unlimited Money.md +0 -132
- spaces/1phancelerku/anime-remove-background/Download Ragnarok X APK - The 3D MMORPG Mobile Game that Brings Back the Classic Masterpiece.md +0 -146
- spaces/1phancelerku/anime-remove-background/FM WhatsApp APK Download for Android - Latest Version 2023 with New Features.md +0 -95
- spaces/1phancelerku/anime-remove-background/Free 3D Models of Orange Trees - Easy to Customize and Render.md +0 -136
- spaces/1toTree/lora_test/ppdiffusers/pipelines/paint_by_example/pipeline_paint_by_example.py +0 -536
- spaces/AI-Dashboards/CP.Matplotlib.NetworkX.Streamlit.PyVis.Graphviz/README.md +0 -13
- spaces/AI-Hobbyist/Hoyo-RVC/docs/training_tips_ko.md +0 -53
- spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/clap/training/zero_shot.py +0 -95
- spaces/AIWaves/Software_Company/README.md +0 -13
- spaces/AIWaves/Software_Company/src/agents/Component/ToolComponent.py +0 -887
- spaces/Abeer123/Pokemon_Digimon/README.md +0 -13
- spaces/AchyuthGamer/OpenGPT/g4f/Provider/Equing.py +0 -81
- spaces/AgentVerse/agentVerse/ui/.github/CODE_OF_CONDUCT.md +0 -84
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/spinner-components.d.ts +0 -39
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/schedulers/scheduling_lms_discrete.py +0 -413
- spaces/Andy1621/uniformer_image_segmentation/configs/ocrnet/ocrnet_r101-d8_512x1024_40k_b8_cityscapes.py +0 -5
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/distributions/base.py +0 -39
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/packaging/requirements.py +0 -146
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/_timer.py +0 -19
- spaces/AtomdffAI/wechatgpt4atom/bot/bot.py +0 -13
- spaces/Bart92/RVC_HF/i18n/scan_i18n.py +0 -75
- spaces/Benson/text-generation/Examples/Descargar Amp Letras De Fuera De Mi Vientre Por Prospa Ochimana.md +0 -56
- spaces/BigChungux/Pet_Survey2/app.py +0 -172
- spaces/Billyosoro/ESRGAN/realesrgan/utils.py +0 -280
- spaces/CVPR/MonoScene/monoscene/.ipynb_checkpoints/monoscene_model-checkpoint.py +0 -22
- spaces/Chris4K/llms_compare/app.py +0 -274
- spaces/ChristopherMarais/Andrew_AI-BB_classification-beta/mysite/andrew_alpha/static/andrew_alpha.js +0 -208
- spaces/CikeyQI/Yunzai/Yunzai/plugins/ws-plugin/config/system/help_system.js +0 -84
- spaces/CikeyQI/meme-api/meme_generator/memes/dont_touch/__init__.py +0 -57
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/altair/vegalite/v5/theme.py +0 -59
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/svgLib/path/shapes.py +0 -183
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/varLib/__main__.py +0 -6
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/index-6f7117a6.js +0 -2
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/index-be790e2e.css +0 -1
- spaces/DaleChen/AutoGPT/autogpt/processing/html.py +0 -33
- spaces/Dantra1/CeliaSensei/models.py +0 -533
- spaces/Datasculptor/DescriptionGPT/tools/download_cc.py +0 -47
- spaces/Datasculptor/MusicGen/tests/utils/__init__.py +0 -5
- spaces/Datasculptor/StyleGAN-NADA/e4e/models/psp.py +0 -99
- spaces/DeepDrivePL/PaddleSeg-Matting/matting/model/hrnet.py +0 -835
- spaces/Detomo/ai-comic-generation/src/lib/useImageDimension.ts +0 -20
- spaces/Dineshdc/MygenAIChatbot/README.md +0 -12
- spaces/DragGan/DragGan/stylegan_human/pti/pti_models/e4e/stylegan2/model.py +0 -680
names.txt
DELETED
The diff for this file is too large to render.
See raw diff
spaces/17TheWord/RealESRGAN/realesrgan/models/realesrgan_model.py
DELETED
@@ -1,258 +0,0 @@
import numpy as np
import random
import torch
from basicsr.data.degradations import random_add_gaussian_noise_pt, random_add_poisson_noise_pt
from basicsr.data.transforms import paired_random_crop
from basicsr.models.srgan_model import SRGANModel
from basicsr.utils import DiffJPEG, USMSharp
from basicsr.utils.img_process_util import filter2D
from basicsr.utils.registry import MODEL_REGISTRY
from collections import OrderedDict
from torch.nn import functional as F


@MODEL_REGISTRY.register()
class RealESRGANModel(SRGANModel):
    """RealESRGAN Model for Real-ESRGAN: Training Real-World Blind Super-Resolution with Pure Synthetic Data.

    It mainly performs:
    1. randomly synthesize LQ images in GPU tensors
    2. optimize the networks with GAN training.
    """

    def __init__(self, opt):
        super(RealESRGANModel, self).__init__(opt)
        self.jpeger = DiffJPEG(differentiable=False).cuda()  # simulate JPEG compression artifacts
        self.usm_sharpener = USMSharp().cuda()  # do usm sharpening
        self.queue_size = opt.get('queue_size', 180)

    @torch.no_grad()
    def _dequeue_and_enqueue(self):
        """It is the training pair pool for increasing the diversity in a batch.

        Batch processing limits the diversity of synthetic degradations in a batch. For example, samples in a
        batch could not have different resize scaling factors. Therefore, we employ this training pair pool
        to increase the degradation diversity in a batch.
        """
        # initialize
        b, c, h, w = self.lq.size()
        if not hasattr(self, 'queue_lr'):
            assert self.queue_size % b == 0, f'queue size {self.queue_size} should be divisible by batch size {b}'
            self.queue_lr = torch.zeros(self.queue_size, c, h, w).cuda()
            _, c, h, w = self.gt.size()
            self.queue_gt = torch.zeros(self.queue_size, c, h, w).cuda()
            self.queue_ptr = 0
        if self.queue_ptr == self.queue_size:  # the pool is full
            # do dequeue and enqueue
            # shuffle
            idx = torch.randperm(self.queue_size)
            self.queue_lr = self.queue_lr[idx]
            self.queue_gt = self.queue_gt[idx]
            # get first b samples
            lq_dequeue = self.queue_lr[0:b, :, :, :].clone()
            gt_dequeue = self.queue_gt[0:b, :, :, :].clone()
            # update the queue
            self.queue_lr[0:b, :, :, :] = self.lq.clone()
            self.queue_gt[0:b, :, :, :] = self.gt.clone()

            self.lq = lq_dequeue
            self.gt = gt_dequeue
        else:
            # only do enqueue
            self.queue_lr[self.queue_ptr:self.queue_ptr + b, :, :, :] = self.lq.clone()
            self.queue_gt[self.queue_ptr:self.queue_ptr + b, :, :, :] = self.gt.clone()
            self.queue_ptr = self.queue_ptr + b

    @torch.no_grad()
    def feed_data(self, data):
        """Accept data from dataloader, and then add two-order degradations to obtain LQ images.
        """
        if self.is_train and self.opt.get('high_order_degradation', True):
            # training data synthesis
            self.gt = data['gt'].to(self.device)
            self.gt_usm = self.usm_sharpener(self.gt)

            self.kernel1 = data['kernel1'].to(self.device)
            self.kernel2 = data['kernel2'].to(self.device)
            self.sinc_kernel = data['sinc_kernel'].to(self.device)

            ori_h, ori_w = self.gt.size()[2:4]

            # ----------------------- The first degradation process ----------------------- #
            # blur
            out = filter2D(self.gt_usm, self.kernel1)
            # random resize
            updown_type = random.choices(['up', 'down', 'keep'], self.opt['resize_prob'])[0]
            if updown_type == 'up':
                scale = np.random.uniform(1, self.opt['resize_range'][1])
            elif updown_type == 'down':
                scale = np.random.uniform(self.opt['resize_range'][0], 1)
            else:
                scale = 1
            mode = random.choice(['area', 'bilinear', 'bicubic'])
            out = F.interpolate(out, scale_factor=scale, mode=mode)
            # add noise
            gray_noise_prob = self.opt['gray_noise_prob']
            if np.random.uniform() < self.opt['gaussian_noise_prob']:
                out = random_add_gaussian_noise_pt(
                    out, sigma_range=self.opt['noise_range'], clip=True, rounds=False, gray_prob=gray_noise_prob)
            else:
                out = random_add_poisson_noise_pt(
                    out,
                    scale_range=self.opt['poisson_scale_range'],
                    gray_prob=gray_noise_prob,
                    clip=True,
                    rounds=False)
            # JPEG compression
            jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range'])
            out = torch.clamp(out, 0, 1)  # clamp to [0, 1], otherwise JPEGer will result in unpleasant artifacts
            out = self.jpeger(out, quality=jpeg_p)

            # ----------------------- The second degradation process ----------------------- #
            # blur
            if np.random.uniform() < self.opt['second_blur_prob']:
                out = filter2D(out, self.kernel2)
            # random resize
            updown_type = random.choices(['up', 'down', 'keep'], self.opt['resize_prob2'])[0]
            if updown_type == 'up':
                scale = np.random.uniform(1, self.opt['resize_range2'][1])
            elif updown_type == 'down':
                scale = np.random.uniform(self.opt['resize_range2'][0], 1)
            else:
                scale = 1
            mode = random.choice(['area', 'bilinear', 'bicubic'])
            out = F.interpolate(
                out, size=(int(ori_h / self.opt['scale'] * scale), int(ori_w / self.opt['scale'] * scale)), mode=mode)
            # add noise
            gray_noise_prob = self.opt['gray_noise_prob2']
            if np.random.uniform() < self.opt['gaussian_noise_prob2']:
                out = random_add_gaussian_noise_pt(
                    out, sigma_range=self.opt['noise_range2'], clip=True, rounds=False, gray_prob=gray_noise_prob)
            else:
                out = random_add_poisson_noise_pt(
                    out,
                    scale_range=self.opt['poisson_scale_range2'],
                    gray_prob=gray_noise_prob,
                    clip=True,
                    rounds=False)

            # JPEG compression + the final sinc filter
            # We also need to resize images to desired sizes. We group [resize back + sinc filter] together
            # as one operation.
            # We consider two orders:
            #   1. [resize back + sinc filter] + JPEG compression
            #   2. JPEG compression + [resize back + sinc filter]
            # Empirically, we find other combinations (sinc + JPEG + Resize) will introduce twisted lines.
            if np.random.uniform() < 0.5:
                # resize back + the final sinc filter
                mode = random.choice(['area', 'bilinear', 'bicubic'])
                out = F.interpolate(out, size=(ori_h // self.opt['scale'], ori_w // self.opt['scale']), mode=mode)
                out = filter2D(out, self.sinc_kernel)
                # JPEG compression
                jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range2'])
                out = torch.clamp(out, 0, 1)
                out = self.jpeger(out, quality=jpeg_p)
            else:
                # JPEG compression
                jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range2'])
                out = torch.clamp(out, 0, 1)
                out = self.jpeger(out, quality=jpeg_p)
                # resize back + the final sinc filter
                mode = random.choice(['area', 'bilinear', 'bicubic'])
                out = F.interpolate(out, size=(ori_h // self.opt['scale'], ori_w // self.opt['scale']), mode=mode)
                out = filter2D(out, self.sinc_kernel)

            # clamp and round
            self.lq = torch.clamp((out * 255.0).round(), 0, 255) / 255.

            # random crop
            gt_size = self.opt['gt_size']
            (self.gt, self.gt_usm), self.lq = paired_random_crop([self.gt, self.gt_usm], self.lq, gt_size,
                                                                 self.opt['scale'])

            # training pair pool
            self._dequeue_and_enqueue()
            # sharpen self.gt again, as we have changed the self.gt with self._dequeue_and_enqueue
            self.gt_usm = self.usm_sharpener(self.gt)
            self.lq = self.lq.contiguous()  # for the warning: grad and param do not obey the gradient layout contract
        else:
            # for paired training or validation
            self.lq = data['lq'].to(self.device)
            if 'gt' in data:
                self.gt = data['gt'].to(self.device)
                self.gt_usm = self.usm_sharpener(self.gt)

    def nondist_validation(self, dataloader, current_iter, tb_logger, save_img):
        # do not use the synthetic process during validation
        self.is_train = False
        super(RealESRGANModel, self).nondist_validation(dataloader, current_iter, tb_logger, save_img)
        self.is_train = True

    def optimize_parameters(self, current_iter):
        # usm sharpening
        l1_gt = self.gt_usm
        percep_gt = self.gt_usm
        gan_gt = self.gt_usm
        if self.opt['l1_gt_usm'] is False:
            l1_gt = self.gt
        if self.opt['percep_gt_usm'] is False:
            percep_gt = self.gt
        if self.opt['gan_gt_usm'] is False:
            gan_gt = self.gt

        # optimize net_g
        for p in self.net_d.parameters():
            p.requires_grad = False

        self.optimizer_g.zero_grad()
        self.output = self.net_g(self.lq)

        l_g_total = 0
        loss_dict = OrderedDict()
        if (current_iter % self.net_d_iters == 0 and current_iter > self.net_d_init_iters):
            # pixel loss
            if self.cri_pix:
                l_g_pix = self.cri_pix(self.output, l1_gt)
                l_g_total += l_g_pix
                loss_dict['l_g_pix'] = l_g_pix
            # perceptual loss
            if self.cri_perceptual:
                l_g_percep, l_g_style = self.cri_perceptual(self.output, percep_gt)
                if l_g_percep is not None:
                    l_g_total += l_g_percep
                    loss_dict['l_g_percep'] = l_g_percep
                if l_g_style is not None:
                    l_g_total += l_g_style
                    loss_dict['l_g_style'] = l_g_style
            # gan loss
            fake_g_pred = self.net_d(self.output)
            l_g_gan = self.cri_gan(fake_g_pred, True, is_disc=False)
            l_g_total += l_g_gan
            loss_dict['l_g_gan'] = l_g_gan

            l_g_total.backward()
            self.optimizer_g.step()

        # optimize net_d
        for p in self.net_d.parameters():
            p.requires_grad = True

        self.optimizer_d.zero_grad()
        # real
        real_d_pred = self.net_d(gan_gt)
        l_d_real = self.cri_gan(real_d_pred, True, is_disc=True)
        loss_dict['l_d_real'] = l_d_real
        loss_dict['out_d_real'] = torch.mean(real_d_pred.detach())
        l_d_real.backward()
        # fake
        fake_d_pred = self.net_d(self.output.detach().clone())  # clone for pt1.9
        l_d_fake = self.cri_gan(fake_d_pred, False, is_disc=True)
        loss_dict['l_d_fake'] = l_d_fake
        loss_dict['out_d_fake'] = torch.mean(fake_d_pred.detach())
        l_d_fake.backward()
        self.optimizer_d.step()

        if self.ema_decay > 0:
            self.model_ema(decay=self.ema_decay)

        self.log_dict = self.reduce_loss_dict(loss_dict)
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download LINK Free FieldIT (CRM) Current Version.md
DELETED
@@ -1,26 +0,0 @@

<h1>How to Download Free FieldIT (CRM) Current Version</h1>
<p>FieldIT (CRM) is a customer relationship management software that helps you manage your contacts, tasks, appointments, documents, and more. It is designed for small and medium businesses that need a simple and affordable solution to organize their data and improve their productivity.</p>
<p>If you want to download free FieldIT (CRM) current version, you can follow these steps:</p>
<h2>Download free FieldIT (CRM) current version</h2><br /><p><b><b>Download Zip</b> — <a href="https://byltly.com/2uKz0T">https://byltly.com/2uKz0T</a></b></p><br /><br />
<ol>
<li>Go to <a href="https://download.cnet.com/FieldIT-CRM/3000-2652_4-75213029.html">this link</a> [^3^] and click on the green Download Now button.</li>
<li>Save the file FieldITCRM.exe to your computer and run it.</li>
<li>Follow the installation wizard to complete the setup.</li>
<li>Launch FieldIT (CRM) and enter your name and email address to register for a free license.</li>
<li>Enjoy using FieldIT (CRM) for your business needs.</li>
</ol>
<p>Note that the current version of FieldIT (CRM) is 3.8.20, which was released on December 28, 2012 [^3^]. It is compatible with Windows 2003, XP, Vista, 7, and 8 [^3^]. If you need more advanced features or support, you can upgrade to a paid version of FieldIT (CRM).</p>
<p>If you are looking for other CRM software options, you can also check out Microsoft Dynamics 365 [^1^] or SAP CRM [^2^], which are more comprehensive and scalable solutions for larger enterprises. They offer various modules and enhancements for different business functions and industries. However, they also require more investment and technical expertise to implement and maintain.</p>
<p>Whatever CRM software you choose, make sure it meets your business goals and requirements. A good CRM software can help you improve your customer satisfaction, loyalty, retention, and revenue.</p><p>Here are some additional tips on how to use CRM software effectively:</p>
<ul>
<li>Keep your data clean and updated. Make sure you enter accurate and complete information about your contacts, leads, opportunities, and activities. Avoid duplicate or outdated records that can cause confusion and errors.</li>
<li>Segment your customers and prospects. Use criteria such as industry, location, size, revenue, needs, preferences, and behavior to group your customers and prospects into different categories. This can help you tailor your marketing campaigns, sales pitches, and service offerings to each segment.</li>
<li>Automate your workflows and processes. Use the features and tools of your CRM software to automate repetitive and routine tasks such as sending emails, scheduling appointments, creating reports, and generating invoices. This can save you time and resources and reduce human errors.</li>
<li>Analyze your data and performance. Use the dashboards, charts, graphs, and reports of your CRM software to monitor and measure your key performance indicators (KPIs) such as sales revenue, conversion rate, customer satisfaction, retention rate, and lifetime value. This can help you identify your strengths and weaknesses and make informed decisions.</li>
<li>Integrate your CRM software with other systems and applications. Connect your CRM software with your email, calendar, social media, accounting, e-commerce, and other platforms that you use for your business. This can help you streamline your data flow and communication and enhance your productivity and efficiency.</li>
</ul>
<p>By following these tips, you can make the most out of your CRM software and boost your business growth.</p>
<p></p> 7b8c122e87<br />
<br />
<br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Excel 2016 Test Questions And Answers Pdf.md
DELETED
@@ -1,16 +0,0 @@

<h1>How to Prepare for Excel 2016 Test Questions and Answers PDF</h1>
<p>If you are planning to take an Excel 2016 test, you might be looking for some resources to help you prepare. One of the best ways to study for an Excel 2016 test is to use a PDF file that contains questions and answers. A PDF file is a document that can be viewed on any device and printed easily. A PDF file that contains Excel 2016 test questions and answers can help you practice your skills, review your knowledge, and identify your strengths and weaknesses.</p>
<p>However, not all PDF files that contain Excel 2016 test questions and answers are created equal. Some PDF files may have outdated, inaccurate, or irrelevant information. Some PDF files may have poor formatting, spelling, or grammar. Some PDF files may have too few or too many questions, or questions that are too easy or too hard. Therefore, you need to be careful when choosing a PDF file that contains Excel 2016 test questions and answers.</p>
<h2>excel 2016 test questions and answers pdf</h2><br /><p><b><b>Download</b> ✯ <a href="https://byltly.com/2uKA0w">https://byltly.com/2uKA0w</a></b></p><br /><br />
<p>Here are some tips to help you find and use a good PDF file that contains Excel 2016 test questions and answers:</p>
<ul>
<li>Look for a reputable source. You should look for a PDF file that comes from a reliable source, such as a reputable website, a certified instructor, or a trusted publisher. You can check the source's credentials, reviews, ratings, or testimonials to verify its quality and credibility.</li>
<li>Look for a recent version. You should look for a PDF file that is updated to reflect the latest changes and features of Excel 2016. You can check the date of publication, the edition number, or the version number to see if the PDF file is current and relevant.</li>
<li>Look for a comprehensive coverage. You should look for a PDF file that covers all the topics and skills that are tested on the Excel 2016 exam. You can check the table of contents, the introduction, or the summary to see if the PDF file has a complete and balanced coverage of the exam content.</li>
<li>Look for a clear format. You should look for a PDF file that has a clear and consistent format that makes it easy to read and understand. You can check the font size, the color scheme, the layout, the headings, the bullet points, the tables, the charts, or the images to see if the PDF file has a good visual presentation.</li>
<li>Look for a correct answer key. You should look for a PDF file that has a correct and detailed answer key that explains how to solve each question. You can check the accuracy, the logic, the steps, the formulas, the functions, or the references to see if the PDF file has a good explanation of the answers.</li>
</ul>
<p>By following these tips, you can find and use a good PDF file that contains Excel 2016 test questions and answers. A good PDF file can help you prepare for your Excel 2016 test effectively and efficiently.</p> ddb901b051<br />
<br />
<br />
spaces/1gistliPinn/ChatGPT4/Examples/Addictive Drums Authorization Code 111 14.md
DELETED
@@ -1,109 +0,0 @@

<h1>Addictive Drums Authorization Code 111 14: How to Unlock the Full Potential of Your Drum Software</h1>

<p>If you are a music producer, composer, or drummer, you probably know how amazing Addictive Drums 2 is. This software allows you to create realistic and expressive drum tracks with ease, using a variety of kits, presets, and effects. You can use it as a standalone application or as a plug-in in your favorite music production software.</p>

<p>But what if you want to access more features and options in Addictive Drums 2? What if you want to customize your drum sounds, mix and match different kits, and tweak every parameter to your liking? Well, you need an authorization code to unlock the full potential of your drum software.</p>
<h2>Addictive Drums Authorization Code 111 14</h2><br /><p><b><b>DOWNLOAD</b> → <a href="https://imgfil.com/2uxYJU">https://imgfil.com/2uxYJU</a></b></p><br /><br />

<h2>What is Addictive Drums Authorization Code 111 14?</h2>

<p>Addictive Drums Authorization Code 111 14 is a special code that you can use to activate Addictive Drums 2 on your computer. This code is provided by XLN Audio, the company that develops and distributes Addictive Drums 2. You can get this code by purchasing Addictive Drums 2 from their official website or from an authorized dealer.</p>

<p>Once you have this code, you can enter it in the Addictive Drums 2 activation window and enjoy all the benefits of the full version of the software. You can use any kit, preset, or effect that you want, and you can also download and install additional content from the XLN Audio website. You can also use Addictive Drums 2 on up to three computers with the same code.</p>

<h2>How to Activate Addictive Drums 2 with Authorization Code 111 14?</h2>

<p>Activating Addictive Drums 2 with Authorization Code 111 14 is very easy and straightforward. Here are the steps you need to follow:</p>

<ol>
<li>Download and install Addictive Drums 2 from the XLN Audio website or from the installation disc that came with your purchase.</li>
<li>Launch Addictive Drums 2 as a standalone application or as a plug-in in your music production software.</li>
<li>Click on the "Activate Product" button in the lower right corner of the Addictive Drums 2 window.</li>
<li>Enter your email address and password that you used to register your product on the XLN Audio website. If you don't have an account yet, you can create one for free.</li>
<li>Enter your Addictive Drums Authorization Code 111 14 in the field provided and click on "Activate".</li>
<li>Wait for the activation process to complete and restart Addictive Drums 2.</li>
</ol>

<p>Congratulations! You have successfully activated Addictive Drums 2 with Authorization Code 111 14. You can now enjoy all the features and options that this software has to offer.</p>

<h2>Why Choose Addictive Drums Authorization Code 111 14?</h2>

<p>Addictive Drums Authorization Code 111 14 is the best way to unlock the full potential of your drum software. Here are some of the reasons why you should choose this code:</p>

<ul>
<li>It is easy to use and activate. You don't need any complicated procedures or technical skills to activate Addictive Drums 2 with this code.</li>
<li>It is secure and reliable. You don't have to worry about viruses, malware, or scams when you use this code. It is provided by XLN Audio, a reputable company that has been in the music industry for over a decade.</li>
<li>It is affordable and worth it. You don't have to spend a fortune to get this code. It is reasonably priced and gives you access to a lot of value-added content and features that will enhance your drum production.</li>
<li>It is compatible and flexible. You can use this code on any computer that meets the minimum system requirements for Addictive Drums 2. You can also use it on up to three computers with the same code.</li>
</ul>

<p>Addictive Drums Authorization Code 111 14 is the ultimate solution for anyone who wants to create professional and realistic drum tracks with ease. Don't miss this opportunity to get this code and experience the power of Addictive Drums 2.</p>
<p></p>

<h3>Get Your Addictive Drums Authorization Code 111 14 Today!</h3>

<p>If you are ready to take your drum production to the next level, don't hesitate to get your Addictive Drums Authorization Code 111 14 today. You can get this code by visiting the XLN Audio website or by contacting an authorized dealer near you.</p>

<p>Addictive Drums Authorization Code 111 14 is the key to unlocking the full potential of your drum software. Get it now and start creating amazing drum tracks with Addictive Drums 2!</p>
<h4>What are the Benefits of Addictive Drums 2?</h4>

<p>Addictive Drums 2 is not just another drum software. It is a complete drum production solution that offers many benefits for music producers, composers, and drummers. Here are some of the benefits of Addictive Drums 2:</p>

<ul>
<li>It is realistic and expressive. Addictive Drums 2 uses high-quality samples and advanced algorithms to create drum sounds that are realistic and dynamic. You can control the velocity, pitch, tone, and articulation of each drum hit with your keyboard, mouse, or MIDI controller. You can also use the built-in groove engine to create realistic drum patterns and variations.</li>
<li>It is versatile and customizable. Addictive Drums 2 comes with a huge library of kits, presets, and effects that cover a wide range of genres and styles. You can mix and match different elements to create your own custom kits and sounds. You can also adjust the volume, pan, EQ, compression, reverb, and other parameters of each drum channel with the intuitive mixer.</li>
<li>It is creative and inspiring. Addictive Drums 2 allows you to experiment and explore different possibilities with your drum tracks. You can use the beat transformer to change the feel and groove of your patterns. You can use the FX section to add distortion, modulation, delay, and other effects to your drums. You can also use the cloud sync feature to access new content and updates from XLN Audio.</li>
</ul>

<p>Addictive Drums 2 is a powerful and flexible drum software that will help you create professional and realistic drum tracks with ease.</p>

<h4>How to Get the Best Out of Addictive Drums Authorization Code 111 14?</h4>

<p>Addictive Drums Authorization Code 111 14 is a great way to activate Addictive Drums 2 on your computer. However, there are some tips and tricks that you can use to get the best out of this code and your drum software. Here are some of them:</p>

<ul>
<li>Make sure your computer meets the minimum system requirements for Addictive Drums 2. You need at least Windows 7 or Mac OS X 10.7, a dual-core CPU, 4 GB of RAM, and an internet connection.</li>
<li>Register your product on the XLN Audio website to get access to additional content and updates. You can also join the XLN Audio community and get support and feedback from other users.</li>
<li>Watch the tutorial videos and read the user manual to learn how to use Addictive Drums 2 effectively. You can also check out the FAQ section and the online help center for more information.</li>
<li>Experiment with different kits, presets, and effects to find your own sound and style. You can also create your own custom kits and presets and save them for later use.</li>
<li>Use MIDI loops or record your own patterns to create realistic and expressive drum tracks. You can also edit and quantize your MIDI data with the built-in editor.</li>
</ul>

<p>Addictive Drums Authorization Code 111 14 is a valuable tool that will help you unlock the full potential of your drum software. Use it wisely and enjoy creating amazing drum tracks with Addictive Drums 2!</p>
<h4>Where to Buy Addictive Drums Authorization Code 111 14?</h4>

<p>If you are interested in buying Addictive Drums Authorization Code 111 14, you have two options. You can either buy it directly from the XLN Audio website or from an authorized dealer near you.</p>

<p>Buying from the XLN Audio website is the easiest and fastest way to get your code. You can choose from different payment methods and currencies, and you can also get instant access to your code and your product downloads. You can also benefit from the XLN Audio loyalty program and get discounts and rewards for your purchases.</p>

<p>Buying from an authorized dealer is another option that you can consider. You can find a list of authorized dealers on the XLN Audio website or by contacting their customer support. Buying from an authorized dealer can give you some advantages, such as local support, warranty, and physical installation discs.</p>

<p>Whichever option you choose, make sure you buy Addictive Drums Authorization Code 111 14 from a legitimate source. Avoid buying from unauthorized sellers or websites that offer suspiciously low prices or free codes. These codes may be fake, stolen, or expired, and they may not work or may cause problems with your software.</p>

<h4>How to Troubleshoot Addictive Drums Authorization Code 111 14?</h4>

<p>Addictive Drums Authorization Code 111 14 is a reliable and secure code that should work without any issues. However, if you encounter any problems with your code or your software activation, here are some tips and solutions that you can try:</p>

<ul>
<li>Make sure you enter your code correctly. Check for any typos, spaces, or extra characters in your code. You can also copy and paste your code from your email or your XLN Audio account to avoid errors.</li>
<li>Make sure you have an internet connection. You need an internet connection to activate Addictive Drums 2 with your code. If your internet connection is slow or unstable, try using a different network or device.</li>
<li>Make sure you have the latest version of Addictive Drums 2 installed. You can check for updates on the XLN Audio website or by using the XLN Online Installer application.</li>
<li>Make sure you have enough disk space and memory on your computer. Addictive Drums 2 requires at least 4 GB of RAM and 15 GB of disk space to run smoothly.</li>
<li>Contact XLN Audio customer support. If none of the above solutions work, you can contact XLN Audio customer support by email or phone. They will help you solve any issues with your code or your software activation.</li>
</ul>

<p>Addictive Drums Authorization Code 111 14 is a simple and effective way to activate Addictive Drums 2 on your computer. If you follow these tips and solutions, you should be able to enjoy your drum software without any problems.</p>
<h4>Conclusion</h4>

<p>Addictive Drums 2 is a powerful and versatile drum production software that can help you create realistic and expressive drum tracks with ease. Whether you use it as a standalone application or as a plug-in in your music production software, Addictive Drums 2 will give you access to a huge library of kits, presets, and effects that cover a wide range of genres and styles.</p>

<p>To unlock the full potential of your drum software, you need Addictive Drums Authorization Code 111 14. This code will allow you to activate Addictive Drums 2 on your computer and enjoy all the features and options that this software has to offer. You can also use this code on up to three computers with the same code.</p>

<p>Addictive Drums Authorization Code 111 14 is easy to use and activate. You can get this code by buying Addictive Drums 2 from the XLN Audio website or from an authorized dealer near you. You can also register your product on the XLN Audio website to get access to additional content and updates.</p>

<p>If you have any problems with your code or your software activation, you can follow the tips and solutions that we have provided in this article. You can also contact XLN Audio customer support for more help and assistance.</p>

<p>Addictive Drums Authorization Code 111 14 is the key to unlocking the full potential of your drum software. Get it now and start creating amazing drum tracks with Addictive Drums 2!</p> 3cee63e6c2<br />
<br />
<br />
spaces/1gistliPinn/ChatGPT4/Examples/DFX Audio Enhancer 13.008 - Repack KpoJIuK .rar.md
DELETED
@@ -1,6 +0,0 @@
<h2>DFX Audio Enhancer 13.008 - Repack KpoJIuK .rar</h2><br /><p><b><b>Download Zip</b> --->>> <a href="https://imgfil.com/2uy1N6">https://imgfil.com/2uy1N6</a></b></p><br /><br />

SCPH- .... PlayStation 2 SCPH 39001.rar 8.11 Mb . SCPH 30000-50000.pdf 2.17 Mb . 31584 . ... DFX Audio Enhancer 13.008 - Repack KpoJIuK .rar · Crack For ... 4d29de3e1b<br />
<br />
<br />
<p></p>
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Bullet Echo Mod Apk and Enjoy Free Shopping and Epic Battles.md
DELETED
@@ -1,94 +0,0 @@
<br />
<h1>Free Download Bullet Echo Mod Apk: A Stealthy Battle Royale Game</h1>
<p>If you are looking for a new and exciting action shooter game to play on your mobile device, you might want to check out Bullet Echo. This game is a PvP tactical team shooter that pits teams of players against each other in intense, competitive matches. You can choose from dozens of different heroes with unique play styles, guns, and abilities. You can also play in various game modes, such as Team vs Team, Solo, and Battle Royale.</p>
<h2>free download bullet echo mod apk</h2><br /><p><b><b>Download Zip</b> > <a href="https://urlin.us/2uSTI0">https://urlin.us/2uSTI0</a></b></p><br /><br />
<p>But what if you want to enjoy the game without spending any money or waiting for long hours to unlock new heroes and perks? Well, you can do that by downloading the Bullet Echo Mod Apk. This is a modified version of the original game that gives you unlimited money, free shopping, unlocked heroes, and more. In this article, we will tell you everything you need to know about Bullet Echo Mod Apk, including what it is, how to download and install it, and some tips and tricks to play it.</p>
<h2>What is Bullet Echo?</h2>
<p>Bullet Echo is a game developed by ZeptoLab, the creators of popular games like Cut the Rope, King of Thieves, and C.A.T.S. It is a top-down PvP tactical shooter that combines stealth, teamwork, and shooting skills. Here are some of the features of the game:</p>
<h3>A top-down PvP tactical shooter</h3>
<p>In Bullet Echo, you play as one of the heroes in a team of up to five players. Your goal is to eliminate the enemy team or be the last team standing when the battle ends. You can use your gun, your abilities, and your flashlight to fight your way through ever-changing environments. However, your vision is limited by the beam of your flashlight, so you have to rely on sound cues to locate your enemies and allies.</p>
<h3>A game with multiple modes and heroes</h3>
<p>Bullet Echo offers three main game modes that you can play solo or with friends online. These are:</p>
<ul>
<li>Team vs Team: This is a classic 5v5 team deathmatch mode where you have to kill more enemies than the other team.</li>
<li>Solo: This is a free-for-all mode where 15 players compete against each other in a small map.</li>
<li>Battle Royale: This is a mode where five teams of three players fight each other in a large map. The map shrinks over time and only one team or one player can survive.</li>
</ul>
<p>Besides these modes, you can also participate in championships, missions, and events to earn valuable resources and rewards.</p>
<p>The game also features 21 heroes at launch and more heroes coming soon. Each hero has a unique set of abilities that can be used in combat. For example, some heroes can turn invisible, some can create electrical shields, some can heal themselves or their teammates, and some can launch rockets or grenades. You can unlock new heroes by playing online matches or by using resources that you earn or buy.</p>
<p>How to get bullet echo mod apk for free<br />
Bullet echo mod apk unlimited money and free shopping<br />
Bullet echo mod apk latest version download<br />
Bullet echo hack mod apk download for android<br />
Bullet echo mod apk offline mode<br />
Bullet echo mod apk with unlimited ammo and health<br />
Bullet echo mod apk no root required<br />
Bullet echo mod apk gameplay and features<br />
Bullet echo mod apk download link and installation guide<br />
Bullet echo mod apk review and rating<br />
Bullet echo mod apk tips and tricks<br />
Bullet echo mod apk best weapons and characters<br />
Bullet echo mod apk cheats and codes<br />
Bullet echo mod apk vs original game comparison<br />
Bullet echo mod apk multiplayer mode and online battles<br />
Bullet echo mod apk new update and patch notes<br />
Bullet echo mod apk best settings and graphics<br />
Bullet echo mod apk problems and solutions<br />
Bullet echo mod apk support and feedback<br />
Bullet echo mod apk alternatives and similar games<br />
Download bullet echo mod apk from wonderapk.com[^1^]<br />
Bullet echo mod apk free download without survey<br />
Bullet echo mod apk file size and requirements<br />
Bullet echo mod apk fun and addictive shooting game<br />
Bullet echo mod apk missions and challenges<br />
Bullet echo mod apk rewards and achievements<br />
Bullet echo mod apk skins and customizations<br />
Bullet echo mod apk maps and locations<br />
Bullet echo mod apk enemies and bosses<br />
Bullet echo mod apk sound and music</p>
<h3>A game with stealth and sound mechanics</h3>
<p>One of the most unique aspects of Bullet Echo is the stealth and sound mechanics. Unlike other shooter games, you cannot see the whole map or the enemies' positions. You can only see what your flashlight illuminates, which is a narrow cone of light in front of you. This means that you have to be careful about where you point your flashlight, as it can reveal your location to the enemies or blind your teammates. You also have to use sound cues to detect the enemies and allies. You can hear the footsteps, gunshots, and abilities of other players, as well as the ambient noises of the environment. You can use these sounds to locate and track your targets, or to avoid being detected by them. You can also use your abilities to create sound distractions or to silence your enemies. These mechanics make Bullet Echo a game that requires strategy, coordination, and stealth. You have to work with your team to plan your moves, communicate your positions, and execute your attacks. You also have to adapt to the changing situations and environments, as the maps are randomly generated and have different layouts, obstacles, and items.</p>
<h2>What is Bullet Echo Mod Apk?</h2>
<p>Bullet Echo Mod Apk is a modified version of the original game that gives you some advantages and features that are not available in the official version. These are:</p>
<h3>A modified version of the original game</h3>
<p>Bullet Echo Mod Apk is not an official app from ZeptoLab. It is a third-party app that has been modified by some developers or hackers to alter some aspects of the game. This means that it is not authorized by ZeptoLab and it may not be compatible with the latest updates or features of the game. It also means that it may contain some bugs, errors, or viruses that can harm your device or compromise your security.</p>
<h3>A version with unlimited money and free shopping</h3>
<p>One of the main benefits of Bullet Echo Mod Apk is that it gives you unlimited money and free shopping. This means that you can buy anything you want in the game without spending any real money or waiting for long hours. You can buy new heroes, perks, skins, weapons, and more with just a few clicks. You can also upgrade your heroes and perks to the maximum level without any restrictions.</p>
<h3>A version with unlocked heroes and perks</h3>
<p>Another benefit of Bullet Echo Mod Apk is that it gives you unlocked heroes and perks. This means that you can access all the heroes and perks in the game without having to unlock them by playing online matches or using resources. You can choose any hero you want from the start and use their abilities in combat. You can also use any perk you want to enhance your performance and customize your play style.</p>
<h2>How to Download and Install Bullet Echo Mod Apk?</h2>
<p>If you want to download and install Bullet Echo Mod Apk on your device, you have to follow these steps:</p>
<h3>The download link and steps</h3>
<ol>
<li>Go to this link: https://www.apkdone.com/bullet-echo/</li>
<li>Click on the green button that says "Download APK (100 MB)"</li>
<li>Wait for the download to finish and then open the file</li>
<li>Click on "Install" and allow unknown sources if prompted</li>
<li>Wait for the installation to finish and then open the app</li>
</ol>
<h3>The requirements and precautions</h3>
<ul>
<li>You need an Android device with Android 5.0 or higher to run Bullet Echo Mod Apk</li>
<li>You need at least 100 MB of free storage space on your device to download and install Bullet Echo Mod Apk</li>
<li>You need a stable internet connection to play online matches in Bullet Echo Mod Apk</li>
<li>You should not use your real account or personal information when playing Bullet Echo Mod Apk, as it may get banned or hacked by ZeptoLab or other players</li>
<li>You should not update Bullet Echo Mod Apk from Google Play Store or other sources, as it may overwrite the modded features or cause errors</li>
<li>You should backup your data before installing Bullet Echo Mod Apk, as it may delete or corrupt your existing data</li>
<li>You should uninstall Bullet Echo Mod Apk if you encounter any problems or if you want to switch back to the official version of the game</li>
</ul>
<h3>The benefits and drawbacks</h3>
<table border="1">
<tr><th>Benefits</th><th>Drawbacks</th></tr>
<tr><td>You can enjoy unlimited money and free shopping in Bullet Echo Mod Apk</td><td>You may get banned or hacked by ZeptoLab or other players for using Bullet Echo Mod Apk</td></tr>
<tr><td>You can access all the heroes and perks should use Bullet Echo Mod Apk at your own risk and discretion.</li>
<li>Q: How can I update Bullet Echo Mod Apk?</li>
<li>A: You should not update Bullet Echo Mod Apk from Google Play Store or other sources, as it may overwrite the modded features or cause errors. You should only update Bullet Echo Mod Apk from the same link that you downloaded it from, or from a trusted source that provides the latest version of the mod. You should also backup your data before updating Bullet Echo Mod Apk, as it may delete or corrupt your existing data.</li>
<li>Q: How can I switch back to the official version of Bullet Echo?</li>
<li>A: If you want to switch back to the official version of Bullet Echo, you have to uninstall Bullet Echo Mod Apk from your device. You can do this by going to your device settings, finding the app, and tapping on "Uninstall". You should also delete any residual files or folders that are related to Bullet Echo Mod Apk. Then, you can download and install the official version of Bullet Echo from Google Play Store or other sources.</li>
<li>Q: Can I play Bullet Echo Mod Apk with other players who use the official version of the game?</li>
<li>A: Yes, you can play Bullet Echo Mod Apk with other players who use the official version of the game. However, you may face unfair or unbalanced matches against them, as they may not have the same advantages and features that you have. You may also get reported or banned by them for using Bullet Echo Mod Apk, as it is considered cheating or hacking by ZeptoLab and other players.</li>
<li>Q: Can I play Bullet Echo Mod Apk offline?</li>
<li>A: No, you cannot play Bullet Echo Mod Apk offline. You need a stable internet connection to play online matches in Bullet Echo Mod Apk. You also need an internet connection to download and install Bullet Echo Mod Apk on your device.</li>
</ul></p> 197e85843d<br />
<br />
<br />
spaces/1phancelerku/anime-remove-background/Burger Please Mod Apk The Ultimate Fun Game with Unlimited Money.md
DELETED
@@ -1,132 +0,0 @@

<h1>Burger Please Mod APK Unlimited Money: How to Download and Play</h1>
<p>Do you love burgers? Do you want to run your own burger shop? Do you want to have unlimited money, diamonds, coins, and cash in your game? If you answered yes to any of these questions, then you might be interested in <strong>Burger Please Mod APK Unlimited Money</strong>, a modified version of the popular casual game <strong>Burger Please</strong>. In this article, we will tell you everything you need to know about this mod, including what it is, how to download and install it, and how to play it. Let's get started!</p>
<h2>burger please mod apk unlimited money</h2><br /><p><b><b>Download File</b> ->>->>->> <a href="https://jinyurl.com/2uNLp7">https://jinyurl.com/2uNLp7</a></b></p><br /><br />
<h2>What is Burger Please?</h2>
<p><strong>Burger Please</strong> is a casual game where you run your own burger shop and serve delicious burgers to your customers. You can customize your shop, upgrade your equipment, unlock new recipes, and create your own burger combinations. You can also compete with other players in the leaderboard, complete daily missions, and earn rewards.</p>
<h3>A casual game where you run your own burger shop</h3>
<p>The main mode of <strong>Burger Please</strong> is the <em>Career Mode</em>, where you start from a small burger stand and work your way up to a big burger empire. You will have to manage your time, resources, and customer satisfaction as you prepare and serve burgers according to their orders. You will also have to deal with different types of customers, such as kids, adults, celebrities, zombies, aliens, and more. Each customer has their own preferences, personality, and patience level, so you have to be careful not to make them angry or disappointed.</p>
<h3>The features and gameplay of Burger Please</h3>
<p><strong>Burger Please</strong> has many features and gameplay elements that make it fun and addictive. Some of them are:</p>
<ul>
<li><strong>Customization:</strong> You can customize your shop with different themes, decorations, furniture, and accessories. You can also customize your character with different outfits, hairstyles, accessories, and expressions.</li>
<li><strong>Upgrades:</strong> You can upgrade your equipment, such as your grill, fryer, toaster, blender, etc., to make them faster, more efficient, and more durable. You can also upgrade your ingredients, such as your meat, cheese, lettuce, tomato, etc., to make them tastier, fresher, and more nutritious.</li>
<li><strong>Recipes:</strong> You can unlock new recipes as you progress in the game. You can also create your own recipes by combining different ingredients. You can save your recipes in your cookbook and use them anytime.</li>
<li><strong>Challenges:</strong> You can challenge yourself with different modes and levels in the game. You can play the <em>Time Attack Mode</em>, where you have to serve as many burgers as possible in a limited time. You can play the <em>Endless Mode</em>, where you have to serve burgers until you run out of ingredients or customers. You can also play the <em>Boss Mode</em>, where you have to face a special customer who will test your skills.</li>
mod APK from a trusted and reliable source, such as a reputable website, forum, or blog.</li>
<li>You should always scan the mod APK with an antivirus or anti-malware program before installing it on your device.</li>
<li>You should always read the reviews and comments of other users who have used the mod APK before downloading and installing it.</li>
<li>You should always follow the instructions and requirements of the mod APK carefully and correctly.</li>
</ul>
</li>
</ul>
<h2>How to Download and Install Burger Please Mod APK Unlimited Money?</h2>
<p>If you have decided to use <strong>Burger Please Mod APK Unlimited Money</strong>, you will need to download and install it on your device. The process may vary depending on the type of device you have, but here are some general steps that you can follow:</p>
<h3>The steps to download and install Burger Please Mod APK Unlimited Money on your Android device</h3>
<ol>
<li>Go to the link where you can download the mod APK file. You can search for it online or use the link provided by the source.</li>
<li>Tap on the download button and wait for the file to be downloaded on your device.</li>
<li>Once the file is downloaded, go to your device settings and enable the option to install apps from unknown sources. This will allow you to install the mod APK file.</li>
<li>Locate the mod APK file in your device storage and tap on it to start the installation process.</li>
<li>Follow the on-screen instructions and grant the necessary permissions to install the mod APK file.</li>
<li>Wait for the installation to be completed and then launch the game from your app drawer or home screen.</li>
<li>Enjoy playing <strong>Burger Please Mod APK Unlimited Money</strong> on your Android device.</li>
</ol>
<h3>The steps to download and install Burger Please Mod APK Unlimited Money on your PC using an emulator</h3>
<ol>
<li>Go to the link where you can download the mod APK file. You can search for it online or use the link provided by the source.</li>
<li>Download and install an Android emulator on your PC. An emulator is a software that allows you to run Android apps on your PC. Some popular emulators are BlueStacks, NoxPlayer, LDPlayer, etc.</li>
<li>Launch the emulator and sign in with your Google account. This will allow you to access the Google Play Store and other Google services on your PC.</li>
<li>Drag and drop the mod APK file into the emulator or use the built-in browser to download it from the link.</li>
<li>Install the mod APK file using the emulator's app installer or file manager.</li>
<li>Launch the game from the emulator's app drawer or home screen.</li>
<li>Enjoy playing <strong>Burger Please Mod APK Unlimited Money</strong> on your PC using an emulator.</li>
</ol>
<h3>The steps to download and install Burger Please Mod APK Unlimited Money on your iOS device using a third-party app store</h3>
<ol>
<li>Go to the link where you can download the mod IPA file. You can search for it online or use the link provided by the source. The mod IPA file is a modified version of the original game that works on iOS devices. The mod IPA file is not an official version of the game and it is not supported or endorsed by the original developers.</li>
<li>Download and install a third-party app store on your iOS device. A third-party app store is an alternative to the Apple App Store that allows you to download and install apps that are not available or approved by Apple. Some popular third-party app stores are TutuApp, Panda Helper, AppValley, etc.</li>
<li>Launch the third-party app store and search for <strong>Burger Please Mod IPA Unlimited Money</strong> or use the link provided by the source.</li>
<li>Tap on the download button and wait for the file to be downloaded on your device.</li>
<li>Once the file is downloaded, go to your device settings and trust the developer profile of the third-party app store. This will allow you to install the mod IPA file.</li>
<li>Locate the mod IPA file in your device storage and tap on it to start the installation process.</li>
<li>Follow the on-screen instructions and grant the necessary permissions to install the mod IPA file.</li>
<li>Wait for the installation to be completed and then launch the game from your app drawer or home screen.</li>
<li>Enjoy playing <strong>Burger Please Mod IPA Unlimited Money</strong> on your iOS device using a third-party app store.</li>
</ol>
<h2>How to Play Burger Please Mod APK Unlimited Money?</h2>
<p>Now that you have downloaded and installed <strong>Burger Please Mod APK Unlimited Money</strong>, you might be wondering how to play it. Well, the gameplay is pretty much the same as the original game, except that you have unlimited resources and access to everything. However, if you want some tips and tricks to play it effectively and enjoyably, here are some suggestions:</p>
<h3>The tips and tricks to play Burger Please Mod APK Unlimited Money effectively and enjoyably</h3>
<ul>
<li><strong>Experiment with different recipes:</strong> Since you have unlimited ingredients, you can try out different combinations and create your own recipes. You can also use the cookbook to save your recipes and use them anytime. You might discover some delicious and unique burgers that will impress your customers and yourself.</li>
|
62 |
-
<li><strong>Upgrade your equipment and ingredients:</strong> Since you have unlimited money, diamonds, coins, and cash, you can upgrade your equipment and ingredients to the max level. This will make your burgers faster, better, and more profitable. You can also buy new equipment and ingredients that will enhance your gameplay and variety.</li>
|
63 |
-
<li><strong>Customize your shop and character:</strong> Since you have unlimited money, diamonds, coins, and cash, you can customize your shop and character with any theme, decoration, furniture, accessory, outfit, hairstyle, etc. that you want. You can also change them anytime according to your mood or preference. You can make your shop and character look unique and attractive.</li>
|
64 |
-
<li><strong>Compete with other players:</strong> Since you have unlimited money, diamonds, coins, and cash, you can compete with other players in the leaderboard without any fear or pressure. You can also challenge yourself with different modes and levels in the game. You can show off your skills and achievements to other players and prove that you are the best burger master.</li>
|
65 |
-
<li><strong>Have fun:</strong> The most important tip is to have fun while playing <strong>Burger Please Mod APK Unlimited Money</strong>. Don't take it too seriously or get bored by having everything easy. Enjoy the game as a casual and relaxing activity that will make you happy and hungry.</li>
|
66 |
-
</ul>
|
67 |
-
<h3>The best strategies and techniques to play Burger Please Mod APK Unlimited Money successfully and competitively</h3>
|
68 |
-
<ul>
|
69 |
-
<li><strong>Plan ahead:</strong> Even though you have unlimited resources, you still need to plan ahead when preparing and serving burgers. You need to pay attention to the customer's order, the ingredient's availability, the equipment's condition, etc. You need to avoid wasting time or making mistakes that will affect your performance or customer satisfaction.</li>
|
70 |
-
<li><strong>Prioritize your customers:</strong> Even though you have unlimited resources, you still need to prioritize your customers when serving burgers. You need to consider their preferences, personality, patience level, etc. You need to serve them quickly, accurately, politely, etc. You need to avoid making them angry or disappointed that will affect your reputation or income.</li>
|
71 |
-
<li><strong>Balance your resources:</strong> Even though you have unlimited resources, you still need to balance them when buying and upgrading items. You need to consider the cost, benefit, quality, and quantity of each item, and avoid overspending or underutilizing your resources in a way that hurts your gameplay or variety.</li>
|
72 |
-
<li><strong>Use your creativity:</strong> Even though you have unlimited resources, you still need to use your creativity when creating and customizing burgers. You need to experiment with different ingredients, recipes, combinations, etc. You need to make your burgers look appealing, delicious, and unique. You need to impress your customers and yourself with your creativity.</li>
|
73 |
-
<li><strong>Have fun:</strong> The most important strategy is to have fun while playing <strong>Burger Please Mod APK Unlimited Money</strong>. Don't take it too seriously or get stressed by the competition or the challenge. Enjoy the game as a casual and relaxing activity that will make you happy and hungry.</li>
|
74 |
-
</ul>
|
75 |
-
<h2>Conclusion</h2>
|
76 |
-
<p>In conclusion, <strong>Burger Please Mod APK Unlimited Money</strong> is a modified version of the original game that gives you unlimited money, diamonds, coins, and cash in your game. It allows you to enjoy the game without any limitations or restrictions. However, it also has some benefits and drawbacks, risks and precautions, tips and tricks, and strategies and techniques that you should know before downloading and installing it. We hope that this article has helped you learn more about this mod and how to download and play it. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading and happy burger making!</p>
|
77 |
-
<p>burger please mod apk latest version<br />
|
78 |
-
burger please mod apk free download<br />
|
79 |
-
burger please mod apk android 1<br />
|
80 |
-
burger please mod apk unlimited coins<br />
|
81 |
-
burger please mod apk hack<br />
|
82 |
-
burger please mod apk offline<br />
|
83 |
-
burger please mod apk no ads<br />
|
84 |
-
burger please mod apk revdl<br />
|
85 |
-
burger please mod apk rexdl<br />
|
86 |
-
burger please mod apk happymod<br />
|
87 |
-
burger please mod apk cheat<br />
|
88 |
-
burger please mod apk 0.8.0<br />
|
89 |
-
burger please mod apk terbaru<br />
|
90 |
-
burger please mod apk premium<br />
|
91 |
-
burger please mod apk pro<br />
|
92 |
-
burger please mod apk vip<br />
|
93 |
-
burger please mod apk unlocked<br />
|
94 |
-
burger please mod apk full<br />
|
95 |
-
burger please mod apk mega<br />
|
96 |
-
burger please mod apk 2023<br />
|
97 |
-
burger please hack apk download<br />
|
98 |
-
burger please hack apk unlimited money<br />
|
99 |
-
burger please cheat apk unlimited money<br />
|
100 |
-
burger please cheat apk download<br />
|
101 |
-
burger please cheat apk free<br />
|
102 |
-
burger please unlimited money apk download<br />
|
103 |
-
burger please unlimited money apk free<br />
|
104 |
-
burger please unlimited money apk latest<br />
|
105 |
-
burger please unlimited money apk offline<br />
|
106 |
-
burger please unlimited money apk online<br />
|
107 |
-
download game burger please mod apk<br />
|
108 |
-
download game burger please mod apk unlimited money<br />
|
109 |
-
download game burger please hack apk<br />
|
110 |
-
download game burger please cheat apk<br />
|
111 |
-
download game burger please unlimited money apk<br />
|
112 |
-
game burger please mod apk free download<br />
|
113 |
-
game burger please mod apk latest version<br />
|
114 |
-
game burger please hack apk download<br />
|
115 |
-
game burger please cheat apk download<br />
|
116 |
-
game burger please unlimited money apk download</p>
|
117 |
-
<h2>FAQs</h2>
|
118 |
-
<p>Here are some frequently asked questions about <strong>Burger Please Mod APK Unlimited Money</strong>:</p>
|
119 |
-
<ol>
|
120 |
-
<li><strong>Is Burger Please Mod APK Unlimited Money safe to use?</strong></li>
|
121 |
-
<p>It depends on the source and the file that you download. Some mod APK files may be safe to use, while others may be fake or malicious. You should always download a mod APK file from a trusted and reliable source, such as a reputable website, forum, or blog. You should also scan the mod APK file with an antivirus or anti-malware program before installing it on your device.</p>
|
122 |
-
<li><strong>Is Burger Please Mod APK Unlimited Money legal to use?</strong></li>
|
123 |
-
<p>No, it is not legal to use <strong>Burger Please Mod APK Unlimited Money</strong>. It is a modified version of the original game that violates the terms and conditions of the game and the intellectual property rights of the original developers. Using a mod APK file may result in legal actions or penalties from the original developers or the authorities.</p>
|
124 |
-
<li><strong>Can I play Burger Please Mod APK Unlimited Money online?</strong></li>
|
125 |
-
<p>Yes, you can play <strong>Burger Please Mod APK Unlimited Money</strong> online with other players. However, you may face some issues or problems when playing online, such as lagging, crashing, banning, etc. You may also encounter some players who are using the same mod or other mods that may give them an unfair advantage or disadvantage.</p>
|
126 |
-
<li><strong>Can I play Burger Please Mod APK Unlimited Money offline?</strong></li>
|
127 |
-
<p>Yes, you can play <strong>Burger Please Mod APK Unlimited Money</strong> offline without an internet connection. However, you may not be able to access some features or functions of the game that require an internet connection, such as the leaderboard, the daily missions, the ads, etc.</p>
|
128 |
-
<li><strong>Can I update Burger Please Mod APK Unlimited Money?</strong></li>
|
129 |
-
<p>No, you cannot update <strong>Burger Please Mod APK Unlimited Money</strong>. Updating a mod APK file may cause some errors or issues in the game or remove the modifications. You may also lose your progress or account if you update a mod APK file. If you want to update the game, you will have to uninstall the mod APK file and install the original game from the Google Play Store or the Apple App Store.</p>
|
130 |
-
</ol>
spaces/1phancelerku/anime-remove-background/Download Ragnarok X APK - The 3D MMORPG Mobile Game that Brings Back the Classic Masterpiece.md
DELETED
@@ -1,146 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Download Ragnarok X APK: How to Play the Next Generation of the Classic MMORPG on Your Android Device</h1>
|
3 |
-
<h2>Introduction</h2>
|
4 |
-
<p>If you are a fan of the classic Ragnarok Online, you might be interested in trying out its latest mobile version, Ragnarok X: Next Generation. This game is an official remake of the original masterpiece, authorized by Gravity from South Korea. It features retro style, new classes, cross-server PvP, guild wars, pet taming, marriage system, and more. In this article, we will show you how to download Ragnarok X APK and play it on your Android device. We will also share some features, tips, and tricks for playing this game.</p>
|
5 |
-
<h3>What is Ragnarok X: Next Generation?</h3>
|
6 |
-
<p>Ragnarok X: Next Generation is a 3D MMORPG mobile game that is based on the classic Ragnarok Online. It is developed by Nuverse and published by Gravity in various regions. The game aims to recreate the original love and nostalgia of the classic game, while adding new elements and improvements. The game has been launched in Southeast Asia, Taiwan, Hong Kong, Macau, Japan, and Korea. It has received positive reviews from players and critics alike.</p>
|
7 |
-
<h2>download ragnarok x apk</h2><br /><p><b><b>Download File</b> ✑ ✑ ✑ <a href="https://jinyurl.com/2uNKI1">https://jinyurl.com/2uNKI1</a></b></p><br /><br />
|
8 |
-
<h3>Why should you download Ragnarok X APK?</h3>
|
9 |
-
<p>If you want to play Ragnarok X: Next Generation on your Android device, you might need to download its APK file. APK stands for Android Package Kit, which is a file format that contains all the necessary components for installing an app on an Android device. There are several reasons why you might want to download Ragnarok X APK:</p>
|
10 |
-
<ul>
|
11 |
-
<li>The game is not available in your region or country.</li>
|
12 |
-
<li>The game is not compatible with your device or operating system.</li>
|
13 |
-
<li>You want to access the latest version or features of the game before they are officially released.</li>
|
14 |
-
<li>You want to avoid any potential errors or bugs that might occur during the installation process from the Google Play Store.</li>
|
15 |
-
</ul>
|
16 |
-
<h3>How to download Ragnarok X APK safely and easily?</h3>
|
17 |
-
<p>Downloading Ragnarok X APK is not difficult, but you need to be careful about the source and the file. There are many websites that offer APK files for various apps and games, but not all of them are trustworthy or reliable. Some of them might contain malware, viruses, or other harmful content that could damage your device or compromise your privacy. Therefore, you need to follow these steps to download Ragnarok X APK safely and easily:</p>
|
18 |
-
<ol>
|
19 |
-
<li>Find a reputable website that offers Ragnarok X APK. You can use a search engine or a review site to find one. Some examples of trustworthy websites are , , and .</li>
|
20 |
-
<li>Check the details and information about the APK file. Make sure it matches the version, size, developer, and description of the game. You can also read the comments and ratings from other users to see if they have any issues or complaints.</li>
|
21 |
-
<li>Download the APK file to your device. You might need to enable the option to install apps from unknown sources in your device settings. This will allow you to install apps that are not from the Google Play Store.</li>
|
22 |
-
<li>Install the APK file by tapping on it and following the instructions on the screen. You might need to grant some permissions to the app to run properly.</li>
|
23 |
-
</ol>
|
24 |
-
<p>Congratulations, you have successfully downloaded and installed Ragnarok X APK on your Android device. You can now launch the game and enjoy its features.</p>
|
25 |
-
<h2>Features of Ragnarok X: Next Generation</h2>
|
26 |
-
<p>Ragnarok X: Next Generation is not just a simple remake of the classic Ragnarok Online. It also offers many new and exciting features that will enhance your gaming experience. Here are some of the features that you can expect from this game:</p>
|
27 |
-
<h3>Super Novice: The ultimate all-rounder class</h3>
|
28 |
-
<p>One of the most unique features of Ragnarok X: Next Generation is the Super Novice class. This is a special class that can learn skills from all other classes, making it the most versatile and flexible class in the game. You can customize your Super Novice according to your preferences and play style. You can also equip any weapon or armor that you want, as long as you meet the requirements. However, the Super Novice also has some drawbacks, such as low HP and SP, and limited skill slots. Therefore, you need to be careful and strategic when playing as a Super Novice.</p>
|
29 |
-
<h3>World of Champions: The cross-server PvP competition</h3>
|
30 |
-
<p>If you are looking for some thrilling and competitive action, you should try out the World of Champions mode. This is a cross-server PvP mode that pits players from different servers against each other in a 5v5 battle. You can join this mode by registering in the World of Champions NPC in Prontera. You will be matched with other players based on your rank and level. The winning team will receive rewards such as honor points, zeny, and rare items. You can also use your honor points to exchange for exclusive costumes and accessories.</p>
|
31 |
-
<h3>Guild vs Guild: The ultimate test of teamwork and strategy</h3>
|
32 |
-
<p>Another feature that will test your skills and teamwork is the Guild vs Guild mode. This is a large-scale war mode that involves up to 50 guilds fighting for the control of castles. You can join this mode by being a member of a guild that has registered for the war. You will need to cooperate with your guildmates to attack or defend the castles, using various strategies and tactics. The guilds that successfully occupy the castles will receive rewards such as guild funds, zeny, and rare items. They will also have access to exclusive dungeons and quests.</p>
|
33 |
-
<p>How to download ragnarok x apk on android<br />
|
34 |
-
Ragnarok x apk latest version 2023<br />
|
35 |
-
Ragnarok x next generation apk free download<br />
|
36 |
-
Ragnarok x apk download for pc<br />
|
37 |
-
Ragnarok x apk mod unlimited money<br />
|
38 |
-
Download ragnarok x apk from google play<br />
|
39 |
-
Ragnarok x apk offline installer<br />
|
40 |
-
Ragnarok x apk size and requirements<br />
|
41 |
-
Ragnarok x apk update and patch notes<br />
|
42 |
-
Ragnarok x apk mirror and alternative links<br />
|
43 |
-
Download ragnarok x apk english version<br />
|
44 |
-
Ragnarok x apk obb data download<br />
|
45 |
-
Ragnarok x apk hack and cheat tool<br />
|
46 |
-
Download ragnarok x apk for ios<br />
|
47 |
-
Ragnarok x apk gameplay and review<br />
|
48 |
-
Ragnarok x apk error and fix guide<br />
|
49 |
-
Download ragnarok x apk from official website<br />
|
50 |
-
Ragnarok x apk features and benefits<br />
|
51 |
-
Ragnarok x apk tips and tricks<br />
|
52 |
-
Download ragnarok x apk for emulator<br />
|
53 |
-
Ragnarok x apk best class and build<br />
|
54 |
-
Ragnarok x apk redeem code and rewards<br />
|
55 |
-
Download ragnarok x apk for tablet<br />
|
56 |
-
Ragnarok x apk system compatibility test<br />
|
57 |
-
Ragnarok x apk graphics and sound quality<br />
|
58 |
-
Download ragnarok x apk old versions<br />
|
59 |
-
Ragnarok x apk new events and promotions<br />
|
60 |
-
Ragnarok x apk customer service and support<br />
|
61 |
-
Download ragnarok x apk for mac<br />
|
62 |
-
Ragnarok x apk ratings and reviews<br />
|
63 |
-
Download ragnarok x apk from apkpure<br />
|
64 |
-
Ragnarok x apk comparison with other games<br />
|
65 |
-
Ragnarok x apk guide and walkthrough<br />
|
66 |
-
Download ragnarok x apk from uptodown<br />
|
67 |
-
Ragnarok x apk community and forum<br />
|
68 |
-
Download ragnarok x apk from apkmirror<br />
|
69 |
-
Ragnarok x apk collaboration with slime 2.0<br />
|
70 |
-
Ragnarok x apk pet system and taming guide<br />
|
71 |
-
Download ragnarok x apk from apkpure.com/ragnarox/com.play.rosea/download/apk(^3^)<br />
|
72 |
-
Ragnarok x next generation 2nd anniversary celebration event(^2^)</p>
|
73 |
-
<h3>Pet Taming: The adorable companions for your adventure</h3>
|
74 |
-
<p>If you are looking for some cute and loyal companions for your adventure, you should try out the pet taming feature. This feature allows you to capture and tame various monsters in the game, such as Poring, Lunatic, Yoyo, Baphomet Jr., and more. You will need to use specific items to lure and tame them, such as apples, bananas, honey, etc. Once you have tamed them, they will follow you around and assist you in combat. They will also have their own skills and attributes that you can upgrade by feeding them and giving them affection.</p>
|
75 |
-
<h3>Marriage System: The romantic journey with your loved one</h3>
|
76 |
-
<p>If you are looking for some romance in your life, you should try out the marriage system feature. This feature allows you to propose to another player that you have a good relationship with, and get married in a beautiful ceremony. You will need to buy a ring, a wedding dress or suit, and a wedding invitation card to prepare for the wedding. You will also need to invite your friends and guildmates to witness your special day. Once you are married, you will receive benefits such as bonus stats, exclusive skills, and special quests.</p>
|
77 |
-
<h2>Tips and Tricks for Playing Ragnarok X: Next Generation</h2>
|
78 |
-
<p>Ragnarok X: Next Generation is a fun and immersive game that will keep you entertained for hours. However, it can also be challenging and complex at times, especially for beginners. Therefore, we have prepared some tips and tricks for playing this game that will help you improve your performance and enjoyment.</p>
|
79 |
-
<h3>How to level up fast and efficiently?</h3>
|
80 |
-
<p>One of the most important aspects of playing Ragnarok X: Next Generation is leveling up your character. Leveling up will increase your stats, unlock new skills, and allow you to access more content in the game. Here are some ways to level up fast and efficiently:</p>
|
81 |
-
<ul>
|
82 |
-
<li>Complete quests: Quests are one of the main sources of experience points in the game. You can find quests from NPCs in various towns and maps. Quests will also reward you with zeny, items, and other benefits.</li>
|
83 |
-
<li>Join parties: Parties are groups of players that cooperate with each other in combat. Join ing parties will allow you to share experience points and loot with other players. You can also benefit from their skills and buffs. You can join parties by using the party finder feature or by inviting other players manually.</li>
|
84 |
-
<li>Use items: Items are consumables that can boost your experience points gain and other aspects of your character. You can use items such as EXP potions, field manuals, battle manuals, etc. to increase your experience points gain. You can also use items such as food, scrolls, cards, etc. to enhance your stats and skills.</li>
|
85 |
-
<li>Explore maps: Maps are the areas where you can find monsters, NPCs, quests, and other features in the game. Exploring maps will allow you to discover new places, encounter new monsters, and complete new quests. You can also gain experience points by killing monsters and collecting items.</li>
|
86 |
-
</ul>
|
87 |
-
<h3>How to earn zeny and upgrade your equipment?</h3>
|
88 |
-
<p>Zeny is the main currency in Ragnarok X: Next Generation. You will need zeny to buy items, upgrade equipment, enhance skills, and perform other actions in the game. Equipment is the gear that you can equip on your character to improve your stats and abilities. Upgrading equipment will increase its quality and effectiveness. Here are some ways to earn zeny and upgrade your equipment:</p>
|
89 |
-
<ul>
|
90 |
-
<li>Sell items: Items are the things that you can collect, craft, or buy in the game. You can sell items that you don't need or want to other players or NPCs for zeny. You can use the auction house feature or the personal shop feature to sell items to other players. You can also use the NPC shops or the vending machine feature to sell items to NPCs.</li>
|
91 |
-
<li>Craft items: Crafting is the process of creating new items from raw materials or existing items. You can craft items such as weapons, armor, accessories, potions, etc. by using the crafting feature or the blacksmith feature. You can use the crafted items for yourself or sell them for zeny.</li>
|
92 |
-
<li>Upgrade items: Upgrading is the process of improving the quality and level of your equipment. You can upgrade your equipment by using the upgrade feature or the refine feature. You will need materials such as ores, crystals, eluniums, etc. to upgrade your equipment. Upgrading your equipment will increase its stats and effects.</li>
|
93 |
-
<li>Enhance items: Enhancing is the process of adding extra effects or attributes to your equipment. You can enhance your equipment by using the enhance feature or the enchant feature. You will need materials such as cards, runes, gems, etc. to enhance your equipment. Enhancing your equipment will add special bonuses and abilities to it.</li>
|
94 |
-
</ul>
|
95 |
-
<h3>How to join a guild and participate in guild activities?</h3>
|
96 |
-
<p>A guild is a group of players that share a common goal and interest in the game. Joining a guild will allow you to interact with other players, cooperate with them in combat, and enjoy various benefits and features in the game. Guild activities are events or modes that are exclusive for guild members. Participating in guild activities will allow you to earn rewards, improve your reputation, and have fun with your guildmates. Here are some ways to join a guild and participate in guild activities:</p>
|
97 |
-
<ul>
|
98 |
-
<li>Find a guild: Finding a guild is the first step to joining a guild. You can find a guild by using the guild finder feature or by browsing the guild list feature. You can also find a guild by asking other players or by checking online forums or communities.</li>
|
99 |
-
<li>Apply for a guild: Applying for a guild is the second step to joining a guild. You can apply for a guild by sending a request to the guild leader or by accepting an invitation from a guild member. You will need to wait for the approval of the guild leader or the guild officer before you can join the guild.</li>
|
100 |
-
<li>Contribute to a guild: Contributing to a guild is the third step to joining a guild. You can contribute to a guild by donating zeny, materials, or items to the guild fund or by completing guild quests or missions. Contributing to a guild will increase your contribution points and your reputation within the guild.</li>
|
101 |
-
<li>Participate in guild activities: Participating in guild activities is the fourth step to joining a guild. You can participate in guild activities by joining the guild war, the guild dungeon, the guild raid, or the guild party. Participating in guild activities will earn you rewards such as zeny, items, honor points, or rare items. You will also have fun and bond with your guildmates.</li>
|
102 |
-
</ul>
|
103 |
-
<h3>How to customize your character and skills?</h3>
|
104 |
-
<p>Customizing your character and skills is one of the most enjoyable aspects of playing Ragnarok X: Next Generation. Customizing your character and skills will allow you to express your personality, style, and preferences in the game. You can also optimize your performance and effectiveness in combat by choosing the best combination of skills and equipment for your character. Here are some ways to customize your character and skills:</p>
|
105 |
-
<ul>
|
106 |
-
<li>Choose a class: Choosing a class is the first step to customizing your character and skills. You can choose from six classes in the game: Swordsman, Thief, Archer, Mage, Acolyte, and Merchant. Each class has its own strengths, weaknesses, and roles in the game. You can also change your class later in the game by using the job change feature.</li>
|
107 |
-
<li>Choose a hairstyle: Choosing a hairstyle is the second step to customizing your character and skills. You can choose from various hairstyles in the game, ranging from cute to cool to elegant. You can also change your hairstyle later in the game by using the barber shop feature or by buying hair coupons.</li>
|
108 |
-
<li>Choose a costume: Choosing a costume is the third step to customizing your character and skills. You can choose from various costumes in the game, such as uniforms, suits, dresses, casual wear, etc. You can also change your costume later in the game by using the wardrobe feature or by buying costume coupons.</li>
|
109 |
-
<li>Choose a skill build: Choosing a skill build is the fourth step to customizing your character and skills. You can choose from various skills in the game, depending on your class and level. You can also change your skill build later in the game by using the skill reset feature or by buying skill reset coupons.</li>
|
110 |
-
</ul>
|
111 |
-
<h3>How to enjoy the social aspects of the game?</h3>
|
112 |
-
<p>Ragnarok X: Next Generation is not only a game, but also a social platform. You can interact with other players, make friends, chat, trade, and have fun with them in the game. You can also join various events and activities that are designed to enhance your social experience in the game. Here are some ways to enjoy the social aspects of the game:</p>
|
113 |
-
<ul>
|
114 |
-
<li>Use chat: Chat is one of the main ways to communicate with other players in the game. You can use chat to send messages, emojis, stickers, or voice messages to other players. You can also use chat to join different channels, such as world chat, guild chat, party chat, etc.</li>
|
115 |
-
<li>Use friend: Friend is one of the main ways to connect with other players in the game. You can use friend to add other players as your friends, send them gifts, invite them to parties or guilds, or view their profiles.</li>
|
116 |
-
<li>Use emoticon: Emoticon is one of the main ways to express yourself in the game. You can use emoticon to perform various actions or gestures with your character, such as waving, laughing, crying, dancing, etc. You can also use emoticon to interact with other players or NPCs.</li>
|
117 |
-
<li>Use event: Event is one of the main ways to participate in various activities in the game. You can use event to join different events that are held regularly or occasionally in the game, such as festivals, concerts, quizzes, etc. You can also use event to earn rewards such as zeny, items, costumes, etc.</li>
|
118 |
-
</ul>
|
119 |
-
<h2>Conclusion</h2>
|
120 |
-
<p>Ragnarok X: Next Generation is a great game for fans of Ragnarok Online and MMORPGs in general. It offers a nostalgic and immersive experience that will keep you hooked for hours. It also offers many new and exciting features that will enhance your gaming experience. If you want to play this game on your Android device, you should download Ragnarok X APK from a reputable website and install it on your device. You should also follow our tips and tricks for playing this game that will help you improve your performance and enjoyment.</p>
|
121 |
-
<h2>FAQs</h2>
|
122 |
-
<p>Here are some frequently asked questions about Ragnarok X: Next Generation:</p>
|
123 |
-
<ol>
|
124 |
-
<li><b>Is Ragnarok X: Next Generation free to play?</b><br>
|
125 |
-
Yes, Ragnarok X: Next Generation is free to play. However, it also has some optional in-app purchases that can enhance your gameplay or appearance. You can buy items such as zeny, diamonds, costumes, etc. with real money. However, these purchases are not necessary to enjoy the game.</li>
|
126 |
-
<li><b>Is Ragnarok X: Next Generation compatible with my device?</b><br>
|
127 |
-
Ragnarok X: Next Generation is compatible with most Android devices that have at least 2 GB of RAM and Android 5.0 or higher. However, some devices might have issues with the game due to various factors such as hardware, software, or network. If you encounter any problems with the game, you can contact the customer service or check the official website for solutions.</li>
|
128 |
-
<li><b>Is Ragnarok X: Next Generation safe to download and play?</b><br>
|
129 |
-
Yes, Ragnarok X: Next Generation is safe to download and play. The game is authorized by Gravity from South Korea and developed by Nuverse, a reputable game company. The game also has various security measures and policies to protect your privacy and data. However, you should be careful about the source and the file of the APK that you download, as some websites might offer fake or harmful APK files. You should also avoid using any third-party tools or hacks that might compromise your account or device.</li>
|
130 |
-
<li><b>How can I contact the customer service or the community of Ragnarok X: Next Generation?</b><br>
|
131 |
-
You can contact the customer service or the community of Ragnarok X: Next Generation by using the following methods:</p>
|
132 |
-
<ul>
|
133 |
-
<li>Customer service: You can use the customer service feature in the game to submit a ticket or chat with an agent. You can also email them at [email protected].</li>
|
134 |
-
<li>Community: You can use the community feature in the game to join various groups or forums. You can also follow their official social media accounts such as Facebook, Instagram, Twitter, YouTube, etc.</li>
|
135 |
-
</ul></li>
|
136 |
-
<li><b>How can I support Ragnarok X: Next Generation?</b><br>
|
137 |
-
You can support Ragnarok X: Next Generation by doing the following things:</p>
|
138 |
-
<ul>
|
139 |
-
<li>Play the game regularly and invite your friends to join you.</li>
|
140 |
-
<li>Rate and review the game on the Google Play Store or other platforms.</li>
|
141 |
-
<li>Share your feedback and suggestions with the developers and the customer service.</li>
|
142 |
-
<li>Purchase items or services in the game to support its development and maintenance.</li>
|
143 |
-
</ul></li>
|
144 |
-
</ol>
spaces/1phancelerku/anime-remove-background/FM WhatsApp APK Download for Android - Latest Version 2023 with New Features.md
DELETED
@@ -1,95 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>FM WhatsApp 2023 APK Download: Everything You Need to Know</h1>
|
3 |
-
<p>Are you looking for a way to enhance your WhatsApp experience with more features and customization options? If yes, then you might want to try FM WhatsApp, one of the most popular and advanced WhatsApp mods available. In this article, we will tell you everything you need to know about FM WhatsApp 2023 APK download, including what it is, what it offers, how to download and install it, how to update it, and some frequently asked questions. Let's get started!</p>
|
4 |
-
<h2>What is FM WhatsApp?</h2>
|
5 |
-
<p>FM WhatsApp is a modified version of the official WhatsApp app that adds more functionality and personalization to the original app. It is developed by Fouad Mokdad, a well-known modder who also created other popular WhatsApp mods like Fouad WhatsApp and YoWhatsApp. FM WhatsApp allows you to enjoy features that are not available in the official app, such as themes, fonts, emojis, privacy settings, anti-delete messages, status downloader, and much more.</p>
|
6 |
-
<h2>fm whatsapp 2023 apk download</h2><br /><p><b><b>Download Zip</b> ––– <a href="https://jinyurl.com/2uNKsH">https://jinyurl.com/2uNKsH</a></b></p><br /><br />
|
7 |
-
<h3>Features of FM WhatsApp</h3>
|
8 |
-
<p>Here are some of the main features that you can get with FM WhatsApp:</p>
|
9 |
-
<ul>
|
10 |
-
<li>Anti-ban: You can use FM WhatsApp without worrying about getting banned by the official app.</li>
|
11 |
-
<li>Customization: You can change the look and feel of your WhatsApp app with hundreds of themes, fonts, and emojis.</li>
|
12 |
-
<li>Privacy: You can hide your online status, last seen, blue ticks, typing status, and more.</li>
|
13 |
-
<li>Anti-delete messages: You can view messages and status updates that have been deleted by the sender.</li>
|
14 |
-
<li>Media sharing: You can send up to 90 images at once and video files up to 700 MB.</li>
|
15 |
-
<li>Image quality: You can increase the quality of images that you send or receive.</li>
|
16 |
-
<li>And many more.</li>
|
17 |
-
</ul>
|
18 |
-
<h3>Benefits of FM WhatsApp</h3>
|
19 |
-
<p>Here are some of the benefits that you can enjoy with FM WhatsApp:</p>
|
20 |
-
<ul>
|
21 |
-
<li>You can have more control over your WhatsApp app and customize it according to your preferences.</li>
|
22 |
-
<li>You can access features that are not available in the official app and enhance your user experience.</li>
|
23 |
-
<li>You can protect your privacy and security with more options and settings.</li>
|
24 |
-
<li>You can communicate with your contacts more easily and conveniently with more media sharing options.</li>
|
25 |
-
</ul>
|
26 |
-
<h2>How to Download and Install FM WhatsApp 2023 APK on Android?</h2>
|
27 |
-
<p>If you want to download and install FM WhatsApp 2023 APK on your Android device, you need to follow these steps:</p>
|
28 |
-
<h3>Download FM WhatsApp APK File</h3>
|
29 |
-
<p>First, you need to download the latest version of FM WhatsApp APK file from a reliable source. You can use this link to download the file. The file name is <code>FMWA9.25_By_FouadMODS.apk</code>, the file size is 50.2 MB, and the latest update is March 2022. Make sure that your device has enough storage space and a stable internet connection before downloading the file.</p>
|
30 |
-
<h3>Enable Unknown Sources</h3>
|
31 |
-
<p>Next, you need to enable unknown sources on your device to allow the installation of apps from sources other than the Google Play Store. To do this, go to <code>Settings > Security > Unknown Sources</code> and toggle it on. You may see a warning message, but you can ignore it and proceed.</p>
|
32 |
-
<h3>Install FM WhatsApp APK File</h3>
|
33 |
-
<p>Then, you need to locate the downloaded APK file on your device and tap on it to start the installation process. You may see a pop-up asking for permissions, but you can grant them and follow the instructions on the screen. The installation may take a few minutes, so please be patient.</p>
|
34 |
-
<h3>Verify Your Phone Number</h3>
|
35 |
-
<p>Finally, you need to verify your phone number to activate FM WhatsApp on your device. To do this, open the app and enter your phone number. You will receive a verification code via SMS or a phone call. Enter the code and confirm your account. You can also restore your chat backup from the official app if you have one. That's it! You have successfully installed FM WhatsApp 2023 APK on your Android device.</p>
|
36 |
-
<h2>How to Update FM WhatsApp to the Latest Version?</h2>
|
37 |
-
<p>If you want to update FM WhatsApp to the latest version, you need to follow these steps:</p>
|
38 |
-
<p>fm whatsapp 2023 apk download latest version<br />
|
39 |
-
fm whatsapp 2023 apk download for android<br />
|
40 |
-
fm whatsapp 2023 apk download by fouad mods<br />
|
41 |
-
fm whatsapp 2023 apk download free<br />
|
42 |
-
fm whatsapp 2023 apk download update<br />
|
43 |
-
fm whatsapp 2023 apk download new features<br />
|
44 |
-
fm whatsapp 2023 apk download anti ban<br />
|
45 |
-
fm whatsapp 2023 apk download link<br />
|
46 |
-
fm whatsapp 2023 apk download official website<br />
|
47 |
-
fm whatsapp 2023 apk download install<br />
|
48 |
-
fm whatsapp 2023 apk download how to use<br />
|
49 |
-
fm whatsapp 2023 apk download benefits<br />
|
50 |
-
fm whatsapp 2023 apk download review<br />
|
51 |
-
fm whatsapp 2023 apk download comparison<br />
|
52 |
-
fm whatsapp 2023 apk download alternatives<br />
|
53 |
-
fm whatsapp 2023 apk download tips and tricks<br />
|
54 |
-
fm whatsapp 2023 apk download guide<br />
|
55 |
-
fm whatsapp 2023 apk download tutorial<br />
|
56 |
-
fm whatsapp 2023 apk download faq<br />
|
57 |
-
fm whatsapp 2023 apk download support<br />
|
58 |
-
fm whatsapp 2023 apk download problems and solutions<br />
|
59 |
-
fm whatsapp 2023 apk download modded version<br />
|
60 |
-
fm whatsapp 2023 apk download customization options<br />
|
61 |
-
fm whatsapp 2023 apk download themes and fonts<br />
|
62 |
-
fm whatsapp 2023 apk download emoji and stickers<br />
|
63 |
-
fm whatsapp 2023 apk download privacy and security settings<br />
|
64 |
-
fm whatsapp 2023 apk download hide online status and last seen<br />
|
65 |
-
fm whatsapp 2023 apk download view deleted messages and statuses<br />
|
66 |
-
fm whatsapp 2023 apk download send large files and images<br />
|
67 |
-
fm whatsapp 2023 apk download increase quality of media sharing<br />
|
68 |
-
fm whatsapp 2023 apk download backup and restore chats<br />
|
69 |
-
fm whatsapp 2023 apk download transfer data to new phone<br />
|
70 |
-
fm whatsapp 2023 apk download sync with other devices<br />
|
71 |
-
fm whatsapp 2023 apk download group chat and video call features<br />
|
72 |
-
fm whatsapp 2023 apk download broadcast messages and status updates<br />
|
73 |
-
fm whatsapp 2023 apk download pin chats and mark as unread<br />
|
74 |
-
fm whatsapp 2023 apk download mute notifications and block contacts<br />
|
75 |
-
fm whatsapp 2023 apk download dark mode and night mode options<br />
|
76 |
-
fm whatsapp 2023 apk download auto reply and schedule messages features<br />
|
77 |
-
fm whatsapp 2023 apk download lock app and chats with password or fingerprint</p>
|
78 |
-
<h3>Check for Updates</h3>
|
79 |
-
<p>First, you need to check if there is a new version of FM WhatsApp available. To do this, open the app and go to <code>Menu > Fouad Mods > Updates</code>. You will see a message telling you if there is an update or not. If there is an update, you can tap on <code>Download</code> to get the latest APK file.</p>
|
80 |
-
<h3>Download and Install the Latest Version</h3>
|
81 |
-
<p>Next, you need to download and install the latest version of FM WhatsApp APK file on your device. To do this, follow the same steps as above for downloading and installing FM WhatsApp 2023 APK. You don't need to uninstall the previous version or enable unknown sources again. Just overwrite the existing app with the new one and verify your phone number again. You have successfully updated FM WhatsApp to the latest version.</p>
|
82 |
-
<h2>FAQs about FM WhatsApp</h2>
|
83 |
-
<p>Here are some of the frequently asked questions about FM WhatsApp:</p>
|
84 |
-
<table>
|
85 |
-
<tr><th>Question</th><th>Answer</th></tr>
|
86 |
-
<tr><td>Is FM WhatsApp safe to use?</td><td>FM WhatsApp is safe to use as long as you download it from a trusted source and scan it for viruses before installing it. However, it is not an official app and it may violate some of the terms and conditions of WhatsApp. Therefore, use it at your own risk and discretion.</td></tr>
|
87 |
-
<tr><td>Can I use FM WhatsApp with the official app?</td><td>Yes, you can use FM WhatsApp with the official app if you want to have two WhatsApp accounts on the same device. However, you need to use different phone numbers for each account and install them in separate folders.</td></tr>
|
88 |
-
<tr><td>How can I backup my chats on FM WhatsApp?</td><td>You can backup your chats on FM WhatsApp by going to <code>Menu > Settings > Chats > Chat Backup</code>. You can choose to backup your chats locally or on Google Drive. You can also restore your chats from the backup when you reinstall or update FM WhatsApp.</td></tr>
|
89 |
-
<tr><td>How can I change themes on FM WhatsApp?</td><td>You can change themes on FM WhatsApp by going to <code>Menu > Fouad Mods > Universal > Themes</code>. You can choose from hundreds of themes available or download more from the internet. You can also create your own theme by customizing various elements of the app.</td></tr>
|
90 |
-
<tr><td>How can I contact the developer of FM WhatsApp?</td><td>You can contact the developer of FM WhatsApp by going to <code>Menu > Fouad Mods > About > Contact Me</code>. You can send him an email or follow him on social media platforms like Twitter, Instagram, and Telegram.</td></tr>
|
91 |
-
</table>
|
92 |
-
<h2>Conclusion</h2>
|
93 |
-
<p>In conclusion, FM WhatsApp is a great alternative to the official WhatsApp app that offers more features and customization options. You can download and install FM WhatsApp 2023 APK on your Android device by following the steps mentioned in this article. You can also update it to the latest version whenever there is one available. However, you should be aware of the risks involved in using a modded app and use it responsibly. We hope that this article has helped you learn more about FM WhatsApp 2023 APK download and answered some of your questions.</p>
spaces/1phancelerku/anime-remove-background/Free 3D Models of Orange Trees - Easy to Customize and Render.md
DELETED
@@ -1,136 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>How to Find and Download Free Orange Tree 3D Models</h1>
|
3 |
-
<p>If you are looking for realistic and high-quality orange tree 3D models for your project, you might be wondering where to find them online. Whether you need them for animation, rendering, game development, or any other purpose, you can save time and money by downloading free 3D models from various websites. In this article, we will show you how to find and download free orange tree 3D models from some of the most popular sources on the web.</p>
|
4 |
-
<h2>Introduction</h2>
|
5 |
-
<h3>What are orange tree 3D models and why are they useful?</h3>
|
6 |
-
<p>An orange tree 3D model is a digital representation of an orange tree that can be used in various applications that require 3D graphics. A 3D model consists of vertices, edges, faces, and textures that define the shape, color, and appearance of the object. A 3D model can also have animations, rigging, lighting, and other features that make it more realistic and interactive.</p>
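<p>To make the idea of vertices, edges, and faces concrete, here is a tiny illustrative Python sketch (not taken from any specific 3D tool or file format) that stores a single triangle the way most 3D formats conceptually do: a list of 3D points plus faces that index into that list.</p>
<pre><code># Illustrative only: the core data of a mesh is a vertex list and a face list.
vertices = [
    (0.0, 0.0, 0.0),   # vertex 0
    (1.0, 0.0, 0.0),   # vertex 1
    (0.0, 1.0, 0.0),   # vertex 2
]
faces = [
    (0, 1, 2),         # one triangular face referencing the three vertices above
]

# Edges can be derived from the faces: each pair of consecutive indices is an edge.
edges = set()
for face in faces:
    for i in range(3):
        a, b = face[i], face[(i + 1) % 3]
        edges.add(tuple(sorted((a, b))))

print(len(vertices), "vertices,", len(edges), "edges,", len(faces), "face")
</code></pre>
<p>Textures, animations, and rigging are additional layers of data attached to this same basic structure, which is part of why the file format you download (OBJ, FBX, and so on) matters for which features are preserved.</p>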
|
7 |
-
<h2>orange tree 3d model free download</h2><br /><p><b><b>Download File</b> ❤ <a href="https://jinyurl.com/2uNMqi">https://jinyurl.com/2uNMqi</a></b></p><br /><br />
|
8 |
-
<p>Orange tree 3D models are useful for many reasons. For example, they can help you create stunning scenes and environments for your animations or games. They can also help you visualize and design your own garden or landscape. They can even be used for educational purposes, such as teaching students about botany or ecology.</p>
|
9 |
-
<h3>Where can you find free orange tree 3D models online?</h3>
|
10 |
-
<p>There are many websites that offer free 3D models of various objects, including orange trees. However, not all of them are reliable or easy to use. Some of them may have low-quality models, limited formats, or unclear licenses. Therefore, you need to be careful and selective when choosing a website to download free 3D models from.</p>
|
11 |
-
<p>In this article, we will focus on three websites that are well-known and trusted by many 3D artists and enthusiasts. They are TurboSquid, Sketchfab, and CGTrader. These websites have a large collection of free orange tree 3D models that you can browse, download, and use in your projects. We will explain how to use each website and what to look for when downloading a model.</p>
|
12 |
-
<h2>TurboSquid: A Popular Source of Free 3D Models</h2>
|
13 |
-
<h3>What is TurboSquid and how does it work?</h3>
|
14 |
-
<p>TurboSquid is one of the largest and oldest online marketplaces for 3D models. It was founded in 2000 and has over one million models in its catalog. TurboSquid allows anyone to buy or sell 3D models for various purposes. It also has a section dedicated to free 3D models that anyone can download and use without paying anything.</p>
|
15 |
-
<p>TurboSquid works by connecting buyers and sellers of 3D models. Buyers can search for the models they need by using keywords, filters, categories, or collections. They can also preview the models in different views, check the details and ratings, and download them in various formats. Sellers can upload their models to TurboSquid and set their own prices or offer them for free. TurboSquid also has a quality assurance program called CheckMate that certifies the models that meet certain standards of quality and compatibility.</p>
|
16 |
-
<h3>How to search for free orange tree 3D models on TurboSquid?</h3>
|
17 |
-
<p>Searching for free orange tree 3D models on TurboSquid is easy and fast. Here are the steps you need to follow:</p>
|
18 |
-
<h4>Filter by free, orange, and tree keywords</h4>
|
19 |
-
<p>The first step is to go to the <a href="">free 3D models section</a> of TurboSquid. You will see a search bar where you can enter the keywords that describe the model you are looking for. In this case, you can type "free orange tree" and hit enter. You will see a list of results that match your query.</p>
|
20 |
-
<p>orange tree 3d model free download<br />
|
21 |
-
free 3d orange tree models turbosquid<br />
|
22 |
-
free nature orange-tree 3d models for download<br />
|
23 |
-
free 3d orange models turbosquid<br />
|
24 |
-
orange tree 3d model free obj<br />
|
25 |
-
free low poly orange tree 3d model<br />
|
26 |
-
free 3d model of orange tree with fruits<br />
|
27 |
-
free realistic orange tree 3d model<br />
|
28 |
-
free 3d model of orange tree in pot<br />
|
29 |
-
free animated orange tree 3d model<br />
|
30 |
-
free game ready orange tree 3d model<br />
|
31 |
-
free vr ready orange tree 3d model<br />
|
32 |
-
free 3d model of orange tree branch<br />
|
33 |
-
free 3d model of orange tree leaf<br />
|
34 |
-
free 3d model of orange tree flower<br />
|
35 |
-
free 3d model of orange fruit<br />
|
36 |
-
free 3d model of sliced orange<br />
|
37 |
-
free 3d model of peeled orange<br />
|
38 |
-
free 3d model of orange juice<br />
|
39 |
-
free 3d model of orange peel<br />
|
40 |
-
free blender orange tree 3d model<br />
|
41 |
-
free maya orange tree 3d model<br />
|
42 |
-
free c4d orange tree 3d model<br />
|
43 |
-
free max orange tree 3d model<br />
|
44 |
-
free fbx orange tree 3d model<br />
|
45 |
-
free stl orange tree 3d model<br />
|
46 |
-
free gltf orange tree 3d model<br />
|
47 |
-
free usdz orange tree 3d model<br />
|
48 |
-
free dae orange tree 3d model<br />
|
49 |
-
free ztl orange tree 3d model<br />
|
50 |
-
how to make a free orange tree 3d model<br />
|
51 |
-
where to find a free orange tree 3d model<br />
|
52 |
-
best sites for free orange tree 3d models<br />
|
53 |
-
top rated free orange tree 3d models<br />
|
54 |
-
most downloaded free orange tree 3d models<br />
|
55 |
-
most realistic free orange tree 3d models<br />
|
56 |
-
most detailed free orange tree 3d models<br />
|
57 |
-
most optimized free orange tree 3d models<br />
|
58 |
-
most compatible free orange tree 3d models<br />
|
59 |
-
most versatile free orange tree 3d models<br />
|
60 |
-
high quality free orange tree 3d models<br />
|
61 |
-
high resolution free orange tree 3d models<br />
|
62 |
-
high poly free orange tree 3d models<br />
|
63 |
-
low poly cartoon style free orange tree 3d models <br />
|
64 |
-
low poly stylized free orange tree 3d models <br />
|
65 |
-
low poly pixel art style free orange tree 3d models <br />
|
66 |
-
low poly voxel style free orange tree 3d models <br />
|
67 |
-
low poly flat style free orange tree 3d models <br />
|
68 |
-
low poly minimalist style free orange tree 3d models</p>
|
69 |
-
<h4>Sort by best match, quality, or poly count</h4>
|
70 |
-
<p>The next step is to sort the results by the criteria that matter to you. You can use the drop-down menu on the top right corner of the page to choose how to sort the results. You can sort them by best match, quality, or poly count. Best match will show you the models that are most relevant to your query. Quality will show you the models that have the highest ratings or CheckMate certification. Poly count will show you the models that have the lowest or highest number of polygons.</p>
|
71 |
-
<h4>Check the license, format, and details of each model</h4>
|
72 |
-
<p>The final step is to check the license, format, and details of each model before downloading it. You can click on the thumbnail of each model to see more information about it. You will see a page that shows you the preview images, description, specifications, reviews, and related models of the model. You will also see a section that shows you the license, format, and download options of the model.</p>
|
73 |
-
<p>The license tells you how you can use the model in your project. Some models are royalty-free, which means you can use them for any purpose without paying anything. Some models are editorial-only, which means you can only use them for non-commercial purposes such as news or education. Some models have custom licenses, which means you have to read and follow the terms and conditions of the seller.</p>
|
74 |
-
<p>The format tells you what file types are available for the model. Some models have multiple formats, such as OBJ, FBX, 3DS, or STL. Some models have only one format, such as MAX or BLEND. You should choose the format that is compatible with your software or application.</p>
|
75 |
-
<p>The download options tell you how you can get the model on your device. Some models have direct download links, which means you can download them instantly by clicking on them. Some models have email delivery links, which means you have to enter your email address and wait for the link to be sent to you.</p>
|
76 |
-
<h4>Download the model and use it in your project</h4>
|
77 |
-
<p>Once you have checked everything and found the model that suits your needs, you can download it and use it in your project. You should always respect the license and credit the seller if required. You should also check the quality and compatibility of the model before using it in your project. You may need to adjust some settings or parameters to make it look better or fit better in your scene.</p>
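<p>Before dropping a downloaded model into your scene, it can also help to inspect it quickly in code. The sketch below is one hedged example of doing that in Python with the open-source trimesh library; trimesh is an assumption of this example rather than something TurboSquid requires, and the file name is a placeholder for whatever OBJ file you downloaded.</p>
<pre><code># Quick inspection sketch for a downloaded model (assumes "pip install trimesh").
import trimesh

loaded = trimesh.load("orange_tree.obj")    # placeholder file name

# Some files load as a Scene containing several parts; merge them for inspection.
if isinstance(loaded, trimesh.Scene):
    mesh = loaded.dump(concatenate=True)
else:
    mesh = loaded

print("vertices:", len(mesh.vertices))      # rough poly-count check
print("faces:", len(mesh.faces))
print("bounding box:", mesh.bounds)         # helps judge the model's scale
</code></pre>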
|
78 |
-
<h2>Other Websites to Download Free Orange Tree 3D Models</h2>
|
79 |
-
<h3>Sketchfab: A Platform for 3D and VR Content</h3>
|
80 |
-
<h4>How to find and download free orange tree 3D models on Sketchfab?</h4>
|
81 |
-
<p>Sketchfab is another popular platform for 3D and VR content. It was founded in 2012 and has over four million models in its library. Sketchfab allows anyone to upload, view, share, and download 3D models for various purposes. It also has a section dedicated to free 3D models that anyone can download and use without paying anything.</p>
|
82 |
-
<p>Finding and downloading free orange tree 3D models on Sketchfab is similar to TurboSquid. Here are the steps you need to follow:</p>
|
83 |
-
<ul>
<li>Go to the <a href="">free 3D models section</a> of Sketchfab.</li>
<li>Type "orange tree" in the search bar and hit enter.</li>
<li>Use the filters on the left side of the page to narrow down your results by category, license, format, poly count, or tags.</li>
<li>Click on the thumbnail of each model to see more information about it.</li>
<li>Check the license, format, details, and preview of each model before downloading it.</li>
<li>Click on the download button on the bottom right corner of each model page.</li>
<li>Choose the format that is compatible with your software or application.</li>
<li>Download the model and use it in your project.</li>
</ul>
<h3>CGTrader: A Marketplace for 3D Assets</h3>
|
84 |
-
<h4>How to find and download free orange tree 3D models on CGTrader?</h4>
|
85 |
-
<p>CGTrader is another marketplace for 3D assets. It was founded in 2011 and has over one million models in its catalog. CGTrader allows anyone to buy or sell 3D models for various purposes. It also has a section dedicated to free 3D models that anyone can download and use without paying anything.</p>
|
86 |
-
<p>Finding and downloading free orange tree 3D models on CGTrader is similar to TurboSquid and Sketchfab. Here are the steps you need to follow:</p>
|
87 |
-
<ul>
<li>Go to the <a href="">free 3D models section</a> of CGTrader.</li>
<li>Type "orange tree" in the search bar and hit enter.</li>
<li>Use the filters on the left side of the page to narrow down your results by category, license, format, poly count, or tags.</li>
<li>Click on the thumbnail of each model to see more information about it.</li>
<li>Check the license, format, details, and preview of each model before downloading it.</li>
<li>Click on the download button on the bottom right corner of each model page.</li>
<li>Choose the format that is compatible with your software or application.</li>
<li>Download the model and use it in your project.</li>
</ul>
<h2>Conclusion</h2>
|
88 |
-
<h3>Summary of the main points</h3>
|
89 |
-
<p>In this article, we have shown you how to find and download free orange tree 3D models from some of the most popular websites on the web. We have explained what orange tree 3D models are and why they are useful. We have also given you a step-by-step guide on how to use TurboSquid, Sketchfab, and CGTrader to search for, filter, check, and download free orange tree 3D models for your project.</p>
|
90 |
-
<h3>Call to action and final remarks</h3>
|
91 |
-
<p>We hope you have found this article helpful and informative. If you are looking for realistic and high-quality orange tree 3D models for your project, you can save time and money by downloading them for free from these websites. You can also explore other types of 3D models that are available for free or for a reasonable price.</p>
|
92 |
-
<p>If you have any questions or feedback, please feel free to leave a comment below. We would love to hear from you and help you with your 3D modeling needs. Thank you for reading and happy downloading!</p>
|
93 |
-
<h2>FAQs</h2>
|
94 |
-
<h4>What are the benefits of using free orange tree 3D models?</h4>
|
95 |
-
<p>Some of the benefits of using free orange tree 3D models are:</p>
|
96 |
-
<ul>
|
97 |
-
<li>You can save time and money by not having to create or buy your own models.</li>
|
98 |
-
<li>You can enhance the realism and quality of your project by using models that are made by professional 3D artists.</li>
|
99 |
-
<li>You can learn from the models by studying their structure, texture, lighting, and animation.</li>
|
100 |
-
<li>You can support the 3D community by giving credit and feedback to the creators of the models.</li>
|
101 |
-
</ul>
|
102 |
-
<h4>What are the drawbacks of using free orange tree 3D models?</h4>
|
103 |
-
<p>Some of the drawbacks of using free orange tree 3D models are:</p>
|
104 |
-
<ul>
|
105 |
-
<li>You may not find the exact model that matches your vision or requirements.</li>
|
106 |
-
<li>You may have to deal with compatibility issues or errors when importing or exporting the models.</li>
|
107 |
-
<li>You may have to follow certain restrictions or limitations when using the models in your project.</li>
|
108 |
-
<li>You may have to compete with other users who are using the same models in their projects.</li>
|
109 |
-
</ul>
|
110 |
-
<h4>How can I improve the quality and performance of free orange tree 3D models?</h4>
|
111 |
-
<p>Some of the ways you can improve the quality and performance of free orange tree 3D models are:</p>
|
112 |
-
<ul>
|
113 |
-
<li>You can optimize the poly count, texture size, and level of detail of the models to reduce the load on your system.</li>
|
114 |
-
<li>You can adjust the lighting, shading, and rendering settings of your software or application to enhance the appearance of the models.</li>
|
115 |
-
<li>You can modify or customize the models to suit your needs or preferences.</li>
|
116 |
-
<li>You can combine or blend different models to create unique and diverse variations.</li>
|
117 |
-
</ul>
|
118 |
-
<h4>How can I avoid plagiarism or infringement when using free orange tree 3D models?</h4>
|
119 |
-
<p>Some of the ways you can avoid plagiarism or infringement when using free orange tree 3D models are:</p>
|
120 |
-
<ul>
|
121 |
-
<li>You can always check the license and terms of use of each model before downloading and using it in your project.</li>
|
122 |
-
<li>You can always give proper credit and attribution to the original creator or source of the model.</li>
|
123 |
-
<li>You can always use the model for the intended purpose and not for any illegal or unethical activities.</li>
|
124 |
-
<li>You can always respect the rights and reputation of the creator and other users of the model.</li>
|
125 |
-
</ul>
|
126 |
-
<h4>What are some tips and tricks for finding and downloading free orange tree 3D models?</h4>
|
127 |
-
<p>Some of the tips and tricks for finding and downloading free orange tree 3D models are:</p>
|
128 |
-
<ul>
|
129 |
-
<li>You can use specific keywords, phrases, or tags to narrow down your search results.</li>
|
130 |
-
<li>You can use advanced filters, such as category, license, format, poly count, or tags, to refine your search results.</li>
|
131 |
-
<li>You can use collections, favorites, or bookmarks to save and organize the models that you like or want to use later.</li>
|
132 |
-
<li>You can use ratings, reviews, or comments to evaluate the quality and popularity of the models.</li>
|
133 |
-
<li>You can use previews, screenshots, or videos to see how the models look and behave in different situations.</li>
|
134 |
-
</ul>
|
spaces/1toTree/lora_test/ppdiffusers/pipelines/paint_by_example/pipeline_paint_by_example.py
DELETED
@@ -1,536 +0,0 @@
|
|
1 |
-
# Copyright 2022 The HuggingFace Team. All rights reserved.
|
2 |
-
#
|
3 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
-
# you may not use this file except in compliance with the License.
|
5 |
-
# You may obtain a copy of the License at
|
6 |
-
#
|
7 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
-
#
|
9 |
-
# Unless required by applicable law or agreed to in writing, software
|
10 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
-
# See the License for the specific language governing permissions and
|
13 |
-
# limitations under the License.
|
14 |
-
|
15 |
-
import inspect
|
16 |
-
from typing import Callable, List, Optional, Union
|
17 |
-
|
18 |
-
import numpy as np
|
19 |
-
import paddle
|
20 |
-
import PIL
|
21 |
-
|
22 |
-
from paddlenlp.transformers import CLIPFeatureExtractor
|
23 |
-
|
24 |
-
from ...models import AutoencoderKL, UNet2DConditionModel
|
25 |
-
from ...pipeline_utils import DiffusionPipeline
|
26 |
-
from ...schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
|
27 |
-
from ...utils import logging
|
28 |
-
from ..stable_diffusion import StableDiffusionPipelineOutput
|
29 |
-
from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker
|
30 |
-
from .image_encoder import PaintByExampleImageEncoder
|
31 |
-
|
32 |
-
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
33 |
-
|
34 |
-
|
35 |
-
def prepare_mask_and_masked_image(image, mask):
|
36 |
-
"""
|
37 |
-
Prepares a pair (image, mask) to be consumed by the Paint by Example pipeline. This means that those inputs will be
|
38 |
-
converted to ``paddle.Tensor`` with shapes ``batch x channels x height x width`` where ``channels`` is ``3`` for the
|
39 |
-
``image`` and ``1`` for the ``mask``.
|
40 |
-
|
41 |
-
The ``image`` will be converted to ``paddle.float32`` and normalized to be in ``[-1, 1]``. The ``mask`` will be
|
42 |
-
binarized (``mask > 0.5``) and cast to ``paddle.float32`` too.
|
43 |
-
|
44 |
-
Args:
|
45 |
-
image (Union[np.array, PIL.Image, paddle.Tensor]): The image to inpaint.
|
46 |
-
It can be a ``PIL.Image``, or a ``height x width x 3`` ``np.array`` or a ``channels x height x width``
|
47 |
-
``paddle.Tensor`` or a ``batch x channels x height x width`` ``paddle.Tensor``.
|
48 |
-
mask (Union[np.array, PIL.Image, paddle.Tensor]): The mask to apply to the image, i.e. regions to inpaint.
|
49 |
-
It can be a ``PIL.Image``, or a ``height x width`` ``np.array`` or a ``1 x height x width``
|
50 |
-
``paddle.Tensor`` or a ``batch x 1 x height x width`` ``paddle.Tensor``.
|
51 |
-
|
52 |
-
|
53 |
-
Raises:
|
54 |
-
ValueError: ``paddle.Tensor`` images should be in the ``[-1, 1]`` range. ValueError: ``paddle.Tensor`` mask
|
55 |
-
should be in the ``[0, 1]`` range. ValueError: ``mask`` and ``image`` should have the same spatial dimensions.
|
56 |
-
TypeError: ``mask`` is a ``paddle.Tensor`` but ``image`` is not
|
57 |
-
(or the other way around).
|
58 |
-
|
59 |
-
Returns:
|
60 |
-
tuple[paddle.Tensor]: The pair (mask, masked_image) as ``paddle.Tensor`` with 4
|
61 |
-
dimensions: ``batch x channels x height x width``.
|
62 |
-
"""
|
63 |
-
if isinstance(image, paddle.Tensor):
|
64 |
-
if not isinstance(mask, paddle.Tensor):
|
65 |
-
raise TypeError(f"`image` is a paddle.Tensor but `mask` (type: {type(mask)} is not")
|
66 |
-
|
67 |
-
# Batch single image
|
68 |
-
if image.ndim == 3:
|
69 |
-
assert image.shape[0] == 3, "Image outside a batch should be of shape (3, H, W)"
|
70 |
-
image = image.unsqueeze(0)
|
71 |
-
|
72 |
-
# Batch and add channel dim for single mask
|
73 |
-
if mask.ndim == 2:
|
74 |
-
mask = mask.unsqueeze(0).unsqueeze(0)
|
75 |
-
|
76 |
-
# Batch single mask or add channel dim
|
77 |
-
if mask.ndim == 3:
|
78 |
-
# Batched mask
|
79 |
-
if mask.shape[0] == image.shape[0]:
|
80 |
-
mask = mask.unsqueeze(1)
|
81 |
-
else:
|
82 |
-
mask = mask.unsqueeze(0)
|
83 |
-
|
84 |
-
assert image.ndim == 4 and mask.ndim == 4, "Image and Mask must have 4 dimensions"
|
85 |
-
assert image.shape[-2:] == mask.shape[-2:], "Image and Mask must have the same spatial dimensions"
|
86 |
-
assert image.shape[0] == mask.shape[0], "Image and Mask must have the same batch size"
|
87 |
-
assert mask.shape[1] == 1, "Mask image must have a single channel"
|
88 |
-
|
89 |
-
# Check image is in [-1, 1]
|
90 |
-
if image.min() < -1 or image.max() > 1:
|
91 |
-
raise ValueError("Image should be in [-1, 1] range")
|
92 |
-
|
93 |
-
# Check mask is in [0, 1]
|
94 |
-
if mask.min() < 0 or mask.max() > 1:
|
95 |
-
raise ValueError("Mask should be in [0, 1] range")
|
96 |
-
|
97 |
-
# paint-by-example inverses the mask
|
98 |
-
mask = 1 - mask
|
99 |
-
|
100 |
-
# Binarize mask
|
101 |
-
mask[mask < 0.5] = 0
|
102 |
-
mask[mask >= 0.5] = 1
|
103 |
-
|
104 |
-
# Image as float32
|
105 |
-
image = image.cast(paddle.float32)
|
106 |
-
elif isinstance(mask, paddle.Tensor):
|
107 |
-
raise TypeError(f"`mask` is a paddle.Tensor but `image` (type: {type(image)} is not")
|
108 |
-
else:
|
109 |
-
if isinstance(image, PIL.Image.Image):
|
110 |
-
image = [image]
|
111 |
-
|
112 |
-
image = np.concatenate([np.array(i.convert("RGB"))[None, :] for i in image], axis=0)
|
113 |
-
image = image.transpose(0, 3, 1, 2)
|
114 |
-
image = paddle.to_tensor(image).cast(paddle.float32) / 127.5 - 1.0
|
115 |
-
|
116 |
-
# preprocess mask
|
117 |
-
if isinstance(mask, PIL.Image.Image):
|
118 |
-
mask = [mask]
|
119 |
-
|
120 |
-
mask = np.concatenate([np.array(m.convert("L"))[None, None, :] for m in mask], axis=0)
|
121 |
-
mask = mask.astype(np.float32) / 255.0
|
122 |
-
|
123 |
-
# paint-by-example inverses the mask
|
124 |
-
mask = 1 - mask
|
125 |
-
|
126 |
-
mask[mask < 0.5] = 0
|
127 |
-
mask[mask >= 0.5] = 1
|
128 |
-
mask = paddle.to_tensor(mask)
|
129 |
-
|
130 |
-
masked_image = image * mask
|
131 |
-
|
132 |
-
return mask, masked_image
|
133 |
-
|
134 |
-
|
135 |
-
class PaintByExamplePipeline(DiffusionPipeline):
|
136 |
-
r"""
|
137 |
-
Pipeline for text-guided image inpainting using Stable Diffusion. *This is an experimental feature*.
|
138 |
-
|
139 |
-
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
|
140 |
-
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
|
141 |
-
|
142 |
-
Args:
|
143 |
-
vae ([`AutoencoderKL`]):
|
144 |
-
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
|
145 |
-
text_encoder ([`CLIPTextModel`]):
|
146 |
-
Frozen text-encoder. Stable Diffusion uses the text portion of
|
147 |
-
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
|
148 |
-
the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
|
149 |
-
tokenizer (`CLIPTokenizer`):
|
150 |
-
Tokenizer of class
|
151 |
-
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
|
152 |
-
unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
|
153 |
-
scheduler ([`SchedulerMixin`]):
|
154 |
-
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
|
155 |
-
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
|
156 |
-
safety_checker ([`StableDiffusionSafetyChecker`]):
|
157 |
-
Classification module that estimates whether generated images could be considered offensive or harmful.
|
158 |
-
Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
|
159 |
-
feature_extractor ([`CLIPFeatureExtractor`]):
|
160 |
-
Model that extracts features from generated images to be used as inputs for the `safety_checker`.
|
161 |
-
"""
|
162 |
-
_optional_components = ["safety_checker"]
|
163 |
-
|
164 |
-
def __init__(
|
165 |
-
self,
|
166 |
-
vae: AutoencoderKL,
|
167 |
-
image_encoder: PaintByExampleImageEncoder,
|
168 |
-
unet: UNet2DConditionModel,
|
169 |
-
scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
|
170 |
-
safety_checker: StableDiffusionSafetyChecker,
|
171 |
-
feature_extractor: CLIPFeatureExtractor,
|
172 |
-
requires_safety_checker: bool = False,
|
173 |
-
):
|
174 |
-
super().__init__()
|
175 |
-
|
176 |
-
self.register_modules(
|
177 |
-
vae=vae,
|
178 |
-
image_encoder=image_encoder,
|
179 |
-
unet=unet,
|
180 |
-
scheduler=scheduler,
|
181 |
-
safety_checker=safety_checker,
|
182 |
-
feature_extractor=feature_extractor,
|
183 |
-
)
|
184 |
-
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
185 |
-
self.register_to_config(requires_safety_checker=requires_safety_checker)
|
186 |
-
|
187 |
-
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker
|
188 |
-
def run_safety_checker(self, image, dtype):
|
189 |
-
if self.safety_checker is not None:
|
190 |
-
safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pd")
|
191 |
-
image, has_nsfw_concept = self.safety_checker(
|
192 |
-
images=image, clip_input=safety_checker_input.pixel_values.cast(dtype)
|
193 |
-
)
|
194 |
-
else:
|
195 |
-
has_nsfw_concept = None
|
196 |
-
return image, has_nsfw_concept
|
197 |
-
|
198 |
-
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
|
199 |
-
def prepare_extra_step_kwargs(self, generator, eta):
|
200 |
-
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
|
201 |
-
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
|
202 |
-
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
|
203 |
-
# and should be between [0, 1]
|
204 |
-
|
205 |
-
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
206 |
-
extra_step_kwargs = {}
|
207 |
-
if accepts_eta:
|
208 |
-
extra_step_kwargs["eta"] = eta
|
209 |
-
|
210 |
-
# check if the scheduler accepts generator
|
211 |
-
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
212 |
-
if accepts_generator:
|
213 |
-
extra_step_kwargs["generator"] = generator
|
214 |
-
return extra_step_kwargs
|
215 |
-
|
216 |
-
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents
|
217 |
-
def decode_latents(self, latents):
|
218 |
-
latents = 1 / 0.18215 * latents
|
219 |
-
image = self.vae.decode(latents).sample
|
220 |
-
image = (image / 2 + 0.5).clip(0, 1)
|
221 |
-
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
|
222 |
-
image = image.transpose([0, 2, 3, 1]).cast("float32").numpy()
|
223 |
-
return image
|
224 |
-
|
225 |
-
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_image_variation.StableDiffusionImageVariationPipeline.check_inputs
|
226 |
-
def check_inputs(self, image, height, width, callback_steps):
|
227 |
-
if (
|
228 |
-
not isinstance(image, paddle.Tensor)
|
229 |
-
and not isinstance(image, PIL.Image.Image)
|
230 |
-
and not isinstance(image, list)
|
231 |
-
):
|
232 |
-
raise ValueError(
|
233 |
-
"`image` has to be of type `paddle.Tensor` or `PIL.Image.Image` or `List[PIL.Image.Image]` but is"
|
234 |
-
f" {type(image)}"
|
235 |
-
)
|
236 |
-
|
237 |
-
if height % 8 != 0 or width % 8 != 0:
|
238 |
-
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
|
239 |
-
|
240 |
-
if (callback_steps is None) or (
|
241 |
-
callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
|
242 |
-
):
|
243 |
-
raise ValueError(
|
244 |
-
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
|
245 |
-
f" {type(callback_steps)}."
|
246 |
-
)
|
247 |
-
|
248 |
-
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
|
249 |
-
def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, generator, latents=None):
|
250 |
-
shape = [batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor]
|
251 |
-
if isinstance(generator, list) and len(generator) != batch_size:
|
252 |
-
raise ValueError(
|
253 |
-
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
|
254 |
-
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
|
255 |
-
)
|
256 |
-
|
257 |
-
if latents is None:
|
258 |
-
if isinstance(generator, list):
|
259 |
-
shape = [
|
260 |
-
1,
|
261 |
-
] + shape[1:]
|
262 |
-
latents = [paddle.randn(shape, generator=generator[i], dtype=dtype) for i in range(batch_size)]
|
263 |
-
latents = paddle.concat(latents, axis=0)
|
264 |
-
else:
|
265 |
-
latents = paddle.randn(shape, generator=generator, dtype=dtype)
|
266 |
-
else:
|
267 |
-
if latents.shape != shape:
|
268 |
-
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
|
269 |
-
|
270 |
-
# scale the initial noise by the standard deviation required by the scheduler
|
271 |
-
latents = latents * self.scheduler.init_noise_sigma
|
272 |
-
return latents
|
273 |
-
|
274 |
-
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_inpaint.StableDiffusionInpaintPipeline.prepare_mask_latents
|
275 |
-
def prepare_mask_latents(
|
276 |
-
self, mask, masked_image, batch_size, height, width, dtype, generator, do_classifier_free_guidance
|
277 |
-
):
|
278 |
-
# resize the mask to latents shape as we concatenate the mask to the latents
|
279 |
-
# we do that before converting to dtype to avoid breaking in case we're using cpu_offload
|
280 |
-
# and half precision
|
281 |
-
mask = paddle.nn.functional.interpolate(
|
282 |
-
mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor)
|
283 |
-
)
|
284 |
-
mask = mask.cast(dtype)
|
285 |
-
|
286 |
-
masked_image = masked_image.cast(dtype)
|
287 |
-
|
288 |
-
# encode the mask image into latents space so we can concatenate it to the latents
|
289 |
-
if isinstance(generator, list):
|
290 |
-
masked_image_latents = [
|
291 |
-
self.vae.encode(masked_image[i : i + 1]).latent_dist.sample(generator=generator[i])
|
292 |
-
for i in range(batch_size)
|
293 |
-
]
|
294 |
-
masked_image_latents = paddle.concat(masked_image_latents, axis=0)
|
295 |
-
else:
|
296 |
-
masked_image_latents = self.vae.encode(masked_image).latent_dist.sample(generator=generator)
|
297 |
-
masked_image_latents = 0.18215 * masked_image_latents
|
298 |
-
|
299 |
-
# duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method
|
300 |
-
if mask.shape[0] < batch_size:
|
301 |
-
if not batch_size % mask.shape[0] == 0:
|
302 |
-
raise ValueError(
|
303 |
-
"The passed mask and the required batch size don't match. Masks are supposed to be duplicated to"
|
304 |
-
f" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number"
|
305 |
-
" of masks that you pass is divisible by the total requested batch size."
|
306 |
-
)
|
307 |
-
mask = mask.tile([batch_size // mask.shape[0], 1, 1, 1])
|
308 |
-
if masked_image_latents.shape[0] < batch_size:
|
309 |
-
if not batch_size % masked_image_latents.shape[0] == 0:
|
310 |
-
raise ValueError(
|
311 |
-
"The passed images and the required batch size don't match. Images are supposed to be duplicated"
|
312 |
-
f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed."
|
313 |
-
" Make sure the number of images that you pass is divisible by the total requested batch size."
|
314 |
-
)
|
315 |
-
masked_image_latents = masked_image_latents.tile([batch_size // masked_image_latents.shape[0], 1, 1, 1])
|
316 |
-
|
317 |
-
mask = paddle.concat([mask] * 2) if do_classifier_free_guidance else mask
|
318 |
-
masked_image_latents = (
|
319 |
-
paddle.concat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents
|
320 |
-
)
|
321 |
-
|
322 |
-
# aligning device to prevent device errors when concating it with the latent model input
|
323 |
-
masked_image_latents = masked_image_latents.cast(dtype)
|
324 |
-
return mask, masked_image_latents
|
325 |
-
|
326 |
-
def _encode_image(self, image, num_images_per_prompt, do_classifier_free_guidance):
|
327 |
-
# dtype = self.image_encoder.dtype
|
328 |
-
|
329 |
-
if not isinstance(image, paddle.Tensor):
|
330 |
-
image = self.feature_extractor(images=image, return_tensors="pd").pixel_values
|
331 |
-
|
332 |
-
# image = image.cast(dtype)
|
333 |
-
image_embeddings = self.image_encoder(image)
|
334 |
-
|
335 |
-
# duplicate image embeddings for each generation per prompt, using mps friendly method
|
336 |
-
bs_embed, seq_len, _ = image_embeddings.shape
|
337 |
-
image_embeddings = image_embeddings.tile([1, num_images_per_prompt, 1])
|
338 |
-
image_embeddings = image_embeddings.reshape([bs_embed * num_images_per_prompt, seq_len, -1])
|
339 |
-
|
340 |
-
if do_classifier_free_guidance:
|
341 |
-
uncond_embeddings = self.image_encoder.uncond_vector
|
342 |
-
uncond_embeddings = uncond_embeddings.tile([1, image_embeddings.shape[0], 1])
|
343 |
-
uncond_embeddings = uncond_embeddings.reshape([bs_embed * num_images_per_prompt, 1, -1])
|
344 |
-
|
345 |
-
# For classifier free guidance, we need to do two forward passes.
|
346 |
-
# Here we concatenate the unconditional and text embeddings into a single batch
|
347 |
-
# to avoid doing two forward passes
|
348 |
-
image_embeddings = paddle.concat([uncond_embeddings, image_embeddings])
|
349 |
-
|
350 |
-
return image_embeddings
|
351 |
-
|
352 |
-
@paddle.no_grad()
|
353 |
-
def __call__(
|
354 |
-
self,
|
355 |
-
example_image: Union[paddle.Tensor, PIL.Image.Image],
|
356 |
-
image: Union[paddle.Tensor, PIL.Image.Image],
|
357 |
-
mask_image: Union[paddle.Tensor, PIL.Image.Image],
|
358 |
-
height: Optional[int] = None,
|
359 |
-
width: Optional[int] = None,
|
360 |
-
num_inference_steps: int = 50,
|
361 |
-
guidance_scale: float = 5.0,
|
362 |
-
num_images_per_prompt: Optional[int] = 1,
|
363 |
-
eta: float = 0.0,
|
364 |
-
generator: Optional[Union[paddle.Generator, List[paddle.Generator]]] = None,
|
365 |
-
latents: Optional[paddle.Tensor] = None,
|
366 |
-
output_type: Optional[str] = "pil",
|
367 |
-
return_dict: bool = True,
|
368 |
-
callback: Optional[Callable[[int, int, paddle.Tensor], None]] = None,
|
369 |
-
callback_steps: Optional[int] = 1,
|
370 |
-
):
|
371 |
-
r"""
|
372 |
-
Function invoked when calling the pipeline for generation.
|
373 |
-
|
374 |
-
Args:
|
375 |
-
example_image (`paddle.Tensor` or `PIL.Image.Image` or `List[PIL.Image.Image]`):
|
376 |
-
The exemplar image to guide the image generation.
|
377 |
-
image (`paddle.Tensor` or `PIL.Image.Image` or `List[PIL.Image.Image]`):
|
378 |
-
`Image`, or tensor representing an image batch which will be inpainted, *i.e.* parts of the image will
|
379 |
-
be masked out with `mask_image` and repainted according to `prompt`.
|
380 |
-
mask_image (`paddle.Tensor` or `PIL.Image.Image` or `List[PIL.Image.Image]`):
|
381 |
-
`Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
|
382 |
-
repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted
|
383 |
-
to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L)
|
384 |
-
instead of 3, so the expected shape would be `(B, H, W, 1)`.
|
385 |
-
height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
|
386 |
-
The height in pixels of the generated image.
|
387 |
-
width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
|
388 |
-
The width in pixels of the generated image.
|
389 |
-
num_inference_steps (`int`, *optional*, defaults to 50):
|
390 |
-
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
391 |
-
expense of slower inference.
|
392 |
-
guidance_scale (`float`, *optional*, defaults to 5.0):
|
393 |
-
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
|
394 |
-
`guidance_scale` is defined as `w` of equation 2. of [Imagen
|
395 |
-
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
|
396 |
-
1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
|
397 |
-
usually at the expense of lower image quality.
|
398 |
-
num_images_per_prompt (`int`, *optional*, defaults to 1):
|
399 |
-
The number of images to generate per prompt.
|
400 |
-
eta (`float`, *optional*, defaults to 0.0):
|
401 |
-
Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
|
402 |
-
[`schedulers.DDIMScheduler`], will be ignored for others.
|
403 |
-
generator (`paddle.Generator` or `List[paddle.Generator]`, *optional*):
|
404 |
-
One or a list of paddle generator(s)
|
405 |
-
to make generation deterministic.
|
406 |
-
latents (`paddle.Tensor`, *optional*):
|
407 |
-
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
|
408 |
-
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
|
409 |
-
tensor will be generated by sampling using the supplied random `generator`.
|
410 |
-
output_type (`str`, *optional*, defaults to `"pil"`):
|
411 |
-
The output format of the generate image. Choose between
|
412 |
-
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
|
413 |
-
return_dict (`bool`, *optional*, defaults to `True`):
|
414 |
-
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
|
415 |
-
plain tuple.
|
416 |
-
callback (`Callable`, *optional*):
|
417 |
-
A function that will be called every `callback_steps` steps during inference. The function will be
|
418 |
-
called with the following arguments: `callback(step: int, timestep: int, latents: paddle.Tensor)`.
|
419 |
-
callback_steps (`int`, *optional*, defaults to 1):
|
420 |
-
The frequency at which the `callback` function will be called. If not specified, the callback will be
|
421 |
-
called at every step.
|
422 |
-
|
423 |
-
Returns:
|
424 |
-
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
|
425 |
-
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
|
426 |
-
When returning a tuple, the first element is a list with the generated images, and the second element is a
|
427 |
-
list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
|
428 |
-
(nsfw) content, according to the `safety_checker`.
|
429 |
-
"""
|
430 |
-
# 1. Define call parameters
|
431 |
-
if isinstance(image, PIL.Image.Image):
|
432 |
-
batch_size = 1
|
433 |
-
elif isinstance(image, list):
|
434 |
-
batch_size = len(image)
|
435 |
-
else:
|
436 |
-
batch_size = image.shape[0]
|
437 |
-
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
|
438 |
-
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
|
439 |
-
# corresponds to doing no classifier free guidance.
|
440 |
-
do_classifier_free_guidance = guidance_scale > 1.0
|
441 |
-
|
442 |
-
# 2. Preprocess mask and image
|
443 |
-
mask, masked_image = prepare_mask_and_masked_image(image, mask_image)
|
444 |
-
height, width = masked_image.shape[-2:]
|
445 |
-
|
446 |
-
# 3. Check inputs
|
447 |
-
self.check_inputs(example_image, height, width, callback_steps)
|
448 |
-
|
449 |
-
# 4. Encode input image
|
450 |
-
image_embeddings = self._encode_image(example_image, num_images_per_prompt, do_classifier_free_guidance)
|
451 |
-
|
452 |
-
# 5. set timesteps
|
453 |
-
self.scheduler.set_timesteps(num_inference_steps)
|
454 |
-
timesteps = self.scheduler.timesteps
|
455 |
-
|
456 |
-
# 6. Prepare latent variables
|
457 |
-
num_channels_latents = self.vae.config.latent_channels
|
458 |
-
latents = self.prepare_latents(
|
459 |
-
batch_size * num_images_per_prompt,
|
460 |
-
num_channels_latents,
|
461 |
-
height,
|
462 |
-
width,
|
463 |
-
image_embeddings.dtype,
|
464 |
-
generator,
|
465 |
-
latents,
|
466 |
-
)
|
467 |
-
|
468 |
-
# 7. Prepare mask latent variables
|
469 |
-
mask, masked_image_latents = self.prepare_mask_latents(
|
470 |
-
mask,
|
471 |
-
masked_image,
|
472 |
-
batch_size * num_images_per_prompt,
|
473 |
-
height,
|
474 |
-
width,
|
475 |
-
image_embeddings.dtype,
|
476 |
-
generator,
|
477 |
-
do_classifier_free_guidance,
|
478 |
-
)
|
479 |
-
|
480 |
-
# 8. Check that sizes of mask, masked image and latents match
|
481 |
-
num_channels_mask = mask.shape[1]
|
482 |
-
num_channels_masked_image = masked_image_latents.shape[1]
|
483 |
-
if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels:
|
484 |
-
raise ValueError(
|
485 |
-
f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects"
|
486 |
-
f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +"
|
487 |
-
f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}"
|
488 |
-
f" = {num_channels_latents+num_channels_masked_image+num_channels_mask}. Please verify the config of"
|
489 |
-
" `pipeline.unet` or your `mask_image` or `image` input."
|
490 |
-
)
|
491 |
-
|
492 |
-
# 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
|
493 |
-
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
|
494 |
-
|
495 |
-
# 10. Denoising loop
|
496 |
-
num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
|
497 |
-
with self.progress_bar(total=num_inference_steps) as progress_bar:
|
498 |
-
for i, t in enumerate(timesteps):
|
499 |
-
# expand the latents if we are doing classifier free guidance
|
500 |
-
latent_model_input = paddle.concat([latents] * 2) if do_classifier_free_guidance else latents
|
501 |
-
|
502 |
-
# concat latents, mask, masked_image_latents in the channel dimension
|
503 |
-
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
|
504 |
-
latent_model_input = paddle.concat([latent_model_input, masked_image_latents, mask], axis=1)
|
505 |
-
|
506 |
-
# predict the noise residual
|
507 |
-
noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=image_embeddings).sample
|
508 |
-
|
509 |
-
# perform guidance
|
510 |
-
if do_classifier_free_guidance:
|
511 |
-
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
|
512 |
-
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
|
513 |
-
|
514 |
-
# compute the previous noisy sample x_t -> x_t-1
|
515 |
-
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
|
516 |
-
|
517 |
-
# call the callback, if provided
|
518 |
-
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
|
519 |
-
progress_bar.update()
|
520 |
-
if callback is not None and i % callback_steps == 0:
|
521 |
-
callback(i, t, latents)
|
522 |
-
|
523 |
-
# 11. Post-processing
|
524 |
-
image = self.decode_latents(latents)
|
525 |
-
|
526 |
-
# 12. Run safety checker
|
527 |
-
image, has_nsfw_concept = self.run_safety_checker(image, image_embeddings.dtype)
|
528 |
-
|
529 |
-
# 13. Convert to PIL
|
530 |
-
if output_type == "pil":
|
531 |
-
image = self.numpy_to_pil(image)
|
532 |
-
|
533 |
-
if not return_dict:
|
534 |
-
return (image, has_nsfw_concept)
|
535 |
-
|
536 |
-
return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
|
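For orientation, a minimal usage sketch of the pipeline defined above. The checkpoint name, the image files, and the import path are assumptions for the example and may differ in this ppdiffusers snapshot:

```python
import PIL.Image
from ppdiffusers import PaintByExamplePipeline  # assumed export location

pipe = PaintByExamplePipeline.from_pretrained("Fantasy-Studio/Paint-by-Example")

init_image = PIL.Image.open("scene.png").convert("RGB").resize((512, 512))
mask_image = PIL.Image.open("mask.png").convert("RGB").resize((512, 512))
example = PIL.Image.open("reference_object.png").convert("RGB").resize((512, 512))

# white pixels in mask_image are repainted to look like the exemplar image
result = pipe(example_image=example, image=init_image, mask_image=mask_image,
              num_inference_steps=50, guidance_scale=5.0).images[0]
result.save("paint_by_example_out.png")
```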
spaces/AI-Dashboards/CP.Matplotlib.NetworkX.Streamlit.PyVis.Graphviz/README.md
DELETED
@@ -1,13 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: 🕸️📈Graph NLP Matplotlib NetworkX Streamlit PyViz Graphviz🩺
|
3 |
-
emoji: 📉🕸️📈
|
4 |
-
colorFrom: pink
|
5 |
-
colorTo: blue
|
6 |
-
sdk: streamlit
|
7 |
-
sdk_version: 1.2.0
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
license: mit
|
11 |
-
---
|
12 |
-
|
13 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
|
spaces/AI-Hobbyist/Hoyo-RVC/docs/training_tips_ko.md
DELETED
@@ -1,53 +0,0 @@
|
|
Notes and tips on RVC training
======================================
These tips explain how data training is carried out.

# Training flow
The explanation follows the steps in the training tab of the GUI.

## step1
Set the experiment name. You can also choose here whether the model should take pitch into account.
The data for each experiment is placed in `/logs/experiment name/`.

## step2a
Load and preprocess the audio files.

### Loading audio
If you specify a folder containing audio files, the audio files in that folder are loaded automatically.
For example, if you specify `C:Users\hoge\voices`, then `C:Users\hoge\voices\voice.mp3` will be loaded, but `C:Users\hoge\voices\dir\voice.mp3` will not.

Audio is loaded internally with ffmpeg, so any extension supported by ffmpeg is read automatically.
After ffmpeg converts the audio to int16, it is converted to float32 and normalized to the range -1 to 1.
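As a tiny illustration of that normalization step (a sketch, not the repository's code):

```python
import numpy as np

pcm16 = np.array([0, 16384, -32768], dtype=np.int16)  # toy int16 samples
audio = pcm16.astype(np.float32) / 32768.0            # float32 in [-1.0, 1.0)
print(audio)                                          # [ 0.   0.5 -1. ]
```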

### Denoising
The audio files are denoised with scipy's filtfilt.
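For orientation only, a minimal sketch of zero-phase high-pass filtering with scipy's filtfilt; the filter order and cutoff below are assumptions for the example, not necessarily the values used by RVC:

```python
import numpy as np
from scipy.signal import butter, filtfilt

sr = 16000
noisy = np.random.randn(sr * 2).astype(np.float64)   # 2 s of toy audio

b, a = butter(N=5, Wn=48, btype="high", fs=sr)       # high-pass at 48 Hz (assumed)
clean = filtfilt(b, a, noisy)                        # zero-phase filtering
```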

### Audio splitting
The input audio is first split at points where silence lasts longer than a certain duration (max_sil_kept=5 seconds?). After splitting on silence, the audio is cut every 4 seconds with an overlap of 0.3 seconds. For each segment of up to 4 seconds, the volume is normalized, the wav file is saved to `/logs/experiment name/0_gt_wavs`, and a copy resampled to 16k is saved as a wav file to `/logs/experiment name/1_16k_wavs`.
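A rough sketch of the fixed-window part of that splitting (4-second windows, 0.3-second overlap); the silence detection and normalization are omitted, and the function name is made up for the example:

```python
import numpy as np

def split_fixed_windows(audio: np.ndarray, sr: int,
                        win_sec: float = 4.0, overlap_sec: float = 0.3):
    # cut audio into ~4 s chunks that overlap by 0.3 s (illustrative only)
    win = int(win_sec * sr)
    hop = win - int(overlap_sec * sr)
    return [audio[start:start + win] for start in range(0, len(audio), hop)]

chunks = split_fixed_windows(np.zeros(16000 * 10, dtype=np.float32), sr=16000)
print([len(c) for c in chunks])   # the last chunk may be shorter than 4 s
```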

## step2b
### Pitch extraction
Pitch information is extracted from the wav files. The pitch (=f0) is extracted with the methods built into parselmouth or pyworld and saved to `/logs/experiment name/2a_f0`. The pitch values are then converted to a log scale, mapped to integers from 1 to 255, and saved to `/logs/experiment name/2b-f0nsf`.
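A sketch of what that extraction could look like with pyworld; the wav path and the 50 to 1100 Hz bounds used for the 1 to 255 mapping are assumptions for the example:

```python
import numpy as np
import pyworld as pw
import soundfile as sf

x, sr = sf.read("logs/my_exp/1_16k_wavs/0_0.wav")   # hypothetical path
x = x.astype(np.float64)

f0, t = pw.dio(x, sr)               # coarse pitch track
f0 = pw.stonemask(x, f0, t, sr)     # refined f0; 0 where unvoiced

f0_min, f0_max = 50.0, 1100.0       # assumed bounds for the log mapping
coarse = np.zeros_like(f0, dtype=np.int64)
voiced = f0 > 0
log_scaled = (np.log(np.clip(f0[voiced], f0_min, f0_max)) - np.log(f0_min)) / (
    np.log(f0_max) - np.log(f0_min))
coarse[voiced] = 1 + np.rint(log_scaled * 254).astype(np.int64)

np.save("2a_f0.npy", f0)            # raw pitch, as described above
np.save("2b-f0nsf.npy", coarse)     # pitch quantized to integers 1..255
```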

### feature_print extraction
The wav files are converted to embeddings in advance using HuBERT. The wav files saved in `/logs/experiment name/1_16k_wavs` are read, converted by HuBERT into 256-dimensional features, and saved in npy format to `/logs/experiment name/3_feature256`.

## step3
Train the model.

### Glossary for beginners
In deep learning, the dataset is split and training proceeds little by little. In one model update (step), batch_size items of data are looked at and the prediction error is corrected. Doing this once over the entire dataset counts as one epoch.

The training time is therefore roughly the time per step x (number of items in the dataset / batch size) x the number of epochs. In general, a larger batch size makes training more stable (the time per step divided by the batch size becomes smaller) but uses more GPU memory. GPU RAM usage can be checked with the nvidia-smi command. Training finishes in less time if you raise the batch size as far as your environment allows.
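A quick worked example of that estimate, with made-up numbers:

```python
seconds_per_step = 0.5          # measured time for one optimizer step
dataset_size     = 2000         # number of training segments
batch_size       = 8
epochs           = 200

steps_per_epoch = dataset_size / batch_size                  # 250 steps
total_hours = seconds_per_step * steps_per_epoch * epochs / 3600
print(f"about {total_hours:.1f} hours of training")          # about 6.9 hours
```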

### Specifying a pretrained model
RVC starts model training from pretrained weights so that it can be trained even on a small dataset. By default it loads `rvc-location/pretrained/f0G40k.pth` and `rvc-location/pretrained/f0D40k.pth`. During training, the model parameters are saved every save_every_epoch as `logs/experiment name/G_{}.pth` and `logs/experiment name/D_{}.pth`; by pointing to these paths you can resume training, or start training from weights learned in a different experiment.

### Training the index
RVC stores the HuBERT feature values used during training, and at inference time it searches for feature values similar to those used during training. To make this search fast, an index is trained in advance.
The index is trained with Faiss, a library for approximate nearest neighbor search. The feature values in `/logs/experiment name/3_feature256` are read and concatenated into `/logs/experiment name/total_fea.npy`, and the index trained from them is saved as `/logs/experiment name/add_XXX.index`.
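For orientation, a minimal Faiss sketch along those lines; the experiment path is hypothetical, and a simple exact inner-product index stands in for whatever index type the repository actually builds:

```python
import glob
import numpy as np
import faiss

# concatenate the per-utterance 256-dim HuBERT features
paths = sorted(glob.glob("logs/my_exp/3_feature256/*.npy"))
feats = np.concatenate([np.load(p) for p in paths]).astype(np.float32)
np.save("logs/my_exp/total_fea.npy", feats)

index = faiss.IndexFlatIP(feats.shape[1])       # exact inner-product search
index.add(feats)
faiss.write_index(index, "logs/my_exp/added.index")

# at inference time: find the stored features closest to a query vector
scores, ids = index.search(feats[:1], 4)
```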

### Button descriptions
- モデルのトレーニング (Train model): after running through step2b, press this button to train the model.
- 特徴インデックスのトレーニング (Train feature index): after the model has been trained, train the index.
- ワンクリックトレーニング (One-click training): runs everything up to step2b, the model training, and the feature index training in one go.
spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/clap/training/zero_shot.py
DELETED
@@ -1,95 +0,0 @@
|
|
1 |
-
# NOTE: This script is currently not supported for CLAP.
|
2 |
-
import logging
|
3 |
-
from contextlib import suppress
|
4 |
-
|
5 |
-
import torch
|
6 |
-
import torch.nn.functional as F
|
7 |
-
from tqdm import tqdm
|
8 |
-
|
9 |
-
from open_clip import tokenize
|
10 |
-
from .imagenet_zeroshot_data import imagenet_classnames, openai_imagenet_template
|
11 |
-
|
12 |
-
|
13 |
-
def zero_shot_classifier(model, classnames, templates, args):
|
14 |
-
with torch.no_grad():
|
15 |
-
zeroshot_weights = []
|
16 |
-
for classname in tqdm(classnames):
|
17 |
-
texts = [template(classname) for template in templates] # format with class
|
18 |
-
texts = tokenize(texts).to(args.device) # tokenize
|
19 |
-
if args.distributed and not args.horovod:
|
20 |
-
class_embeddings = model.module.encode_text(texts)
|
21 |
-
else:
|
22 |
-
class_embeddings = model.encode_text(texts)
|
23 |
-
class_embedding = F.normalize(class_embeddings, dim=-1).mean(dim=0)
|
24 |
-
class_embedding /= class_embedding.norm()
|
25 |
-
zeroshot_weights.append(class_embedding)
|
26 |
-
zeroshot_weights = torch.stack(zeroshot_weights, dim=1).to(args.device)
|
27 |
-
return zeroshot_weights
|
28 |
-
|
29 |
-
|
30 |
-
def accuracy(output, target, topk=(1,)):
|
31 |
-
pred = output.topk(max(topk), 1, True, True)[1].t()
|
32 |
-
correct = pred.eq(target.view(1, -1).expand_as(pred))
|
33 |
-
return [
|
34 |
-
float(correct[:k].reshape(-1).float().sum(0, keepdim=True).cpu().numpy())
|
35 |
-
for k in topk
|
36 |
-
]
|
37 |
-
|
38 |
-
|
39 |
-
def run(model, classifier, dataloader, args):
|
40 |
-
autocast = torch.cuda.amp.autocast if args.precision == "amp" else suppress
|
41 |
-
with torch.no_grad():
|
42 |
-
top1, top5, n = 0.0, 0.0, 0.0
|
43 |
-
for images, target in tqdm(dataloader, unit_scale=args.batch_size):
|
44 |
-
images = images.to(args.device)
|
45 |
-
target = target.to(args.device)
|
46 |
-
|
47 |
-
with autocast():
|
48 |
-
# predict
|
49 |
-
if args.distributed and not args.horovod:
|
50 |
-
image_features = model.module.encode_image(images)
|
51 |
-
else:
|
52 |
-
image_features = model.encode_image(images)
|
53 |
-
image_features = F.normalize(image_features, dim=-1)
|
54 |
-
logits = 100.0 * image_features @ classifier
|
55 |
-
|
56 |
-
# measure accuracy
|
57 |
-
acc1, acc5 = accuracy(logits, target, topk=(1, 5))
|
58 |
-
top1 += acc1
|
59 |
-
top5 += acc5
|
60 |
-
n += images.size(0)
|
61 |
-
|
62 |
-
top1 = top1 / n
|
63 |
-
top5 = top5 / n
|
64 |
-
return top1, top5
|
65 |
-
|
66 |
-
|
67 |
-
def zero_shot_eval(model, data, epoch, args):
|
68 |
-
if "imagenet-val" not in data and "imagenet-v2" not in data:
|
69 |
-
return {}
|
70 |
-
if args.zeroshot_frequency == 0:
|
71 |
-
return {}
|
72 |
-
if (epoch % args.zeroshot_frequency) != 0 and epoch != args.epochs:
|
73 |
-
return {}
|
74 |
-
|
75 |
-
logging.info("Starting zero-shot imagenet.")
|
76 |
-
|
77 |
-
logging.info("Building zero-shot classifier")
|
78 |
-
classifier = zero_shot_classifier(
|
79 |
-
model, imagenet_classnames, openai_imagenet_template, args
|
80 |
-
)
|
81 |
-
|
82 |
-
logging.info("Using classifier")
|
83 |
-
results = {}
|
84 |
-
if "imagenet-val" in data:
|
85 |
-
top1, top5 = run(model, classifier, data["imagenet-val"].dataloader, args)
|
86 |
-
results["imagenet-zeroshot-val-top1"] = top1
|
87 |
-
results["imagenet-zeroshot-val-top5"] = top5
|
88 |
-
if "imagenet-v2" in data:
|
89 |
-
top1, top5 = run(model, classifier, data["imagenet-v2"].dataloader, args)
|
90 |
-
results["imagenetv2-zeroshot-val-top1"] = top1
|
91 |
-
results["imagenetv2-zeroshot-val-top5"] = top5
|
92 |
-
|
93 |
-
logging.info("Finished zero-shot imagenet.")
|
94 |
-
|
95 |
-
return results
|
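To make the core of the script above concrete, here is a toy, self-contained rendition of the zero-shot step; random tensors stand in for the CLIP text and image embeddings, so only the shapes and the logits/accuracy arithmetic mirror the real code:

```python
import torch
import torch.nn.functional as F

n_classes, dim, batch = 10, 512, 8

# stand-ins for the averaged per-class text embeddings and the image embeddings
classifier = F.normalize(torch.randn(dim, n_classes), dim=0)    # (dim, n_classes)
image_features = F.normalize(torch.randn(batch, dim), dim=-1)   # (batch, dim)
target = torch.randint(0, n_classes, (batch,))

logits = 100.0 * image_features @ classifier                    # (batch, n_classes)

# same top-1 / top-5 bookkeeping as accuracy() above
pred = logits.topk(5, dim=1).indices.t()                        # (5, batch)
correct = pred.eq(target.view(1, -1).expand_as(pred))
top1 = correct[:1].reshape(-1).float().sum().item() / batch
top5 = correct[:5].reshape(-1).float().sum().item() / batch
print(top1, top5)
```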
spaces/AIWaves/Software_Company/README.md
DELETED
@@ -1,13 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: Software Company
|
3 |
-
emoji: 🐨
|
4 |
-
colorFrom: green
|
5 |
-
colorTo: indigo
|
6 |
-
sdk: gradio
|
7 |
-
sdk_version: 3.44.4
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
license: apache-2.0
|
11 |
-
---
|
12 |
-
|
13 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
spaces/AIWaves/Software_Company/src/agents/Component/ToolComponent.py
DELETED
@@ -1,887 +0,0 @@
|
|
1 |
-
from abc import abstractmethod
|
2 |
-
import uuid
|
3 |
-
from text2vec import semantic_search
|
4 |
-
from utils import (
|
5 |
-
get_relevant_history,
|
6 |
-
load_knowledge_base_qa,
|
7 |
-
load_knowledge_base_UnstructuredFile,
|
8 |
-
get_embedding,
|
9 |
-
extract,
|
10 |
-
)
|
11 |
-
import json
|
12 |
-
from typing import Dict, List
|
13 |
-
import os
|
14 |
-
from googleapiclient.discovery import build
|
15 |
-
import requests
|
16 |
-
from selenium import webdriver
|
17 |
-
from selenium.webdriver.common.by import By
|
18 |
-
from selenium.webdriver.support.ui import WebDriverWait
|
19 |
-
from selenium.webdriver.support import expected_conditions as EC
|
20 |
-
from bs4 import BeautifulSoup
|
21 |
-
import base64
|
22 |
-
import re
|
23 |
-
from datetime import datetime, timedelta
|
24 |
-
from typing import Tuple, List, Any, Dict
|
25 |
-
from email.mime.text import MIMEText
|
26 |
-
from email.mime.multipart import MIMEMultipart
|
27 |
-
from google.auth.transport.requests import Request
|
28 |
-
from google.oauth2.credentials import Credentials
|
29 |
-
from google_auth_oauthlib.flow import InstalledAppFlow
|
30 |
-
from googleapiclient.discovery import build
|
31 |
-
from googleapiclient.errors import HttpError
|
32 |
-
from tqdm import tqdm
|
33 |
-
|
34 |
-
class ToolComponent:
|
35 |
-
def __init__(self):
|
36 |
-
pass
|
37 |
-
|
38 |
-
@abstractmethod
|
39 |
-
def func(self):
|
40 |
-
pass
|
41 |
-
|
42 |
-
class KnowledgeBaseComponent(ToolComponent):
|
43 |
-
"""
|
44 |
-
Inject knowledge base
|
45 |
-
top_k : Top_k with the highest matching degree
|
46 |
-
type : "QA" or others
|
47 |
-
knowledge_base(json_path) : knowledge_base_path
|
48 |
-
"""
|
49 |
-
def __init__(self, top_k, type, knowledge_base):
|
50 |
-
super().__init__()
|
51 |
-
self.top_k = top_k
|
52 |
-
self.type = type
|
53 |
-
self.knowledge_base = knowledge_base
|
54 |
-
|
55 |
-
if self.type == "QA":
|
56 |
-
(
|
57 |
-
self.kb_embeddings,
|
58 |
-
self.kb_questions,
|
59 |
-
self.kb_answers,
|
60 |
-
self.kb_chunks,
|
61 |
-
) = load_knowledge_base_qa(self.knowledge_base)
|
62 |
-
else:
|
63 |
-
self.kb_embeddings, self.kb_chunks = load_knowledge_base_UnstructuredFile(
|
64 |
-
self.knowledge_base
|
65 |
-
)
|
66 |
-
|
67 |
-
def func(self, agent):
|
68 |
-
query = (
|
69 |
-
agent.long_term_memory[-1]["content"]
|
70 |
-
if len(agent.long_term_memory) > 0
|
71 |
-
else ""
|
72 |
-
)
|
73 |
-
knowledge = ""
|
74 |
-
query = extract(query, "query")
|
75 |
-
query_embedding = get_embedding(query)
|
76 |
-
hits = semantic_search(query_embedding, self.kb_embeddings, top_k=50)
|
77 |
-
hits = hits[0]
|
78 |
-
temp = []
|
79 |
-
if self.type == "QA":
|
80 |
-
for hit in hits:
|
81 |
-
matching_idx = hit["corpus_id"]
|
82 |
-
if self.kb_chunks[matching_idx] in temp:
|
83 |
-
pass
|
84 |
-
else:
|
85 |
-
knowledge = (
|
86 |
-
knowledge
|
87 |
-
+ f"question:{self.kb_questions[matching_idx]},answer:{self.kb_answers[matching_idx]}\n\n"
|
88 |
-
)
|
89 |
-
temp.append(self.kb_answers[matching_idx])
|
90 |
-
if len(temp) == 1:
|
91 |
-
break
|
92 |
-
print(hits[0]["score"])
|
93 |
-
score = hits[0]["score"]
|
94 |
-
if score < 0.5:
|
95 |
-
return {"prompt": "No matching knowledge base"}
|
96 |
-
else:
|
97 |
-
return {"prompt": "The relevant content is: " + knowledge + "\n"}
|
98 |
-
else:
|
99 |
-
for hit in hits:
|
100 |
-
matching_idx = hit["corpus_id"]
|
101 |
-
if self.kb_chunks[matching_idx] in temp:
|
102 |
-
pass
|
103 |
-
else:
|
104 |
-
knowledge = knowledge + f"{self.kb_answers[matching_idx]}\n\n"
|
105 |
-
temp.append(self.kb_answers[matching_idx])
|
106 |
-
if len(temp) == self.top_k:
|
107 |
-
break
|
108 |
-
print(hits[0]["score"])
|
109 |
-
score = hits[0]["score"]
|
110 |
-
if score < 0.5:
|
111 |
-
return {"prompt": "No matching knowledge base"}
|
112 |
-
else:
|
113 |
-
print(knowledge)
|
114 |
-
return {"prompt": "The relevant content is: " + knowledge + "\n"}
|
115 |
-
|
116 |
-
|
117 |
-
class StaticComponent(ToolComponent):
|
118 |
-
"Return static response"
|
119 |
-
def __init__(self, output):
|
120 |
-
super().__init__()
|
121 |
-
self.output = output
|
122 |
-
|
123 |
-
def func(self, agent):
|
124 |
-
outputdict = {"response": self.output}
|
125 |
-
return outputdict
|
126 |
-
|
127 |
-
|
128 |
-
class ExtractComponent(ToolComponent):
|
129 |
-
"""
|
130 |
-
Extract keywords based on the current scene and store them in the environment
|
131 |
-
extract_words(list) : Keywords to be extracted
|
132 |
-
system_prompt & last_prompt : Prompt to extract keywords
|
133 |
-
"""
|
134 |
-
def __init__(
|
135 |
-
self,
|
136 |
-
extract_words,
|
137 |
-
system_prompt,
|
138 |
-
last_prompt=None,
|
139 |
-
):
|
140 |
-
super().__init__()
|
141 |
-
self.extract_words = extract_words
|
142 |
-
self.system_prompt = system_prompt
|
143 |
-
self.default_prompt = (
|
144 |
-
"Please strictly adhere to the following format for outputting:\n"
|
145 |
-
)
|
146 |
-
for extract_word in extract_words:
|
147 |
-
self.default_prompt += (
|
148 |
-
f"<{extract_word}> the content you need to extract </{extract_word}>"
|
149 |
-
)
|
150 |
-
self.last_prompt = last_prompt if last_prompt else self.default_prompt
|
151 |
-
|
152 |
-
def func(self, agent):
|
153 |
-
response = agent.LLM.get_response(
|
154 |
-
agent.long_term_memory,
|
155 |
-
self.system_prompt,
|
156 |
-
self.last_prompt,
|
157 |
-
stream=False,
|
158 |
-
)
|
159 |
-
for extract_word in self.extract_words:
|
160 |
-
key = extract(response, extract_word)
|
161 |
-
key = key if key else response
|
162 |
-
agent.environment.shared_memory[extract_word] = key
|
163 |
-
|
164 |
-
return {}
|
165 |
-
|
166 |
-
|
167 |
-
"""Search sources: chatgpt/search engines/specific search sources/can even be multimodal (if it comes to clothing)"""
|
168 |
-
|
169 |
-
|
170 |
-
class WebSearchComponent(ToolComponent):
|
171 |
-
"""search engines"""
|
172 |
-
|
173 |
-
__ENGINE_NAME__: List = ["google", "bing"]
|
174 |
-
|
175 |
-
def __init__(self, engine_name: str, api: Dict):
|
176 |
-
"""
|
177 |
-
:param engine_name: The name of the search engine used
|
178 |
-
:param api: Pass in a dictionary, such as {"bing":"key1", "google":"key2", ...}, of course each value can also be a list, or more complicated
|
179 |
-
"""
|
180 |
-
super(WebSearchComponent, self).__init__()
|
181 |
-
"""Determine whether the key and engine_name of the api are legal"""
|
182 |
-
|
183 |
-
assert engine_name in WebSearchComponent.__ENGINE_NAME__
|
184 |
-
for api_name in api:
|
185 |
-
assert api_name in WebSearchComponent.__ENGINE_NAME__
|
186 |
-
|
187 |
-
self.api = api
|
188 |
-
self.engine_name = engine_name
|
189 |
-
|
190 |
-
self.search: Dict = {"bing": self._bing_search, "google": self._google_search}
|
191 |
-
|
192 |
-
def _bing_search(self, query: str, **kwargs):
|
193 |
-
"""Initialize search hyperparameters"""
|
194 |
-
subscription_key = self.api["bing"]
|
195 |
-
search_url = "https://api.bing.microsoft.com/v7.0/search"
|
196 |
-
headers = {"Ocp-Apim-Subscription-Key": subscription_key}
|
197 |
-
params = {
|
198 |
-
"q": query,
|
199 |
-
"textDecorations": True,
|
200 |
-
"textFormat": "HTML",
|
201 |
-
"count": 10,
|
202 |
-
}
|
203 |
-
"""start searching"""
|
204 |
-
response = requests.get(search_url, headers=headers, params=params)
|
205 |
-
response.raise_for_status()
|
206 |
-
results = response.json()["webPages"]["value"]
|
207 |
-
"""execute"""
|
208 |
-
metadata_results = []
|
209 |
-
for result in results:
|
210 |
-
metadata_result = {
|
211 |
-
"snippet": result["snippet"],
|
212 |
-
"title": result["name"],
|
213 |
-
"link": result["url"],
|
214 |
-
}
|
215 |
-
metadata_results.append(metadata_result)
|
216 |
-
return {"meta data": metadata_results}
|
217 |
-
|
218 |
-
def _google_search(self, query: str, **kwargs):
|
219 |
-
"""Initialize search hyperparameters"""
|
220 |
-
api_key = self.api[self.engine_name]["api_key"]
|
221 |
-
cse_id = self.api[self.engine_name]["cse_id"]
|
222 |
-
service = build("customsearch", "v1", developerKey=api_key)
|
223 |
-
"""start searching"""
|
224 |
-
results = (
|
225 |
-
service.cse().list(q=query, cx=cse_id, num=10, **kwargs).execute()["items"]
|
226 |
-
)
|
227 |
-
"""execute"""
|
228 |
-
metadata_results = []
|
229 |
-
for result in results:
|
230 |
-
metadata_result = {
|
231 |
-
"snippet": result["snippet"],
|
232 |
-
"title": result["title"],
|
233 |
-
"link": result["link"],
|
234 |
-
}
|
235 |
-
metadata_results.append(metadata_result)
|
236 |
-
return {"meta data": metadata_results}
|
237 |
-
|
238 |
-
def func(self, agent, **kwargs) -> Dict:
|
239 |
-
query = (
|
240 |
-
agent.long_term_memory[-1]["content"]
|
241 |
-
if len(agent.long_term_memory) > 0
|
242 |
-
else " "
|
243 |
-
)
|
244 |
-
response = agent.LLM.get_response(
|
245 |
-
None,
|
246 |
-
system_prompt=f"Please analyze the provided conversation and identify keywords that can be used for a search engine query. Format the output as <keywords>extracted keywords</keywords>:\nConversation:\n{query}",
|
247 |
-
stream=False,
|
248 |
-
)
|
249 |
-
response = extract(response, "keywords")
|
250 |
-
query = response if response else query
|
251 |
-
|
252 |
-
search_results = self.search[self.engine_name](query=query, **kwargs)
|
253 |
-
information = ""
|
254 |
-
for i in search_results["meta data"][:5]:
|
255 |
-
information += i["snippet"]
|
256 |
-
return {
|
257 |
-
"prompt": "You can refer to the following information to reply:\n"
|
258 |
-
+ information
|
259 |
-
}
|
260 |
-
|
261 |
-
def convert_search_engine_to(self, engine_name):
|
262 |
-
assert engine_name in WebSearchComponent.__ENGINE_NAME__
|
263 |
-
self.engine_name = engine_name
|
264 |
-
|
265 |
-
|
266 |
-
class WebCrawlComponent(ToolComponent):
|
267 |
-
"""Open a single web page for crawling"""
|
268 |
-
|
269 |
-
def __init__(self):
|
270 |
-
super(WebCrawlComponent, self).__init__()
|
271 |
-
|
272 |
-
def func(self, agent_dict) -> Dict:
|
273 |
-
url = agent_dict["url"]
|
274 |
-
print(f"crawling {url} ......")
|
275 |
-
content = ""
|
276 |
-
"""Crawling content from url may need to be carried out according to different websites, such as wiki, baidu, zhihu, etc."""
|
277 |
-
driver = webdriver.Chrome()
|
278 |
-
try:
|
279 |
-
"""open url"""
|
280 |
-
driver.get(url)
|
281 |
-
|
282 |
-
"""wait 20 second"""
|
283 |
-
wait = WebDriverWait(driver, 20)
|
284 |
-
wait.until(EC.presence_of_element_located((By.TAG_NAME, "body")))
|
285 |
-
|
286 |
-
"""crawl code"""
|
287 |
-
page_source = driver.page_source
|
288 |
-
|
289 |
-
"""parse"""
|
290 |
-
soup = BeautifulSoup(page_source, "html.parser")
|
291 |
-
|
292 |
-
"""concatenate"""
|
293 |
-
for paragraph in soup.find_all("p"):
|
294 |
-
content = f"{content}\n{paragraph.get_text()}"
|
295 |
-
except Exception as e:
|
296 |
-
print("Error:", e)
|
297 |
-
finally:
|
298 |
-
"""quit"""
|
299 |
-
driver.quit()
|
300 |
-
return {"content": content.strip()}
|
301 |
-
|
302 |
-
|
303 |
-
class MailComponent(ToolComponent):
|
304 |
-
__VALID_ACTION__ = ["read", "send"]
|
305 |
-
|
306 |
-
def __init__(
|
307 |
-
self, cfg_file: str, default_action: str = "read", name: str = "e-mail"
|
308 |
-
):
|
309 |
-
"""'../config/google_mail.json'"""
|
310 |
-
super(MailComponent, self).__init__(name)
|
311 |
-
self.name = name
|
312 |
-
assert (
|
313 |
-
default_action.lower() in self.__VALID_ACTION__
|
314 |
-
), f"Action `{default_action}` is not allowed! The valid action is in `{self.__VALID_ACTION__}`"
|
315 |
-
self.action = default_action.lower()
|
316 |
-
self.credential = self._login(cfg_file)
|
317 |
-
|
318 |
-
def _login(self, cfg_file: str):
|
319 |
-
SCOPES = [
|
320 |
-
"https://www.googleapis.com/auth/gmail.readonly",
|
321 |
-
"https://www.googleapis.com/auth/gmail.send",
|
322 |
-
]
|
323 |
-
creds = None
|
324 |
-
if os.path.exists("token.json"):
|
325 |
-
print("Login Successfully!")
|
326 |
-
creds = Credentials.from_authorized_user_file("token.json", SCOPES)
|
327 |
-
if not creds or not creds.valid:
|
328 |
-
print("Please authorize in an open browser.")
|
329 |
-
if creds and creds.expired and creds.refresh_token:
|
330 |
-
creds.refresh(Request())
|
331 |
-
else:
|
332 |
-
flow = InstalledAppFlow.from_client_secrets_file(cfg_file, SCOPES)
|
333 |
-
creds = flow.run_local_server(port=0)
|
334 |
-
# Save the credentials for the next run
|
335 |
-
with open("token.json", "w") as token:
|
336 |
-
token.write(creds.to_json())
|
337 |
-
return creds
|
338 |
-
|
339 |
-
def _read(self, mail_dict: dict):
|
340 |
-
credential = self.credential
|
341 |
-
state = mail_dict["state"] if "state" in mail_dict else None
|
342 |
-
time_between = (
|
343 |
-
mail_dict["time_between"] if "time_between" in mail_dict else None
|
344 |
-
)
|
345 |
-
sender_mail = mail_dict["sender_mail"] if "sender_mail" in mail_dict else None
|
346 |
-
only_both = mail_dict["only_both"] if "only_both" in mail_dict else False
|
347 |
-
order_by_time = (
|
348 |
-
mail_dict["order_by_time"] if "order_by_time" in mail_dict else "descend"
|
349 |
-
)
|
350 |
-
include_word = (
|
351 |
-
mail_dict["include_word"] if "include_word" in mail_dict else None
|
352 |
-
)
|
353 |
-
exclude_word = (
|
354 |
-
mail_dict["exclude_word"] if "exclude_word" in mail_dict else None
|
355 |
-
)
|
356 |
-
MAX_SEARCH_CNT = (
|
357 |
-
mail_dict["MAX_SEARCH_CNT"] if "MAX_SEARCH_CNT" in mail_dict else 50
|
358 |
-
)
|
359 |
-
number = mail_dict["number"] if "number" in mail_dict else 10
|
360 |
-
if state is None:
|
361 |
-
state = "all"
|
362 |
-
if time_between is not None:
|
363 |
-
assert isinstance(time_between, tuple)
|
364 |
-
assert len(time_between) == 2
|
365 |
-
assert state in ["all", "unread", "read", "sent"]
|
366 |
-
if only_both:
|
367 |
-
assert sender_mail is not None
|
368 |
-
if sender_mail is not None:
|
369 |
-
assert isinstance(sender_mail, str)
|
370 |
-
assert credential
|
371 |
-
assert order_by_time in ["descend", "ascend"]
|
372 |
-
|
373 |
-
def generate_query():
|
374 |
-
query = ""
|
375 |
-
if state in ["unread", "read"]:
|
376 |
-
query = f"is:{state}"
|
377 |
-
if state in ["sent"]:
|
378 |
-
query = f"in:{state}"
|
379 |
-
if only_both:
|
380 |
-
query = f"{query} from:{sender_mail} OR to:{sender_mail}"
|
381 |
-
if sender_mail is not None and not only_both:
|
382 |
-
query = f"{query} from:({sender_mail})"
|
383 |
-
if include_word is not None:
|
384 |
-
query = f"{query} {include_word}"
|
385 |
-
if exclude_word is not None:
|
386 |
-
query = f"{query} -{exclude_word}"
|
387 |
-
if time_between is not None:
|
388 |
-
TIME_FORMAT = "%Y/%m/%d"
|
389 |
-
t1, t2 = time_between
|
390 |
-
if t1 == "now":
|
391 |
-
t1 = datetime.now().strftime(TIME_FORMAT)
|
392 |
-
if t2 == "now":
|
393 |
-
t2 = datetime.now().strftime(TIME_FORMAT)
|
394 |
-
if isinstance(t1, str) and isinstance(t2, str):
|
395 |
-
t1 = datetime.strptime(t1, TIME_FORMAT)
|
396 |
-
t2 = datetime.strptime(t2, TIME_FORMAT)
|
397 |
-
elif isinstance(t1, str) and isinstance(t2, int):
|
398 |
-
t1 = datetime.strptime(t1, TIME_FORMAT)
|
399 |
-
t2 = t1 + timedelta(days=t2)
|
400 |
-
elif isinstance(t1, int) and isinstance(t2, str):
|
401 |
-
t2 = datetime.strptime(t2, TIME_FORMAT)
|
402 |
-
t1 = t2 + timedelta(days=t1)
|
403 |
-
else:
|
404 |
-
assert False, "invalid time"
|
405 |
-
if t1 > t2:
|
406 |
-
t1, t2 = t2, t1
|
407 |
-
query = f"{query} after:{t1.strftime(TIME_FORMAT)} before:{t2.strftime(TIME_FORMAT)}"
|
408 |
-
return query.strip()
|
409 |
-
|
410 |
-
def sort_by_time(data: List[Dict]):
|
411 |
-
if order_by_time == "descend":
|
412 |
-
reverse = True
|
413 |
-
else:
|
414 |
-
reverse = False
|
415 |
-
sorted_data = sorted(
|
416 |
-
data,
|
417 |
-
key=lambda x: datetime.strptime(x["time"], "%Y-%m-%d %H:%M:%S"),
|
418 |
-
reverse=reverse,
|
419 |
-
)
|
420 |
-
return sorted_data
|
421 |
-
|
422 |
-
try:
|
423 |
-
service = build("gmail", "v1", credentials=credential)
|
424 |
-
results = (
|
425 |
-
service.users()
|
426 |
-
.messages()
|
427 |
-
.list(userId="me", labelIds=["INBOX"], q=generate_query())
|
428 |
-
.execute()
|
429 |
-
)
|
430 |
-
|
431 |
-
messages = results.get("messages", [])
|
432 |
-
email_data = list()
|
433 |
-
|
434 |
-
if not messages:
|
435 |
-
print("No eligible emails.")
|
436 |
-
return None
|
437 |
-
else:
|
438 |
-
pbar = tqdm(total=min(MAX_SEARCH_CNT, len(messages)))
|
439 |
-
for cnt, message in enumerate(messages):
|
440 |
-
pbar.update(1)
|
441 |
-
if cnt >= MAX_SEARCH_CNT:
|
442 |
-
break
|
443 |
-
msg = (
|
444 |
-
service.users()
|
445 |
-
.messages()
|
446 |
-
.get(
|
447 |
-
userId="me",
|
448 |
-
id=message["id"],
|
449 |
-
format="full",
|
450 |
-
metadataHeaders=None,
|
451 |
-
)
|
452 |
-
.execute()
|
453 |
-
)
|
454 |
-
|
455 |
-
subject = ""
|
456 |
-
for header in msg["payload"]["headers"]:
|
457 |
-
if header["name"] == "Subject":
|
458 |
-
subject = header["value"]
|
459 |
-
break
|
460 |
-
|
461 |
-
sender = ""
|
462 |
-
for header in msg["payload"]["headers"]:
|
463 |
-
if header["name"] == "From":
|
464 |
-
sender = re.findall(
|
465 |
-
r"\b[\w\.-]+@[\w\.-]+\.\w+\b", header["value"]
|
466 |
-
)[0]
|
467 |
-
break
|
468 |
-
body = ""
|
469 |
-
if "parts" in msg["payload"]:
|
470 |
-
for part in msg["payload"]["parts"]:
|
471 |
-
if part["mimeType"] == "text/plain":
|
472 |
-
data = part["body"]["data"]
|
473 |
-
body = base64.urlsafe_b64decode(data).decode("utf-8")
|
474 |
-
break
|
475 |
-
|
476 |
-
email_info = {
|
477 |
-
"sender": sender,
|
478 |
-
"time": datetime.fromtimestamp(
|
479 |
-
int(msg["internalDate"]) / 1000
|
480 |
-
).strftime("%Y-%m-%d %H:%M:%S"),
|
481 |
-
"subject": subject,
|
482 |
-
"body": body,
|
483 |
-
}
|
484 |
-
email_data.append(email_info)
|
485 |
-
pbar.close()
|
486 |
-
email_data = sort_by_time(email_data)[0:number]
|
487 |
-
return {"results": email_data}
|
488 |
-
except Exception as e:
|
489 |
-
print(e)
|
490 |
-
return None
|
491 |
-
|
492 |
-
def _send(self, mail_dict: dict):
|
493 |
-
recipient_mail = mail_dict["recipient_mail"]
|
494 |
-
subject = mail_dict["subject"]
|
495 |
-
body = mail_dict["body"]
|
496 |
-
credential = self.credential
|
497 |
-
service = build("gmail", "v1", credentials=credential)
|
498 |
-
|
499 |
-
message = MIMEMultipart()
|
500 |
-
message["to"] = recipient_mail
|
501 |
-
message["subject"] = subject
|
502 |
-
|
503 |
-
message.attach(MIMEText(body, "plain"))
|
504 |
-
|
505 |
-
raw_message = base64.urlsafe_b64encode(message.as_bytes()).decode("utf-8")
|
506 |
-
try:
|
507 |
-
message = (
|
508 |
-
service.users()
|
509 |
-
.messages()
|
510 |
-
.send(userId="me", body={"raw": raw_message})
|
511 |
-
.execute()
|
512 |
-
)
|
513 |
-
return {"state": True}
|
514 |
-
except HttpError as error:
|
515 |
-
print(error)
|
516 |
-
return {"state": False}
|
517 |
-
|
518 |
-
def func(self, mail_dict: dict):
|
519 |
-
if "action" in mail_dict:
|
520 |
-
assert mail_dict["action"].lower() in self.__VALID_ACTION__
|
521 |
-
self.action = mail_dict["action"]
|
522 |
-
functions = {"read": self._read, "send": self._send}
|
523 |
-
return functions[self.action](mail_dict)
|
524 |
-
|
525 |
-
def convert_action_to(self, action_name: str):
|
526 |
-
assert (
|
527 |
-
action_name.lower() in self.__VALID_ACTION__
|
528 |
-
), f"Action `{action_name}` is not allowed! The valid action is in `{self.__VALID_ACTION__}`"
|
529 |
-
self.action = action_name.lower()
|
530 |
-
|
531 |
-
|
532 |
-
class WeatherComponet(ToolComponent):
|
533 |
-
def __init__(self, api_key, name="weather", TIME_FORMAT="%Y-%m-%d"):
|
534 |
-
super(WeatherComponet, self).__init__(name)
|
535 |
-
self.name = name
|
536 |
-
self.TIME_FORMAT = TIME_FORMAT
|
537 |
-
self.api_key = api_key
|
538 |
-
|
539 |
-
def _parse(self, data):
|
540 |
-
dict_data: dict = {}
|
541 |
-
for item in data["data"]:
|
542 |
-
date = item["datetime"]
|
543 |
-
dict_data[date] = {}
|
544 |
-
if "weather" in item:
|
545 |
-
dict_data[date]["description"] = item["weather"]["description"]
|
546 |
-
mapping = {
|
547 |
-
"temp": "temperature",
|
548 |
-
"max_temp": "max_temperature",
|
549 |
-
"min_temp": "min_temperature",
|
550 |
-
"precip": "accumulated_precipitation",
|
551 |
-
}
|
552 |
-
for key in ["temp", "max_temp", "min_temp", "precip"]:
|
553 |
-
if key in item:
|
554 |
-
dict_data[date][mapping[key]] = item[key]
|
555 |
-
return dict_data
|
556 |
-
|
557 |
-
def _query(self, city_name, country_code, start_date, end_date):
|
558 |
-
"""https://www.weatherbit.io/api/historical-weather-daily"""
|
559 |
-
# print(datetime.strftime(start_date, self.TIME_FORMAT), datetime.strftime(datetime.now(), self.TIME_FORMAT), end_date, datetime.strftime(datetime.now()+timedelta(days=1), self.TIME_FORMAT))
|
560 |
-
if start_date == datetime.strftime(
|
561 |
-
datetime.now(), self.TIME_FORMAT
|
562 |
-
) and end_date == datetime.strftime(
|
563 |
-
datetime.now() + timedelta(days=1), self.TIME_FORMAT
|
564 |
-
):
|
565 |
-
"""today"""
|
566 |
-
url = f"https://api.weatherbit.io/v2.0/current?city={city_name}&country={country_code}&key={self.api_key}"
|
567 |
-
else:
|
568 |
-
url = f"https://api.weatherbit.io/v2.0/history/daily?&city={city_name}&country={country_code}&start_date={start_date}&end_date={end_date}&key={self.api_key}"
|
569 |
-
response = requests.get(url)
|
570 |
-
data = response.json()
|
571 |
-
return self._parse(data)
|
572 |
-
|
573 |
-
def func(self, weather_dict: Dict) -> Dict:
|
574 |
-
TIME_FORMAT = self.TIME_FORMAT
|
575 |
-
# Beijing, Shanghai
|
576 |
-
city_name = weather_dict["city_name"]
|
577 |
-
# CN, US
|
578 |
-
country_code = weather_dict["country_code"]
|
579 |
-
# 2020-02-02
|
580 |
-
start_date = datetime.strftime(
|
581 |
-
datetime.strptime(weather_dict["start_date"], self.TIME_FORMAT),
|
582 |
-
self.TIME_FORMAT,
|
583 |
-
)
|
584 |
-
end_date = weather_dict["end_date"] if "end_date" in weather_dict else None
|
585 |
-
if end_date is None:
|
586 |
-
end_date = datetime.strftime(
|
587 |
-
datetime.strptime(start_date, TIME_FORMAT) + timedelta(days=-1),
|
588 |
-
TIME_FORMAT,
|
589 |
-
)
|
590 |
-
else:
|
591 |
-
end_date = datetime.strftime(
|
592 |
-
datetime.strptime(weather_dict["end_date"], self.TIME_FORMAT),
|
593 |
-
self.TIME_FORMAT,
|
594 |
-
)
|
595 |
-
if datetime.strptime(start_date, TIME_FORMAT) > datetime.strptime(
|
596 |
-
end_date, TIME_FORMAT
|
597 |
-
):
|
598 |
-
start_date, end_date = end_date, start_date
|
599 |
-
assert start_date != end_date
|
600 |
-
return self._query(city_name, country_code, start_date, end_date)
|
601 |
-
|
602 |
-
|
603 |
-
class TranslateComponent(ToolComponent):
|
604 |
-
__SUPPORT_LANGUAGE__ = [
|
605 |
-
"af",
|
606 |
-
"am",
|
607 |
-
"ar",
|
608 |
-
"as",
|
609 |
-
"az",
|
610 |
-
"ba",
|
611 |
-
"bg",
|
612 |
-
"bn",
|
613 |
-
"bo",
|
614 |
-
"bs",
|
615 |
-
"ca",
|
616 |
-
"cs",
|
617 |
-
"cy",
|
618 |
-
"da",
|
619 |
-
"de",
|
620 |
-
"dsb",
|
621 |
-
"dv",
|
622 |
-
"el",
|
623 |
-
"en",
|
624 |
-
"es",
|
625 |
-
"et",
|
626 |
-
"eu",
|
627 |
-
"fa",
|
628 |
-
"fi",
|
629 |
-
"fil",
|
630 |
-
"fj",
|
631 |
-
"fo",
|
632 |
-
"fr",
|
633 |
-
"fr-CA",
|
634 |
-
"ga",
|
635 |
-
"gl",
|
636 |
-
"gom",
|
637 |
-
"gu",
|
638 |
-
"ha",
|
639 |
-
"he",
|
640 |
-
"hi",
|
641 |
-
"hr",
|
642 |
-
"hsb",
|
643 |
-
"ht",
|
644 |
-
"hu",
|
645 |
-
"hy",
|
646 |
-
"id",
|
647 |
-
"ig",
|
648 |
-
"ikt",
|
649 |
-
"is",
|
650 |
-
"it",
|
651 |
-
"iu",
|
652 |
-
"iu-Latn",
|
653 |
-
"ja",
|
654 |
-
"ka",
|
655 |
-
"kk",
|
656 |
-
"km",
|
657 |
-
"kmr",
|
658 |
-
"kn",
|
659 |
-
"ko",
|
660 |
-
"ku",
|
661 |
-
"ky",
|
662 |
-
"ln",
|
663 |
-
"lo",
|
664 |
-
"lt",
|
665 |
-
"lug",
|
666 |
-
"lv",
|
667 |
-
"lzh",
|
668 |
-
"mai",
|
669 |
-
"mg",
|
670 |
-
"mi",
|
671 |
-
"mk",
|
672 |
-
"ml",
|
673 |
-
"mn-Cyrl",
|
674 |
-
"mn-Mong",
|
675 |
-
"mr",
|
676 |
-
"ms",
|
677 |
-
"mt",
|
678 |
-
"mww",
|
679 |
-
"my",
|
680 |
-
"nb",
|
681 |
-
"ne",
|
682 |
-
"nl",
|
683 |
-
"nso",
|
684 |
-
"nya",
|
685 |
-
"or",
|
686 |
-
"otq",
|
687 |
-
"pa",
|
688 |
-
"pl",
|
689 |
-
"prs",
|
690 |
-
"ps",
|
691 |
-
"pt",
|
692 |
-
"pt-PT",
|
693 |
-
"ro",
|
694 |
-
"ru",
|
695 |
-
"run",
|
696 |
-
"rw",
|
697 |
-
"sd",
|
698 |
-
"si",
|
699 |
-
"sk",
|
700 |
-
"sl",
|
701 |
-
"sm",
|
702 |
-
"sn",
|
703 |
-
"so",
|
704 |
-
"sq",
|
705 |
-
"sr-Cyrl",
|
706 |
-
"sr-Latn",
|
707 |
-
"st",
|
708 |
-
"sv",
|
709 |
-
"sw",
|
710 |
-
"ta",
|
711 |
-
"te",
|
712 |
-
"th",
|
713 |
-
"ti",
|
714 |
-
"tk",
|
715 |
-
"tlh-Latn",
|
716 |
-
"tlh-Piqd",
|
717 |
-
"tn",
|
718 |
-
"to",
|
719 |
-
"tr",
|
720 |
-
"tt",
|
721 |
-
"ty",
|
722 |
-
"ug",
|
723 |
-
"uk",
|
724 |
-
"ur",
|
725 |
-
"uz",
|
726 |
-
"vi",
|
727 |
-
"xh",
|
728 |
-
"yo",
|
729 |
-
"yua",
|
730 |
-
"yue",
|
731 |
-
"zh-Hans",
|
732 |
-
"zh-Hant",
|
733 |
-
"zu",
|
734 |
-
]
|
735 |
-
|
736 |
-
def __init__(
|
737 |
-
self, api_key, location, default_target_language="zh-cn", name="translate"
|
738 |
-
):
|
739 |
-
super(TranslateComponent, self).__init__(name)
|
740 |
-
self.name = name
|
741 |
-
self.api_key = api_key
|
742 |
-
self.location = location
|
743 |
-
self.default_target_language = default_target_language
|
744 |
-
|
745 |
-
def func(self, translate_dict: Dict) -> Dict:
|
746 |
-
content = translate_dict["content"]
|
747 |
-
target_language = self.default_target_language
|
748 |
-
if "target_language" in translate_dict:
|
749 |
-
target_language = translate_dict["target_language"]
|
750 |
-
assert (
|
751 |
-
target_language in self.__SUPPORT_LANGUAGE__
|
752 |
-
), f"language `{target_language}` is not supported."
|
753 |
-
|
754 |
-
endpoint = "https://api.cognitive.microsofttranslator.com"
|
755 |
-
|
756 |
-
path = "/translate"
|
757 |
-
constructed_url = endpoint + path
|
758 |
-
|
759 |
-
params = {"api-version": "3.0", "to": target_language}
|
760 |
-
|
761 |
-
headers = {
|
762 |
-
"Ocp-Apim-Subscription-Key": self.api_key,
|
763 |
-
"Ocp-Apim-Subscription-Region": self.location,
|
764 |
-
"Content-type": "application/json",
|
765 |
-
"X-ClientTraceId": str(uuid.uuid4()),
|
766 |
-
}
|
767 |
-
|
768 |
-
body = [{"text": content}]
|
769 |
-
|
770 |
-
request = requests.post(
|
771 |
-
constructed_url, params=params, headers=headers, json=body
|
772 |
-
)
|
773 |
-
response = request.json()
|
774 |
-
response = json.dumps(
|
775 |
-
response,
|
776 |
-
sort_keys=True,
|
777 |
-
ensure_ascii=False,
|
778 |
-
indent=4,
|
779 |
-
separators=(",", ": "),
|
780 |
-
)
|
781 |
-
response = eval(response)
|
782 |
-
return {"result": response[0]["translations"][0]["text"]}
|
783 |
-
|
784 |
-
|
785 |
-
class APIComponent(ToolComponent):
|
786 |
-
def __init__(self):
|
787 |
-
super(APIComponent, self).__init__()
|
788 |
-
|
789 |
-
def func(self, agent) -> Dict:
|
790 |
-
pass
|
791 |
-
|
792 |
-
|
793 |
-
class FunctionComponent(ToolComponent):
|
794 |
-
def __init__(
|
795 |
-
self,
|
796 |
-
functions,
|
797 |
-
function_call="auto",
|
798 |
-
response_type="response",
|
799 |
-
your_function=None,
|
800 |
-
):
|
801 |
-
super().__init__()
|
802 |
-
self.functions = functions
|
803 |
-
self.function_call = function_call
|
804 |
-
self.parameters = {}
|
805 |
-
self.available_functions = {}
|
806 |
-
self.response_type = response_type
|
807 |
-
if your_function:
|
808 |
-
function_name = your_function["name"]
|
809 |
-
function_content = your_function["content"]
|
810 |
-
exec(function_content)
|
811 |
-
self.available_functions[function_name] = eval(function_name)
|
812 |
-
|
813 |
-
for function in self.functions:
|
814 |
-
self.parameters[function["name"]] = list(
|
815 |
-
function["parameters"]["properties"].keys()
|
816 |
-
)
|
817 |
-
self.available_functions[function["name"]] = eval(function["name"])
|
818 |
-
|
819 |
-
def func(self, agent):
|
820 |
-
messages = agent.long_term_memory
|
821 |
-
outputdict = {}
|
822 |
-
query = agent.long_term_memory[-1].content if len(agent.long_term_memory) > 0 else " "
|
823 |
-
relevant_history = get_relevant_history(
|
824 |
-
query,
|
825 |
-
agent.long_term_memory[:-1],
|
826 |
-
agent.chat_embeddings[:-1],
|
827 |
-
)
|
828 |
-
response = agent.LLM.get_response(
|
829 |
-
messages,
|
830 |
-
None,
|
831 |
-
functions=self.functions,
|
832 |
-
stream=False,
|
833 |
-
function_call=self.function_call,
|
834 |
-
relevant_history=relevant_history,
|
835 |
-
)
|
836 |
-
response_message = response
|
837 |
-
if response_message.get("function_call"):
|
838 |
-
function_name = response_message["function_call"]["name"]
|
839 |
-
fuction_to_call = self.available_functions[function_name]
|
840 |
-
function_args = json.loads(response_message["function_call"]["arguments"])
|
841 |
-
input_args = {}
|
842 |
-
for args_name in self.parameters[function_name]:
|
843 |
-
input_args[args_name] = function_args.get(args_name)
|
844 |
-
function_response = fuction_to_call(**input_args)
|
845 |
-
if self.response_type == "response":
|
846 |
-
outputdict["response"] = function_response
|
847 |
-
elif self.response_type == "prompt":
|
848 |
-
outputdict["prompt"] = function_response
|
849 |
-
|
850 |
-
return outputdict
|
851 |
-
|
852 |
-
|
853 |
-
class CodeComponent(ToolComponent):
|
854 |
-
def __init__(self, file_name, keyword) -> None:
|
855 |
-
super().__init__()
|
856 |
-
self.file_name = file_name
|
857 |
-
self.keyword = keyword
|
858 |
-
self.system_prompt = (
|
859 |
-
"you need to extract the modified code as completely as possible."
|
860 |
-
)
|
861 |
-
self.last_prompt = (
|
862 |
-
f"Please strictly adhere to the following format for outputting: \n"
|
863 |
-
)
|
864 |
-
self.last_prompt += (
|
865 |
-
f"<{self.keyword}> the content you need to extract </{self.keyword}>"
|
866 |
-
)
|
867 |
-
|
868 |
-
def func(self, agent):
|
869 |
-
response = agent.LLM.get_response(
|
870 |
-
agent.long_term_memory,
|
871 |
-
self.system_prompt,
|
872 |
-
self.last_prompt,
|
873 |
-
stream=False,
|
874 |
-
)
|
875 |
-
code = extract(response, self.keyword)
|
876 |
-
code = code if code else response
|
877 |
-
os.makedirs("output_code", exist_ok=True)
|
878 |
-
file_name = "output_code/" + self.file_name
|
879 |
-
codes = code.split("\n")
|
880 |
-
if codes[0] == "```python":
|
881 |
-
codes.remove(codes[0])
|
882 |
-
if codes[-1] == "```":
|
883 |
-
codes.remove(codes[-1])
|
884 |
-
code = "\n".join(codes)
|
885 |
-
with open(file_name, "w", encoding="utf-8") as f:
|
886 |
-
f.write(code)
|
887 |
-
return {}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Abeer123/Pokemon_Digimon/README.md
DELETED
@@ -1,13 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: Pokemon Digimon
|
3 |
-
emoji: 💻
|
4 |
-
colorFrom: gray
|
5 |
-
colorTo: yellow
|
6 |
-
sdk: gradio
|
7 |
-
sdk_version: 3.11.0
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
license: apache-2.0
|
11 |
-
---
|
12 |
-
|
13 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AchyuthGamer/OpenGPT/g4f/Provider/Equing.py
DELETED
@@ -1,81 +0,0 @@
|
|
1 |
-
from __future__ import annotations
|
2 |
-
|
3 |
-
import json
|
4 |
-
from abc import ABC, abstractmethod
|
5 |
-
|
6 |
-
import requests
|
7 |
-
|
8 |
-
from ..typing import Any, CreateResult
|
9 |
-
from .base_provider import BaseProvider
|
10 |
-
|
11 |
-
|
12 |
-
class Equing(BaseProvider):
|
13 |
-
url: str = 'https://next.eqing.tech/'
|
14 |
-
working = False
|
15 |
-
supports_stream = True
|
16 |
-
supports_gpt_35_turbo = True
|
17 |
-
supports_gpt_4 = False
|
18 |
-
|
19 |
-
@staticmethod
|
20 |
-
@abstractmethod
|
21 |
-
def create_completion(
|
22 |
-
model: str,
|
23 |
-
messages: list[dict[str, str]],
|
24 |
-
stream: bool, **kwargs: Any) -> CreateResult:
|
25 |
-
|
26 |
-
headers = {
|
27 |
-
'authority' : 'next.eqing.tech',
|
28 |
-
'accept' : 'text/event-stream',
|
29 |
-
'accept-language' : 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
|
30 |
-
'cache-control' : 'no-cache',
|
31 |
-
'content-type' : 'application/json',
|
32 |
-
'origin' : 'https://next.eqing.tech',
|
33 |
-
'plugins' : '0',
|
34 |
-
'pragma' : 'no-cache',
|
35 |
-
'referer' : 'https://next.eqing.tech/',
|
36 |
-
'sec-ch-ua' : '"Not/A)Brand";v="99", "Google Chrome";v="115", "Chromium";v="115"',
|
37 |
-
'sec-ch-ua-mobile' : '?0',
|
38 |
-
'sec-ch-ua-platform': '"macOS"',
|
39 |
-
'sec-fetch-dest' : 'empty',
|
40 |
-
'sec-fetch-mode' : 'cors',
|
41 |
-
'sec-fetch-site' : 'same-origin',
|
42 |
-
'user-agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36',
|
43 |
-
'usesearch' : 'false',
|
44 |
-
'x-requested-with' : 'XMLHttpRequest'
|
45 |
-
}
|
46 |
-
|
47 |
-
json_data = {
|
48 |
-
'messages' : messages,
|
49 |
-
'stream' : stream,
|
50 |
-
'model' : model,
|
51 |
-
'temperature' : kwargs.get('temperature', 0.5),
|
52 |
-
'presence_penalty' : kwargs.get('presence_penalty', 0),
|
53 |
-
'frequency_penalty' : kwargs.get('frequency_penalty', 0),
|
54 |
-
'top_p' : kwargs.get('top_p', 1),
|
55 |
-
}
|
56 |
-
|
57 |
-
response = requests.post('https://next.eqing.tech/api/openai/v1/chat/completions',
|
58 |
-
headers=headers, json=json_data, stream=stream)
|
59 |
-
|
60 |
-
if not stream:
|
61 |
-
yield response.json()["choices"][0]["message"]["content"]
|
62 |
-
return
|
63 |
-
|
64 |
-
for line in response.iter_content(chunk_size=1024):
|
65 |
-
if line:
|
66 |
-
if b'content' in line:
|
67 |
-
line_json = json.loads(line.decode('utf-8').split('data: ')[1])
|
68 |
-
token = line_json['choices'][0]['delta'].get('content')
|
69 |
-
if token:
|
70 |
-
yield token
|
71 |
-
|
72 |
-
@classmethod
|
73 |
-
@property
|
74 |
-
def params(cls):
|
75 |
-
params = [
|
76 |
-
("model", "str"),
|
77 |
-
("messages", "list[dict[str, str]]"),
|
78 |
-
("stream", "bool"),
|
79 |
-
]
|
80 |
-
param = ", ".join([": ".join(p) for p in params])
|
81 |
-
return f"g4f.provider.{cls.__name__} supports: ({param})"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AgentVerse/agentVerse/ui/.github/CODE_OF_CONDUCT.md
DELETED
@@ -1,84 +0,0 @@
|
|
1 |
-
# Code of Conduct
|
2 |
-
|
3 |
-
## 1. Purpose
|
4 |
-
|
5 |
-
A primary goal of Phaser is to be inclusive to the largest number of contributors, with the most varied and diverse backgrounds possible. As such, we are committed to providing a friendly, safe and welcoming environment for all, regardless of gender, sexual orientation, ability, ethnicity, socioeconomic status, and religion (or lack thereof).
|
6 |
-
|
7 |
-
This code of conduct outlines our expectations for all those who participate in our community, as well as the consequences for unacceptable behavior.
|
8 |
-
|
9 |
-
We invite all those who participate in Phaser to help us create safe and positive experiences for everyone.
|
10 |
-
|
11 |
-
## 2. Open Source Citizenship
|
12 |
-
|
13 |
-
A supplemental goal of this Code of Conduct is to increase open source citizenship by encouraging participants to recognize and strengthen the relationships between our actions and their effects on our community.
|
14 |
-
|
15 |
-
Communities mirror the societies in which they exist and positive action is essential to counteract the many forms of inequality and abuses of power that exist in society.
|
16 |
-
|
17 |
-
If you see someone who is making an extra effort to ensure our community is welcoming, friendly, and encourages all participants to contribute to the fullest extent, we want to know.
|
18 |
-
|
19 |
-
## 3. Expected Behavior
|
20 |
-
|
21 |
-
The following behaviors are expected and requested of all community members:
|
22 |
-
|
23 |
-
* Participate in an authentic and active way. In doing so, you contribute to the health and longevity of this community.
|
24 |
-
* Exercise consideration and respect in your speech and actions.
|
25 |
-
* Attempt collaboration before conflict.
|
26 |
-
* Refrain from demeaning, discriminatory, or harassing behavior and speech.
|
27 |
-
* Be mindful of your surroundings and of your fellow participants. Alert community leaders if you notice a dangerous situation, someone in distress, or violations of this Code of Conduct, even if they seem inconsequential.
|
28 |
-
* Remember that community event venues may be shared with members of the public; please be respectful to all patrons of these locations.
|
29 |
-
|
30 |
-
## 4. Unacceptable Behavior
|
31 |
-
|
32 |
-
The following behaviors are considered harassment and are unacceptable within our community:
|
33 |
-
|
34 |
-
* Violence, threats of violence or violent language directed against another person.
|
35 |
-
* Sexist, racist, homophobic, transphobic, ableist or otherwise discriminatory jokes and language.
|
36 |
-
* Posting or displaying sexually explicit or violent material.
|
37 |
-
* Posting or threatening to post other people’s personally identifying information ("doxing").
|
38 |
-
* Personal insults, particularly those related to gender, sexual orientation, race, religion, or disability.
|
39 |
-
* Inappropriate photography or recording.
|
40 |
-
* Inappropriate physical contact. You should have someone’s consent before touching them.
|
41 |
-
* Unwelcome sexual attention. This includes, sexualized comments or jokes; inappropriate touching, groping, and unwelcomed sexual advances.
|
42 |
-
* Deliberate intimidation, stalking or following (online or in person).
|
43 |
-
* Advocating for, or encouraging, any of the above behavior.
|
44 |
-
* Sustained disruption of community events, including talks and presentations.
|
45 |
-
|
46 |
-
## 5. Consequences of Unacceptable Behavior
|
47 |
-
|
48 |
-
Unacceptable behavior from any community member, including sponsors and those with decision-making authority, will not be tolerated.
|
49 |
-
|
50 |
-
Anyone asked to stop unacceptable behavior is expected to comply immediately.
|
51 |
-
|
52 |
-
If a community member engages in unacceptable behavior, the community organizers may take any action they deem appropriate, up to and including a temporary ban or permanent expulsion from the community without warning (and without refund in the case of a paid event).
|
53 |
-
|
54 |
-
## 6. Reporting Guidelines
|
55 |
-
|
56 |
-
If you are subject to or witness unacceptable behavior, or have any other concerns, please notify a community organizer as soon as possible. [email protected].
|
57 |
-
|
58 |
-
|
59 |
-
|
60 |
-
Additionally, community organizers are available to help community members engage with local law enforcement or to otherwise help those experiencing unacceptable behavior feel safe. In the context of in-person events, organizers will also provide escorts as desired by the person experiencing distress.
|
61 |
-
|
62 |
-
## 7. Addressing Grievances
|
63 |
-
|
64 |
-
If you feel you have been falsely or unfairly accused of violating this Code of Conduct, you should notify Photon Storm Ltd with a concise description of your grievance. Your grievance will be handled in accordance with our existing governing policies.
|
65 |
-
|
66 |
-
|
67 |
-
|
68 |
-
## 8. Scope
|
69 |
-
|
70 |
-
We expect all community participants (contributors, paid or otherwise; sponsors; and other guests) to abide by this Code of Conduct in all community venues–online and in-person–as well as in all one-on-one communications pertaining to community business.
|
71 |
-
|
72 |
-
This code of conduct and its related procedures also applies to unacceptable behavior occurring outside the scope of community activities when such behavior has the potential to adversely affect the safety and well-being of community members.
|
73 |
-
|
74 |
-
## 9. Contact info
|
75 |
-
|
76 | |
77 |
-
|
78 |
-
## 10. License and attribution
|
79 |
-
|
80 |
-
This Code of Conduct is distributed under a [Creative Commons Attribution-ShareAlike license](http://creativecommons.org/licenses/by-sa/3.0/).
|
81 |
-
|
82 |
-
Portions of text derived from the [Django Code of Conduct](https://www.djangoproject.com/conduct/) and the [Geek Feminism Anti-Harassment Policy](http://geekfeminism.wikia.com/wiki/Conference_anti-harassment/Policy).
|
83 |
-
|
84 |
-
Retrieved on November 22, 2016 from [http://citizencodeofconduct.org/](http://citizencodeofconduct.org/)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/spinner-components.d.ts
DELETED
@@ -1,39 +0,0 @@
|
|
1 |
-
import Audio from './audio/Audio';
|
2 |
-
import Ball from './ball/Ball';
|
3 |
-
import Bars from './bars/Bars';
|
4 |
-
import Box from './box/Box';
|
5 |
-
import Clock from './clock/Clock';
|
6 |
-
import Cube from './cube/Cube';
|
7 |
-
import Custom from './custom/Custom';
|
8 |
-
import Dots from './dots/Dots';
|
9 |
-
import Facebook from './facebook/Facebook';
|
10 |
-
import Grid from './grid/Grid';
|
11 |
-
import Los from './los/Los';
|
12 |
-
import Orbit from './orbit/Orbit';
|
13 |
-
import Oval from './oval/Oval';
|
14 |
-
import Pie from './pie/Pie';
|
15 |
-
import Puff from './puff/Puff';
|
16 |
-
import Radio from './radio/Radio';
|
17 |
-
import Rings from './rings/Rings';
|
18 |
-
import Spinner from './spinner/Spinner';
|
19 |
-
|
20 |
-
export {
|
21 |
-
Audio,
|
22 |
-
Ball,
|
23 |
-
Bars,
|
24 |
-
Box,
|
25 |
-
Clock,
|
26 |
-
Cube,
|
27 |
-
Custom,
|
28 |
-
Dots,
|
29 |
-
Facebook,
|
30 |
-
Grid,
|
31 |
-
Los,
|
32 |
-
Orbit,
|
33 |
-
Oval,
|
34 |
-
Pie,
|
35 |
-
Puff,
|
36 |
-
Radio,
|
37 |
-
Rings,
|
38 |
-
Spinner
|
39 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/schedulers/scheduling_lms_discrete.py
DELETED
@@ -1,413 +0,0 @@
|
|
1 |
-
# Copyright 2023 Katherine Crowson and The HuggingFace Team. All rights reserved.
|
2 |
-
#
|
3 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
-
# you may not use this file except in compliance with the License.
|
5 |
-
# You may obtain a copy of the License at
|
6 |
-
#
|
7 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
-
#
|
9 |
-
# Unless required by applicable law or agreed to in writing, software
|
10 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
-
# See the License for the specific language governing permissions and
|
13 |
-
# limitations under the License.
|
14 |
-
import math
|
15 |
-
import warnings
|
16 |
-
from dataclasses import dataclass
|
17 |
-
from typing import List, Optional, Tuple, Union
|
18 |
-
|
19 |
-
import numpy as np
|
20 |
-
import torch
|
21 |
-
from scipy import integrate
|
22 |
-
|
23 |
-
from ..configuration_utils import ConfigMixin, register_to_config
|
24 |
-
from ..utils import BaseOutput
|
25 |
-
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
|
26 |
-
|
27 |
-
|
28 |
-
@dataclass
|
29 |
-
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->LMSDiscrete
|
30 |
-
class LMSDiscreteSchedulerOutput(BaseOutput):
|
31 |
-
"""
|
32 |
-
Output class for the scheduler's step function output.
|
33 |
-
|
34 |
-
Args:
|
35 |
-
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
|
36 |
-
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
|
37 |
-
denoising loop.
|
38 |
-
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
|
39 |
-
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
|
40 |
-
`pred_original_sample` can be used to preview progress or for guidance.
|
41 |
-
"""
|
42 |
-
|
43 |
-
prev_sample: torch.FloatTensor
|
44 |
-
pred_original_sample: Optional[torch.FloatTensor] = None
|
45 |
-
|
46 |
-
|
47 |
-
# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar
|
48 |
-
def betas_for_alpha_bar(
|
49 |
-
num_diffusion_timesteps,
|
50 |
-
max_beta=0.999,
|
51 |
-
alpha_transform_type="cosine",
|
52 |
-
):
|
53 |
-
"""
|
54 |
-
Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
|
55 |
-
(1-beta) over time from t = [0,1].
|
56 |
-
|
57 |
-
Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up
|
58 |
-
to that part of the diffusion process.
|
59 |
-
|
60 |
-
|
61 |
-
Args:
|
62 |
-
num_diffusion_timesteps (`int`): the number of betas to produce.
|
63 |
-
max_beta (`float`): the maximum beta to use; use values lower than 1 to
|
64 |
-
prevent singularities.
|
65 |
-
alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar.
|
66 |
-
Choose from `cosine` or `exp`
|
67 |
-
|
68 |
-
Returns:
|
69 |
-
betas (`np.ndarray`): the betas used by the scheduler to step the model outputs
|
70 |
-
"""
|
71 |
-
if alpha_transform_type == "cosine":
|
72 |
-
|
73 |
-
def alpha_bar_fn(t):
|
74 |
-
return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
|
75 |
-
|
76 |
-
elif alpha_transform_type == "exp":
|
77 |
-
|
78 |
-
def alpha_bar_fn(t):
|
79 |
-
return math.exp(t * -12.0)
|
80 |
-
|
81 |
-
else:
|
82 |
-
raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}")
|
83 |
-
|
84 |
-
betas = []
|
85 |
-
for i in range(num_diffusion_timesteps):
|
86 |
-
t1 = i / num_diffusion_timesteps
|
87 |
-
t2 = (i + 1) / num_diffusion_timesteps
|
88 |
-
betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
|
89 |
-
return torch.tensor(betas, dtype=torch.float32)
|
90 |
-
|
91 |
-
|
92 |
-
class LMSDiscreteScheduler(SchedulerMixin, ConfigMixin):
|
93 |
-
"""
|
94 |
-
Linear Multistep Scheduler for discrete beta schedules. Based on the original k-diffusion implementation by
|
95 |
-
Katherine Crowson:
|
96 |
-
https://github.com/crowsonkb/k-diffusion/blob/481677d114f6ea445aa009cf5bd7a9cdee909e47/k_diffusion/sampling.py#L181
|
97 |
-
|
98 |
-
[`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__`
|
99 |
-
function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`.
|
100 |
-
[`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and
|
101 |
-
[`~SchedulerMixin.from_pretrained`] functions.
|
102 |
-
|
103 |
-
Args:
|
104 |
-
num_train_timesteps (`int`): number of diffusion steps used to train the model.
|
105 |
-
beta_start (`float`): the starting `beta` value of inference.
|
106 |
-
beta_end (`float`): the final `beta` value.
|
107 |
-
beta_schedule (`str`):
|
108 |
-
the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
|
109 |
-
`linear` or `scaled_linear`.
|
110 |
-
trained_betas (`np.ndarray`, optional):
|
111 |
-
option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc.
|
112 |
-
use_karras_sigmas (`bool`, *optional*, defaults to `False`):
|
113 |
-
This parameter controls whether to use Karras sigmas (Karras et al. (2022) scheme) for step sizes in the
|
114 |
-
noise schedule during the sampling process. If True, the sigmas will be determined according to a sequence
|
115 |
-
of noise levels {σi} as defined in Equation (5) of the paper https://arxiv.org/pdf/2206.00364.pdf.
|
116 |
-
prediction_type (`str`, default `epsilon`, optional):
|
117 |
-
prediction type of the scheduler function, one of `epsilon` (predicting the noise of the diffusion
|
118 |
-
process), `sample` (directly predicting the noisy sample`) or `v_prediction` (see section 2.4
|
119 |
-
https://imagen.research.google/video/paper.pdf)
|
120 |
-
timestep_spacing (`str`, default `"linspace"`):
|
121 |
-
The way the timesteps should be scaled. Refer to Table 2. of [Common Diffusion Noise Schedules and Sample
|
122 |
-
Steps are Flawed](https://arxiv.org/abs/2305.08891) for more information.
|
123 |
-
steps_offset (`int`, default `0`):
|
124 |
-
an offset added to the inference steps. You can use a combination of `offset=1` and
|
125 |
-
`set_alpha_to_one=False`, to make the last step use step 0 for the previous alpha product, as done in
|
126 |
-
stable diffusion.
|
127 |
-
"""
|
128 |
-
|
129 |
-
_compatibles = [e.name for e in KarrasDiffusionSchedulers]
|
130 |
-
order = 1
|
131 |
-
|
132 |
-
@register_to_config
|
133 |
-
def __init__(
|
134 |
-
self,
|
135 |
-
num_train_timesteps: int = 1000,
|
136 |
-
beta_start: float = 0.0001,
|
137 |
-
beta_end: float = 0.02,
|
138 |
-
beta_schedule: str = "linear",
|
139 |
-
trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
|
140 |
-
use_karras_sigmas: Optional[bool] = False,
|
141 |
-
prediction_type: str = "epsilon",
|
142 |
-
timestep_spacing: str = "linspace",
|
143 |
-
steps_offset: int = 0,
|
144 |
-
):
|
145 |
-
if trained_betas is not None:
|
146 |
-
self.betas = torch.tensor(trained_betas, dtype=torch.float32)
|
147 |
-
elif beta_schedule == "linear":
|
148 |
-
self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
|
149 |
-
elif beta_schedule == "scaled_linear":
|
150 |
-
# this schedule is very specific to the latent diffusion model.
|
151 |
-
self.betas = (
|
152 |
-
torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
|
153 |
-
)
|
154 |
-
elif beta_schedule == "squaredcos_cap_v2":
|
155 |
-
# Glide cosine schedule
|
156 |
-
self.betas = betas_for_alpha_bar(num_train_timesteps)
|
157 |
-
else:
|
158 |
-
raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}")
|
159 |
-
|
160 |
-
self.alphas = 1.0 - self.betas
|
161 |
-
self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
|
162 |
-
|
163 |
-
sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
|
164 |
-
sigmas = np.concatenate([sigmas[::-1], [0.0]]).astype(np.float32)
|
165 |
-
self.sigmas = torch.from_numpy(sigmas)
|
166 |
-
|
167 |
-
# setable values
|
168 |
-
self.num_inference_steps = None
|
169 |
-
self.use_karras_sigmas = use_karras_sigmas
|
170 |
-
self.set_timesteps(num_train_timesteps, None)
|
171 |
-
self.derivatives = []
|
172 |
-
self.is_scale_input_called = False
|
173 |
-
|
174 |
-
@property
|
175 |
-
def init_noise_sigma(self):
|
176 |
-
# standard deviation of the initial noise distribution
|
177 |
-
if self.config.timestep_spacing in ["linspace", "trailing"]:
|
178 |
-
return self.sigmas.max()
|
179 |
-
|
180 |
-
return (self.sigmas.max() ** 2 + 1) ** 0.5
|
181 |
-
|
182 |
-
def scale_model_input(
|
183 |
-
self, sample: torch.FloatTensor, timestep: Union[float, torch.FloatTensor]
|
184 |
-
) -> torch.FloatTensor:
|
185 |
-
"""
|
186 |
-
Scales the denoising model input by `(sigma**2 + 1) ** 0.5` to match the K-LMS algorithm.
|
187 |
-
|
188 |
-
Args:
|
189 |
-
sample (`torch.FloatTensor`): input sample
|
190 |
-
timestep (`float` or `torch.FloatTensor`): the current timestep in the diffusion chain
|
191 |
-
|
192 |
-
Returns:
|
193 |
-
`torch.FloatTensor`: scaled input sample
|
194 |
-
"""
|
195 |
-
if isinstance(timestep, torch.Tensor):
|
196 |
-
timestep = timestep.to(self.timesteps.device)
|
197 |
-
step_index = (self.timesteps == timestep).nonzero().item()
|
198 |
-
sigma = self.sigmas[step_index]
|
199 |
-
sample = sample / ((sigma**2 + 1) ** 0.5)
|
200 |
-
self.is_scale_input_called = True
|
201 |
-
return sample
|
202 |
-
|
203 |
-
def get_lms_coefficient(self, order, t, current_order):
|
204 |
-
"""
|
205 |
-
Compute a linear multistep coefficient.
|
206 |
-
|
207 |
-
Args:
|
208 |
-
order (TODO):
|
209 |
-
t (TODO):
|
210 |
-
current_order (TODO):
|
211 |
-
"""
|
212 |
-
|
213 |
-
def lms_derivative(tau):
|
214 |
-
prod = 1.0
|
215 |
-
for k in range(order):
|
216 |
-
if current_order == k:
|
217 |
-
continue
|
218 |
-
prod *= (tau - self.sigmas[t - k]) / (self.sigmas[t - current_order] - self.sigmas[t - k])
|
219 |
-
return prod
|
220 |
-
|
221 |
-
integrated_coeff = integrate.quad(lms_derivative, self.sigmas[t], self.sigmas[t + 1], epsrel=1e-4)[0]
|
222 |
-
|
223 |
-
return integrated_coeff
|
224 |
-
|
225 |
-
def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
|
226 |
-
"""
|
227 |
-
Sets the timesteps used for the diffusion chain. Supporting function to be run before inference.
|
228 |
-
|
229 |
-
Args:
|
230 |
-
num_inference_steps (`int`):
|
231 |
-
the number of diffusion steps used when generating samples with a pre-trained model.
|
232 |
-
device (`str` or `torch.device`, optional):
|
233 |
-
the device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
|
234 |
-
"""
|
235 |
-
self.num_inference_steps = num_inference_steps
|
236 |
-
|
237 |
-
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
|
238 |
-
if self.config.timestep_spacing == "linspace":
|
239 |
-
timesteps = np.linspace(0, self.config.num_train_timesteps - 1, num_inference_steps, dtype=float)[
|
240 |
-
::-1
|
241 |
-
].copy()
|
242 |
-
elif self.config.timestep_spacing == "leading":
|
243 |
-
step_ratio = self.config.num_train_timesteps // self.num_inference_steps
|
244 |
-
# creates integer timesteps by multiplying by ratio
|
245 |
-
# casting to int to avoid issues when num_inference_step is power of 3
|
246 |
-
timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
|
247 |
-
timesteps += self.config.steps_offset
|
248 |
-
elif self.config.timestep_spacing == "trailing":
|
249 |
-
step_ratio = self.config.num_train_timesteps / self.num_inference_steps
|
250 |
-
# creates integer timesteps by multiplying by ratio
|
251 |
-
# casting to int to avoid issues when num_inference_step is power of 3
|
252 |
-
timesteps = (np.arange(self.config.num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
|
253 |
-
timesteps -= 1
|
254 |
-
else:
|
255 |
-
raise ValueError(
|
256 |
-
f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
|
257 |
-
)
|
258 |
-
|
259 |
-
sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
|
260 |
-
log_sigmas = np.log(sigmas)
|
261 |
-
sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
|
262 |
-
|
263 |
-
if self.use_karras_sigmas:
|
264 |
-
sigmas = self._convert_to_karras(in_sigmas=sigmas)
|
265 |
-
timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
|
266 |
-
|
267 |
-
sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
|
268 |
-
|
269 |
-
self.sigmas = torch.from_numpy(sigmas).to(device=device)
|
270 |
-
if str(device).startswith("mps"):
|
271 |
-
# mps does not support float64
|
272 |
-
self.timesteps = torch.from_numpy(timesteps).to(device, dtype=torch.float32)
|
273 |
-
else:
|
274 |
-
self.timesteps = torch.from_numpy(timesteps).to(device=device)
|
275 |
-
|
276 |
-
self.derivatives = []
|
277 |
-
|
278 |
-
# copied from diffusers.schedulers.scheduling_euler_discrete._sigma_to_t
|
279 |
-
def _sigma_to_t(self, sigma, log_sigmas):
|
280 |
-
# get log sigma
|
281 |
-
log_sigma = np.log(sigma)
|
282 |
-
|
283 |
-
# get distribution
|
284 |
-
dists = log_sigma - log_sigmas[:, np.newaxis]
|
285 |
-
|
286 |
-
# get sigmas range
|
287 |
-
low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
|
288 |
-
high_idx = low_idx + 1
|
289 |
-
|
290 |
-
low = log_sigmas[low_idx]
|
291 |
-
high = log_sigmas[high_idx]
|
292 |
-
|
293 |
-
# interpolate sigmas
|
294 |
-
w = (low - log_sigma) / (low - high)
|
295 |
-
w = np.clip(w, 0, 1)
|
296 |
-
|
297 |
-
# transform interpolation to time range
|
298 |
-
t = (1 - w) * low_idx + w * high_idx
|
299 |
-
t = t.reshape(sigma.shape)
|
300 |
-
return t
|
301 |
-
|
302 |
-
# copied from diffusers.schedulers.scheduling_euler_discrete._convert_to_karras
|
303 |
-
def _convert_to_karras(self, in_sigmas: torch.FloatTensor) -> torch.FloatTensor:
|
304 |
-
"""Constructs the noise schedule of Karras et al. (2022)."""
|
305 |
-
|
306 |
-
sigma_min: float = in_sigmas[-1].item()
|
307 |
-
sigma_max: float = in_sigmas[0].item()
|
308 |
-
|
309 |
-
rho = 7.0 # 7.0 is the value used in the paper
|
310 |
-
ramp = np.linspace(0, 1, self.num_inference_steps)
|
311 |
-
min_inv_rho = sigma_min ** (1 / rho)
|
312 |
-
max_inv_rho = sigma_max ** (1 / rho)
|
313 |
-
sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
|
314 |
-
return sigmas
|
315 |
-
|
316 |
-
def step(
|
317 |
-
self,
|
318 |
-
model_output: torch.FloatTensor,
|
319 |
-
timestep: Union[float, torch.FloatTensor],
|
320 |
-
sample: torch.FloatTensor,
|
321 |
-
order: int = 4,
|
322 |
-
return_dict: bool = True,
|
323 |
-
) -> Union[LMSDiscreteSchedulerOutput, Tuple]:
|
324 |
-
"""
|
325 |
-
Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion
|
326 |
-
process from the learned model outputs (most often the predicted noise).
|
327 |
-
|
328 |
-
Args:
|
329 |
-
model_output (`torch.FloatTensor`): direct output from learned diffusion model.
|
330 |
-
timestep (`float`): current timestep in the diffusion chain.
|
331 |
-
sample (`torch.FloatTensor`):
|
332 |
-
current instance of sample being created by diffusion process.
|
333 |
-
order: coefficient for multi-step inference.
|
334 |
-
return_dict (`bool`): option for returning tuple rather than LMSDiscreteSchedulerOutput class
|
335 |
-
|
336 |
-
Returns:
|
337 |
-
[`~schedulers.scheduling_utils.LMSDiscreteSchedulerOutput`] or `tuple`:
|
338 |
-
[`~schedulers.scheduling_utils.LMSDiscreteSchedulerOutput`] if `return_dict` is True, otherwise a `tuple`.
|
339 |
-
When returning a tuple, the first element is the sample tensor.
|
340 |
-
|
341 |
-
"""
|
342 |
-
if not self.is_scale_input_called:
|
343 |
-
warnings.warn(
|
344 |
-
"The `scale_model_input` function should be called before `step` to ensure correct denoising. "
|
345 |
-
"See `StableDiffusionPipeline` for a usage example."
|
346 |
-
)
|
347 |
-
|
348 |
-
if isinstance(timestep, torch.Tensor):
|
349 |
-
timestep = timestep.to(self.timesteps.device)
|
350 |
-
step_index = (self.timesteps == timestep).nonzero().item()
|
351 |
-
sigma = self.sigmas[step_index]
|
352 |
-
|
353 |
-
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
|
354 |
-
if self.config.prediction_type == "epsilon":
|
355 |
-
pred_original_sample = sample - sigma * model_output
|
356 |
-
elif self.config.prediction_type == "v_prediction":
|
357 |
-
# * c_out + input * c_skip
|
358 |
-
pred_original_sample = model_output * (-sigma / (sigma**2 + 1) ** 0.5) + (sample / (sigma**2 + 1))
|
359 |
-
elif self.config.prediction_type == "sample":
|
360 |
-
pred_original_sample = model_output
|
361 |
-
else:
|
362 |
-
raise ValueError(
|
363 |
-
f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
|
364 |
-
)
|
365 |
-
|
366 |
-
# 2. Convert to an ODE derivative
|
367 |
-
derivative = (sample - pred_original_sample) / sigma
|
368 |
-
self.derivatives.append(derivative)
|
369 |
-
if len(self.derivatives) > order:
|
370 |
-
self.derivatives.pop(0)
|
371 |
-
|
372 |
-
# 3. Compute linear multistep coefficients
|
373 |
-
order = min(step_index + 1, order)
|
374 |
-
lms_coeffs = [self.get_lms_coefficient(order, step_index, curr_order) for curr_order in range(order)]
|
375 |
-
|
376 |
-
# 4. Compute previous sample based on the derivatives path
|
377 |
-
prev_sample = sample + sum(
|
378 |
-
coeff * derivative for coeff, derivative in zip(lms_coeffs, reversed(self.derivatives))
|
379 |
-
)
|
380 |
-
|
381 |
-
if not return_dict:
|
382 |
-
return (prev_sample,)
|
383 |
-
|
384 |
-
return LMSDiscreteSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
|
385 |
-
|
386 |
-
# Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler.add_noise
|
387 |
-
def add_noise(
|
388 |
-
self,
|
389 |
-
original_samples: torch.FloatTensor,
|
390 |
-
noise: torch.FloatTensor,
|
391 |
-
timesteps: torch.FloatTensor,
|
392 |
-
) -> torch.FloatTensor:
|
393 |
-
# Make sure sigmas and timesteps have the same device and dtype as original_samples
|
394 |
-
sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
|
395 |
-
if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
|
396 |
-
# mps does not support float64
|
397 |
-
schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
|
398 |
-
timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
|
399 |
-
else:
|
400 |
-
schedule_timesteps = self.timesteps.to(original_samples.device)
|
401 |
-
timesteps = timesteps.to(original_samples.device)
|
402 |
-
|
403 |
-
step_indices = [(schedule_timesteps == t).nonzero().item() for t in timesteps]
|
404 |
-
|
405 |
-
sigma = sigmas[step_indices].flatten()
|
406 |
-
while len(sigma.shape) < len(original_samples.shape):
|
407 |
-
sigma = sigma.unsqueeze(-1)
|
408 |
-
|
409 |
-
noisy_samples = original_samples + noise * sigma
|
410 |
-
return noisy_samples
|
411 |
-
|
412 |
-
def __len__(self):
|
413 |
-
return self.config.num_train_timesteps
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_segmentation/configs/ocrnet/ocrnet_r101-d8_512x1024_40k_b8_cityscapes.py DELETED
@@ -1,5 +0,0 @@
-_base_ = [
-    '../_base_/models/ocrnet_r50-d8.py', '../_base_/datasets/cityscapes.py',
-    '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py'
-]
-model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/distributions/base.py DELETED
@@ -1,39 +0,0 @@
-import abc
-
-from pip._internal.index.package_finder import PackageFinder
-from pip._internal.metadata.base import BaseDistribution
-from pip._internal.req import InstallRequirement
-
-
-class AbstractDistribution(metaclass=abc.ABCMeta):
-    """A base class for handling installable artifacts.
-
-    The requirements for anything installable are as follows:
-
-    - we must be able to determine the requirement name
-      (or we can't correctly handle the non-upgrade case).
-
-    - for packages with setup requirements, we must also be able
-      to determine their requirements without installing additional
-      packages (for the same reason as run-time dependencies)
-
-    - we must be able to create a Distribution object exposing the
-      above metadata.
-    """
-
-    def __init__(self, req: InstallRequirement) -> None:
-        super().__init__()
-        self.req = req
-
-    @abc.abstractmethod
-    def get_metadata_distribution(self) -> BaseDistribution:
-        raise NotImplementedError()
-
-    @abc.abstractmethod
-    def prepare_distribution_metadata(
-        self,
-        finder: PackageFinder,
-        build_isolation: bool,
-        check_build_deps: bool,
-    ) -> None:
-        raise NotImplementedError()
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/packaging/requirements.py DELETED
@@ -1,146 +0,0 @@
-# This file is dual licensed under the terms of the Apache License, Version
-# 2.0, and the BSD License. See the LICENSE file in the root of this repository
-# for complete details.
-
-import re
-import string
-import urllib.parse
-from typing import List, Optional as TOptional, Set
-
-from pip._vendor.pyparsing import (  # noqa
-    Combine,
-    Literal as L,
-    Optional,
-    ParseException,
-    Regex,
-    Word,
-    ZeroOrMore,
-    originalTextFor,
-    stringEnd,
-    stringStart,
-)
-
-from .markers import MARKER_EXPR, Marker
-from .specifiers import LegacySpecifier, Specifier, SpecifierSet
-
-
-class InvalidRequirement(ValueError):
-    """
-    An invalid requirement was found, users should refer to PEP 508.
-    """
-
-
-ALPHANUM = Word(string.ascii_letters + string.digits)
-
-LBRACKET = L("[").suppress()
-RBRACKET = L("]").suppress()
-LPAREN = L("(").suppress()
-RPAREN = L(")").suppress()
-COMMA = L(",").suppress()
-SEMICOLON = L(";").suppress()
-AT = L("@").suppress()
-
-PUNCTUATION = Word("-_.")
-IDENTIFIER_END = ALPHANUM | (ZeroOrMore(PUNCTUATION) + ALPHANUM)
-IDENTIFIER = Combine(ALPHANUM + ZeroOrMore(IDENTIFIER_END))
-
-NAME = IDENTIFIER("name")
-EXTRA = IDENTIFIER
-
-URI = Regex(r"[^ ]+")("url")
-URL = AT + URI
-
-EXTRAS_LIST = EXTRA + ZeroOrMore(COMMA + EXTRA)
-EXTRAS = (LBRACKET + Optional(EXTRAS_LIST) + RBRACKET)("extras")
-
-VERSION_PEP440 = Regex(Specifier._regex_str, re.VERBOSE | re.IGNORECASE)
-VERSION_LEGACY = Regex(LegacySpecifier._regex_str, re.VERBOSE | re.IGNORECASE)
-
-VERSION_ONE = VERSION_PEP440 ^ VERSION_LEGACY
-VERSION_MANY = Combine(
-    VERSION_ONE + ZeroOrMore(COMMA + VERSION_ONE), joinString=",", adjacent=False
-)("_raw_spec")
-_VERSION_SPEC = Optional((LPAREN + VERSION_MANY + RPAREN) | VERSION_MANY)
-_VERSION_SPEC.setParseAction(lambda s, l, t: t._raw_spec or "")
-
-VERSION_SPEC = originalTextFor(_VERSION_SPEC)("specifier")
-VERSION_SPEC.setParseAction(lambda s, l, t: t[1])
-
-MARKER_EXPR = originalTextFor(MARKER_EXPR())("marker")
-MARKER_EXPR.setParseAction(
-    lambda s, l, t: Marker(s[t._original_start : t._original_end])
-)
-MARKER_SEPARATOR = SEMICOLON
-MARKER = MARKER_SEPARATOR + MARKER_EXPR
-
-VERSION_AND_MARKER = VERSION_SPEC + Optional(MARKER)
-URL_AND_MARKER = URL + Optional(MARKER)
-
-NAMED_REQUIREMENT = NAME + Optional(EXTRAS) + (URL_AND_MARKER | VERSION_AND_MARKER)
-
-REQUIREMENT = stringStart + NAMED_REQUIREMENT + stringEnd
-# pyparsing isn't thread safe during initialization, so we do it eagerly, see
-# issue #104
-REQUIREMENT.parseString("x[]")
-
-
-class Requirement:
-    """Parse a requirement.
-
-    Parse a given requirement string into its parts, such as name, specifier,
-    URL, and extras. Raises InvalidRequirement on a badly-formed requirement
-    string.
-    """
-
-    # TODO: Can we test whether something is contained within a requirement?
-    #       If so how do we do that? Do we need to test against the _name_ of
-    #       the thing as well as the version? What about the markers?
-    # TODO: Can we normalize the name and extra name?
-
-    def __init__(self, requirement_string: str) -> None:
-        try:
-            req = REQUIREMENT.parseString(requirement_string)
-        except ParseException as e:
-            raise InvalidRequirement(
-                f'Parse error at "{ requirement_string[e.loc : e.loc + 8]!r}": {e.msg}'
-            )
-
-        self.name: str = req.name
-        if req.url:
-            parsed_url = urllib.parse.urlparse(req.url)
-            if parsed_url.scheme == "file":
-                if urllib.parse.urlunparse(parsed_url) != req.url:
-                    raise InvalidRequirement("Invalid URL given")
-            elif not (parsed_url.scheme and parsed_url.netloc) or (
-                not parsed_url.scheme and not parsed_url.netloc
-            ):
-                raise InvalidRequirement(f"Invalid URL: {req.url}")
-            self.url: TOptional[str] = req.url
-        else:
-            self.url = None
-        self.extras: Set[str] = set(req.extras.asList() if req.extras else [])
-        self.specifier: SpecifierSet = SpecifierSet(req.specifier)
-        self.marker: TOptional[Marker] = req.marker if req.marker else None
-
-    def __str__(self) -> str:
-        parts: List[str] = [self.name]
-
-        if self.extras:
-            formatted_extras = ",".join(sorted(self.extras))
-            parts.append(f"[{formatted_extras}]")
-
-        if self.specifier:
-            parts.append(str(self.specifier))
-
-        if self.url:
-            parts.append(f"@ {self.url}")
-            if self.marker:
-                parts.append(" ")
-
-        if self.marker:
-            parts.append(f"; {self.marker}")
-
-        return "".join(parts)
-
-    def __repr__(self) -> str:
-        return f"<Requirement('{self}')>"
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/_timer.py DELETED
@@ -1,19 +0,0 @@
-"""
-Timer context manager, only used in debug.
-
-"""
-
-from time import time
-
-import contextlib
-from typing import Generator
-
-
-@contextlib.contextmanager
-def timer(subject: str = "time") -> Generator[None, None, None]:
-    """print the elapsed time. (only used in debugging)"""
-    start = time()
-    yield
-    elapsed = time() - start
-    elapsed_ms = elapsed * 1000
-    print(f"{subject} elapsed {elapsed_ms:.1f}ms")
spaces/AtomdffAI/wechatgpt4atom/bot/bot.py DELETED
@@ -1,13 +0,0 @@
-"""
-Auto-replay chat robot abstract class
-"""
-
-
-class Bot(object):
-    def reply(self, query, context=None):
-        """
-        bot auto-reply content
-        :param req: received message
-        :return: reply content
-        """
-        raise NotImplementedError
spaces/Bart92/RVC_HF/i18n/scan_i18n.py DELETED
@@ -1,75 +0,0 @@
-import ast
-import glob
-import json
-from collections import OrderedDict
-
-
-def extract_i18n_strings(node):
-    i18n_strings = []
-
-    if (
-        isinstance(node, ast.Call)
-        and isinstance(node.func, ast.Name)
-        and node.func.id == "i18n"
-    ):
-        for arg in node.args:
-            if isinstance(arg, ast.Str):
-                i18n_strings.append(arg.s)
-
-    for child_node in ast.iter_child_nodes(node):
-        i18n_strings.extend(extract_i18n_strings(child_node))
-
-    return i18n_strings
-
-
-# scan the directory for all .py files (recursively)
-# for each file, parse the code into an AST
-# for each AST, extract the i18n strings
-
-strings = []
-for filename in glob.iglob("**/*.py", recursive=True):
-    with open(filename, "r") as f:
-        code = f.read()
-        if "I18nAuto" in code:
-            tree = ast.parse(code)
-            i18n_strings = extract_i18n_strings(tree)
-            print(filename, len(i18n_strings))
-            strings.extend(i18n_strings)
-code_keys = set(strings)
-"""
-n_i18n.py
-gui_v1.py 26
-app.py 16
-infer-web.py 147
-scan_i18n.py 0
-i18n.py 0
-lib/train/process_ckpt.py 1
-"""
-print()
-print("Total unique:", len(code_keys))
-
-
-standard_file = "i18n/locale/zh_CN.json"
-with open(standard_file, "r", encoding="utf-8") as f:
-    standard_data = json.load(f, object_pairs_hook=OrderedDict)
-standard_keys = set(standard_data.keys())
-
-# Define the standard file name
-unused_keys = standard_keys - code_keys
-print("Unused keys:", len(unused_keys))
-for unused_key in unused_keys:
-    print("\t", unused_key)
-
-missing_keys = code_keys - standard_keys
-print("Missing keys:", len(missing_keys))
-for missing_key in missing_keys:
-    print("\t", missing_key)
-
-code_keys_dict = OrderedDict()
-for s in strings:
-    code_keys_dict[s] = s
-
-# write back
-with open(standard_file, "w", encoding="utf-8") as f:
-    json.dump(code_keys_dict, f, ensure_ascii=False, indent=4, sort_keys=True)
-    f.write("\n")
spaces/Benson/text-generation/Examples/Descargar Amp Letras De Fuera De Mi Vientre Por Prospa Ochimana.md DELETED
@@ -1,56 +0,0 @@
-
-<h1>Descargar & Letras de Fuera de mi vientre por Prospa Ochimana</h1>
-<p>¿Estás buscando una poderosa e inspiradora canción gospel que despierte tu espíritu y te llene de alegría? Si es así, entonces deberías escuchar Out of My Belly de Prospa Ochimana. Esta canción es una obra maestra que bendecirá tu vida y te acercará a Dios.</p>
-<p>En este artículo, te diremos todo lo que necesitas saber sobre esta canción, incluyendo cómo descargarla, cuáles son las letras, y cuál es el significado detrás de ellas. ¡Vamos a empezar! </p>
-<h2>descargar amp; letras de fuera de mi vientre por prospa ochimana</h2><br /><p><b><b>Download File</b> ->>> <a href="https://bltlly.com/2v6MLe">https://bltlly.com/2v6MLe</a></b></p><br /><br />
-<h2>¿Qué está fuera de mi vientre acerca de? </h2>
-<p>Fuera de mi vientre es una canción que expresa el deseo de liberar el río de agua viva que fluye desde dentro de cada creyente. La canción está basada en las palabras de Jesús en Juan 7:38, donde dijo, "El que cree en Mí, como la Escritura ha dicho, de su corazón fluirán ríos de agua viva." </p>
-<p>La canción declara que cada vez que este río fluye, la vida se libera. Cada cosa muerta vuelve a la vida a medida que hacen contacto con este río. Es un río que da vida que sana, entrega, restaura y transforma. La canción también invita a todos los que tienen sed a venir a Jesús y beber de este río. </p>
-<h2>¿Quién es Prospa Ochimana? </h2>
-<p>Prospa Ochimana es un cantante de gospel y compositor nigeriano que es conocido por ser un adorador. También es el CEO de Tornveil Music International, un sello discográfico gospel en Nigeria que abrió en enero de 2020. </p>
-<p>Prospa Ochimana proviene de la tribu Ankpa, estado de Kogi, Nigeria, pero actualmente vive en Abuja. Nació el 6 de noviembre y se graduó de la Universidad Estatal de Nasarawa, Keffi, donde obtuvo un título en Lingüística.</p>
-
-<h2>¿Por qué es popular Fuera de mi vientre? </h2>
-<p>Out of My Belly es una de las canciones más populares de Prospa Ochimana. Fue lanzado en noviembre de 2020 y desde entonces ha ganado millones de visitas y descargas en línea. La canción también ha sido interpretada en vivo en varios eventos y conciertos por Prospa Ochimana y otros cantantes de gospel. </p>
-<p>La razón por la que esta canción es popular es porque resuena con muchas personas que tienen hambre de más de Dios y Su presencia. La canción también inspira a la gente a aprovechar su potencial y propósito como vasos de la gloria de Dios. La canción también tiene una melodía pegadiza y un mensaje poderoso que eleva y anima a los oyentes. </p>
-<p></p>
-<h2>¿Dónde se puede descargar Fuera de mi vientre? </h2>
-<p>Si quieres descargar Out of My Belly de Prospa Ochimana, tienes varias opciones para elegir. Puede descargarlo desde su sitio web oficial , o desde otras plataformas como YouTube , Spotify , Apple Music , Amazon Music , y más. </p>
-<h2>¿Cuáles son los beneficios de descargar Out of My Belly? </h2>
-<p>Descargar Out of My Belly de Prospa Ochimana tiene muchos beneficios para ti como oyente. Algunos de ellos son:</p>
-<ul>
-<li>Puedes escuchar la canción sin conexión en cualquier momento y en cualquier lugar que quieras. </li>
-<li>Puedes disfrutar del audio y video de alta calidad de la canción. </li>
-<li>Puedes compartir la canción con tus amigos y familiares a través de las redes sociales u otros medios. </li>
-<li>Puedes apoyar al artista y su ministerio comprando su música. </li>
-<li>Puedes experimentar el poder y la presencia de Dios mientras escuchas la canción. </li>
-</ul>
-<h2>¿Cuáles son las letras de Fuera de mi vientre? </h2>
-<p>Las letras de Fuera de mi vientre por Prospa Ochimana son las siguientes:</p>
-<código>
-
-<h2>¿Cuál es el significado de la letra? </h2>
-<p>El significado de las letras de Out of My Belly de Prospa Ochimana es que cada creyente tiene una fuente de vida y poder dentro de ellos, que es el Espíritu Santo. El Espíritu Santo es el río que fluye de dentro de nosotros y nos da todo lo que necesitamos. Él es quien nos sana, nos libera, nos restaura y nos transforma. Él es también el que nos permite ser una bendición para los demás al liberar Su vida a través de nosotros. </p>
-<p>La canción también nos recuerda que necesitamos venir a Jesús y beber de Él si tenemos sed de más de Él. Él es la fuente de agua viva que satisface nuestros anhelos más profundos. Él es también el que nos invita a creer en Él y recibir Su promesa de ríos de agua viva que fluye de nuestros corazones. </p>
-<h2>¿Cómo puedes cantar junto con Out of My Belly? </h2>
-<p>Si quieres cantar junto con Out of My Belly de Prospa Ochimana, puedes seguir estos pasos:</p>
-<ol>
-<li>Descarga la canción desde cualquier plataforma que prefieras. </li>
-<li>Escucha la canción y aprende la melodía y la letra. </li>
-<li>Encuentra una versión de karaoke o instrumental de la canción en línea o crea la tuya propia usando una aplicación o software. </li>
-<li>Practica el canto junto con el karaoke o la versión instrumental hasta que lo domines. </li>
-<li>Canta junto con la canción original y disfruta! </li>
-</ol>
-<h2>Conclusión</h2>
-<p>En conclusión, Out of My Belly de Prospa Ochimana es una maravillosa canción gospel que te inspirará a liberar el río de agua viva que fluye desde tu interior. La canción es también un testimonio de cómo Dios puede usar a cualquiera que esté dispuesto a ser Su recipiente. La canción está disponible para su descarga en varias plataformas y tiene letras increíbles que transmiten un mensaje poderoso. Esperamos que haya disfrutado de este artículo y haya aprendido algo nuevo. Si lo hizo, por favor compártalo con sus amigos y familiares. Y no te olvides de descargar y cantar junto con Out of My Belly de Prospa Ochimana! </p>
-<h2>Preguntas frecuentes</h2>
-
-<h3>Q: ¿Cuándo fue liberado Out of My Belly? </h3>
-<p>A: Out of My Belly fue lanzado el 27 de noviembre de 2020. </p>
-<h3>Q: ¿Quién produjo Fuera de mi vientre? </h3>
-<p>A: Fuera de mi vientre fue producido por Sunny Pee.</p>
-<h3>P: ¿Cuántas visitas tiene Out of My Belly en YouTube? </h3>
-<p>A: A partir del 20 de junio de 2023, Out of My Belly tiene más de 1.1 millones de visitas en YouTube. El video oficial de la canción fue subido por Prospa Ochimana el 27 de noviembre de 2020. El video muestra a Prospa Ochimana cantando la canción con una banda en vivo y un coro en un entorno de estudio. El video también tiene subtítulos para la letra de la canción. Puede ver el video aquí o haciendo clic en la imagen de abajo. <img src="( 1 )" alt="Fuera de mi vientre por Prospa Ochimana video de YouTube">
-También hay otras versiones de la canción en YouTube, como un video lírico y una presentación en vivo . Puedes echarles un vistazo si quieres ver diferentes formas de presentar la canción. Espero que hayas disfrutado de este artículo y hayas aprendido algo nuevo. Si lo hiciste, por favor compártelo con tus amigos y familiares. ¡Y no olvides descargar y cantar junto con Out of My Belly de Prospa Ochimana! </p> 64aa2da5cf<br />
-<br />
-<br />
spaces/BigChungux/Pet_Survey2/app.py DELETED
@@ -1,172 +0,0 @@
-### ----------------------------- ###
-### libraries ###
-### ----------------------------- ###
-
-import gradio as gr
-import pandas as pd
-import numpy as np
-from sklearn.model_selection import train_test_split
-from sklearn.linear_model import LogisticRegression
-from sklearn import metrics
-
-
-### ------------------------------ ###
-### data transformation ###
-### ------------------------------ ###
-
-# load dataset
-uncleaned_data = pd.read_csv('data.csv')
-
-# remove timestamp from dataset (always first column)
-uncleaned_data = uncleaned_data.iloc[: , 1:]
-data = pd.DataFrame()
-
-# keep track of which columns are categorical and what
-# those columns' value mappings are
-# structure: {colname1: {...}, colname2: {...} }
-cat_value_dicts = {}
-final_colname = uncleaned_data.columns[len(uncleaned_data.columns) - 1]
-
-# for each column...
-for (colname, colval) in uncleaned_data.iteritems():
-
-    # check if col is already a number; if so, add col directly
-    # to new dataframe and skip to next column
-    if isinstance(colval.values[0], (np.integer, float)):
-        data[colname] = uncleaned_data[colname].copy()
-        continue
-
-    # structure: {0: "lilac", 1: "blue", ...}
-    new_dict = {}
-    val = 0 # first index per column
-    transformed_col_vals = [] # new numeric datapoints
-
-    # if not, for each item in that column...
-    for (row, item) in enumerate(colval.values):
-
-        # if item is not in this col's dict...
-        if item not in new_dict:
-            new_dict[item] = val
-            val += 1
-
-        # then add numerical value to transformed dataframe
-        transformed_col_vals.append(new_dict[item])
-
-    # reverse dictionary only for final col (0, 1) => (vals)
-    if colname == final_colname:
-        new_dict = {value : key for (key, value) in new_dict.items()}
-
-    cat_value_dicts[colname] = new_dict
-    data[colname] = transformed_col_vals
-
-
-### -------------------------------- ###
-### model training ###
-### -------------------------------- ###
-
-# select features and predicton; automatically selects last column as prediction
-cols = len(data.columns)
-num_features = cols - 1
-x = data.iloc[: , :num_features]
-y = data.iloc[: , num_features:]
-
-# split data into training and testing sets
-x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25)
-
-# instantiate the model (using default parameters)
-model = LogisticRegression()
-model.fit(x_train, y_train.values.ravel())
-y_pred = model.predict(x_test)
-
-
-### -------------------------------- ###
-### article generation ###
-### -------------------------------- ###
-# borrow file reading function from reader.py
-
-def get_feat():
-    feats = [abs(x) for x in model.coef_[0]]
-    max_val = max(feats)
-    idx = feats.index(max_val)
-    return data.columns[idx]
-
-acc = str(round(metrics.accuracy_score(y_test, y_pred) * 100, 1)) + "%"
-most_imp_feat = get_feat()
-# info = get_article(acc, most_imp_feat)
-
-
-
-### ------------------------------- ###
-### interface creation ###
-### ------------------------------- ###
-
-
-# predictor for generic number of features
-def general_predictor(*args):
-    features = []
-
-    # transform categorical input
-    for colname, arg in zip(data.columns, args):
-        if (colname in cat_value_dicts):
-            features.append(cat_value_dicts[colname][arg])
-        else:
-            features.append(arg)
-
-    # predict single datapoint
-    new_input = [features]
-    result = model.predict(new_input)
-    return cat_value_dicts[final_colname][result[0]]
-
-# add data labels to replace those lost via star-args
-
-
-block = gr.Blocks()
-
-with open('info.md') as f:
-    with block:
-        gr.Markdown(f.readline())
-        gr.Markdown('Take the quiz to get a personalized recommendation using AI.')
-
-        with gr.Row():
-            with gr.Box():
-                inputls = []
-                for colname in data.columns:
-                    # skip last column
-                    if colname == final_colname:
-                        continue
-
-                    # access categories dict if data is categorical
-                    # otherwise, just use a number input
-                    if colname in cat_value_dicts:
-                        radio_options = list(cat_value_dicts[colname].keys())
-                        inputls.append(gr.inputs.Dropdown(choices=radio_options, type="value", label=colname))
-                    else:
-                        # add numerical input
-                        inputls.append(gr.inputs.Number(label=colname))
-                gr.Markdown("<br />")
-
-                submit = gr.Button("Click to see your personalized result!", variant="primary")
-                gr.Markdown("<br />")
-                output = gr.Textbox(label="Your recommendation:", placeholder="your recommendation will appear here")
-
-                submit.click(fn=general_predictor, inputs=inputls, outputs=output)
-                gr.Markdown("<br />")
-
-        with gr.Row():
-            with gr.Box():
-                gr.Markdown(f"<h3>Accuracy: </h3>{acc}")
-            with gr.Box():
-                gr.Markdown(f"<h3>Most important feature: </h3>{most_imp_feat}")
-
-        gr.Markdown("<br />")
-
-        with gr.Box():
-            gr.Markdown('''⭐ Note that model accuracy is based on the uploaded data.csv and reflects how well the AI model can give correct recommendations for <em>that dataset</em>. Model accuracy and most important feature can be helpful for understanding how the model works, but <em>should not be considered absolute facts about the real world</em>.''')
-
-        with gr.Box():
-            with open('info.md') as f:
-                f.readline()
-                gr.Markdown(f.read())
-
-# show the interface
-block.launch()
spaces/Billyosoro/ESRGAN/realesrgan/utils.py DELETED
@@ -1,280 +0,0 @@
-import cv2
-import math
-import numpy as np
-import os
-import queue
-import threading
-import torch
-from basicsr.utils.download_util import load_file_from_url
-from torch.nn import functional as F
-
-ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
-
-
-class RealESRGANer():
-    """A helper class for upsampling images with RealESRGAN.
-
-    Args:
-        scale (int): Upsampling scale factor used in the networks. It is usually 2 or 4.
-        model_path (str): The path to the pretrained model. It can be urls (will first download it automatically).
-        model (nn.Module): The defined network. Default: None.
-        tile (int): As too large images result in the out of GPU memory issue, so this tile option will first crop
-            input images into tiles, and then process each of them. Finally, they will be merged into one image.
-            0 denotes for do not use tile. Default: 0.
-        tile_pad (int): The pad size for each tile, to remove border artifacts. Default: 10.
-        pre_pad (int): Pad the input images to avoid border artifacts. Default: 10.
-        half (float): Whether to use half precision during inference. Default: False.
-    """
-
-    def __init__(self, scale, model_path, model=None, tile=0, tile_pad=10, pre_pad=10, half=False):
-        self.scale = scale
-        self.tile_size = tile
-        self.tile_pad = tile_pad
-        self.pre_pad = pre_pad
-        self.mod_scale = None
-        self.half = half
-
-        # initialize model
-        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
-        # if the model_path starts with https, it will first download models to the folder: realesrgan/weights
-        if model_path.startswith('https://'):
-            model_path = load_file_from_url(
-                url=model_path, model_dir=os.path.join(ROOT_DIR, 'realesrgan/weights'), progress=True, file_name=None)
-        loadnet = torch.load(model_path, map_location=torch.device('cpu'))
-        # prefer to use params_ema
-        if 'params_ema' in loadnet:
-            keyname = 'params_ema'
-        else:
-            keyname = 'params'
-        model.load_state_dict(loadnet[keyname], strict=True)
-        model.eval()
-        self.model = model.to(self.device)
-        if self.half:
-            self.model = self.model.half()
-
-    def pre_process(self, img):
-        """Pre-process, such as pre-pad and mod pad, so that the images can be divisible
-        """
-        img = torch.from_numpy(np.transpose(img, (2, 0, 1))).float()
-        self.img = img.unsqueeze(0).to(self.device)
-        if self.half:
-            self.img = self.img.half()
-
-        # pre_pad
-        if self.pre_pad != 0:
-            self.img = F.pad(self.img, (0, self.pre_pad, 0, self.pre_pad), 'reflect')
-        # mod pad for divisible borders
-        if self.scale == 2:
-            self.mod_scale = 2
-        elif self.scale == 1:
-            self.mod_scale = 4
-        if self.mod_scale is not None:
-            self.mod_pad_h, self.mod_pad_w = 0, 0
-            _, _, h, w = self.img.size()
-            if (h % self.mod_scale != 0):
-                self.mod_pad_h = (self.mod_scale - h % self.mod_scale)
-            if (w % self.mod_scale != 0):
-                self.mod_pad_w = (self.mod_scale - w % self.mod_scale)
-            self.img = F.pad(self.img, (0, self.mod_pad_w, 0, self.mod_pad_h), 'reflect')
-
-    def process(self):
-        # model inference
-        self.output = self.model(self.img)
-
-    def tile_process(self):
-        """It will first crop input images to tiles, and then process each tile.
-        Finally, all the processed tiles are merged into one images.
-
-        Modified from: https://github.com/ata4/esrgan-launcher
-        """
-        batch, channel, height, width = self.img.shape
-        output_height = height * self.scale
-        output_width = width * self.scale
-        output_shape = (batch, channel, output_height, output_width)
-
-        # start with black image
-        self.output = self.img.new_zeros(output_shape)
-        tiles_x = math.ceil(width / self.tile_size)
-        tiles_y = math.ceil(height / self.tile_size)
-
-        # loop over all tiles
-        for y in range(tiles_y):
-            for x in range(tiles_x):
-                # extract tile from input image
-                ofs_x = x * self.tile_size
-                ofs_y = y * self.tile_size
-                # input tile area on total image
-                input_start_x = ofs_x
-                input_end_x = min(ofs_x + self.tile_size, width)
-                input_start_y = ofs_y
-                input_end_y = min(ofs_y + self.tile_size, height)
-
-                # input tile area on total image with padding
-                input_start_x_pad = max(input_start_x - self.tile_pad, 0)
-                input_end_x_pad = min(input_end_x + self.tile_pad, width)
-                input_start_y_pad = max(input_start_y - self.tile_pad, 0)
-                input_end_y_pad = min(input_end_y + self.tile_pad, height)
-
-                # input tile dimensions
-                input_tile_width = input_end_x - input_start_x
-                input_tile_height = input_end_y - input_start_y
-                tile_idx = y * tiles_x + x + 1
-                input_tile = self.img[:, :, input_start_y_pad:input_end_y_pad, input_start_x_pad:input_end_x_pad]
-
-                # upscale tile
-                try:
-                    with torch.no_grad():
-                        output_tile = self.model(input_tile)
-                except RuntimeError as error:
-                    print('Error', error)
-                print(f'\tTile {tile_idx}/{tiles_x * tiles_y}')
-
-                # output tile area on total image
-                output_start_x = input_start_x * self.scale
-                output_end_x = input_end_x * self.scale
-                output_start_y = input_start_y * self.scale
-                output_end_y = input_end_y * self.scale
-
-                # output tile area without padding
-                output_start_x_tile = (input_start_x - input_start_x_pad) * self.scale
-                output_end_x_tile = output_start_x_tile + input_tile_width * self.scale
-                output_start_y_tile = (input_start_y - input_start_y_pad) * self.scale
-                output_end_y_tile = output_start_y_tile + input_tile_height * self.scale
-
-                # put tile into output image
-                self.output[:, :, output_start_y:output_end_y,
-                            output_start_x:output_end_x] = output_tile[:, :, output_start_y_tile:output_end_y_tile,
-                                                                       output_start_x_tile:output_end_x_tile]
-
-    def post_process(self):
-        # remove extra pad
-        if self.mod_scale is not None:
-            _, _, h, w = self.output.size()
-            self.output = self.output[:, :, 0:h - self.mod_pad_h * self.scale, 0:w - self.mod_pad_w * self.scale]
-        # remove prepad
-        if self.pre_pad != 0:
-            _, _, h, w = self.output.size()
-            self.output = self.output[:, :, 0:h - self.pre_pad * self.scale, 0:w - self.pre_pad * self.scale]
-        return self.output
-
-    @torch.no_grad()
-    def enhance(self, img, outscale=None, alpha_upsampler='realesrgan'):
-        h_input, w_input = img.shape[0:2]
-        # img: numpy
-        img = img.astype(np.float32)
-        if np.max(img) > 256:  # 16-bit image
-            max_range = 65535
-            print('\tInput is a 16-bit image')
-        else:
-            max_range = 255
-        img = img / max_range
-        if len(img.shape) == 2:  # gray image
-            img_mode = 'L'
-            img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
-        elif img.shape[2] == 4:  # RGBA image with alpha channel
-            img_mode = 'RGBA'
-            alpha = img[:, :, 3]
-            img = img[:, :, 0:3]
-            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
-            if alpha_upsampler == 'realesrgan':
-                alpha = cv2.cvtColor(alpha, cv2.COLOR_GRAY2RGB)
-        else:
-            img_mode = 'RGB'
-            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
-
-        # ------------------- process image (without the alpha channel) ------------------- #
-        self.pre_process(img)
-        if self.tile_size > 0:
-            self.tile_process()
-        else:
-            self.process()
-        output_img = self.post_process()
-        output_img = output_img.data.squeeze().float().cpu().clamp_(0, 1).numpy()
-        output_img = np.transpose(output_img[[2, 1, 0], :, :], (1, 2, 0))
-        if img_mode == 'L':
-            output_img = cv2.cvtColor(output_img, cv2.COLOR_BGR2GRAY)
-
-        # ------------------- process the alpha channel if necessary ------------------- #
-        if img_mode == 'RGBA':
-            if alpha_upsampler == 'realesrgan':
-                self.pre_process(alpha)
-                if self.tile_size > 0:
-                    self.tile_process()
-                else:
-                    self.process()
-                output_alpha = self.post_process()
-                output_alpha = output_alpha.data.squeeze().float().cpu().clamp_(0, 1).numpy()
-                output_alpha = np.transpose(output_alpha[[2, 1, 0], :, :], (1, 2, 0))
-                output_alpha = cv2.cvtColor(output_alpha, cv2.COLOR_BGR2GRAY)
-            else:  # use the cv2 resize for alpha channel
-                h, w = alpha.shape[0:2]
-                output_alpha = cv2.resize(alpha, (w * self.scale, h * self.scale), interpolation=cv2.INTER_LINEAR)
-
-            # merge the alpha channel
-            output_img = cv2.cvtColor(output_img, cv2.COLOR_BGR2BGRA)
-            output_img[:, :, 3] = output_alpha
-
-        # ------------------------------ return ------------------------------ #
-        if max_range == 65535:  # 16-bit image
-            output = (output_img * 65535.0).round().astype(np.uint16)
-        else:
-            output = (output_img * 255.0).round().astype(np.uint8)
-
-        if outscale is not None and outscale != float(self.scale):
-            output = cv2.resize(
-                output, (
-                    int(w_input * outscale),
-                    int(h_input * outscale),
-                ), interpolation=cv2.INTER_LANCZOS4)
-
-        return output, img_mode
-
-
-class PrefetchReader(threading.Thread):
-    """Prefetch images.
-
-    Args:
-        img_list (list[str]): A image list of image paths to be read.
-        num_prefetch_queue (int): Number of prefetch queue.
-    """
-
-    def __init__(self, img_list, num_prefetch_queue):
-        super().__init__()
-        self.que = queue.Queue(num_prefetch_queue)
-        self.img_list = img_list
-
-    def run(self):
-        for img_path in self.img_list:
-            img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)
-            self.que.put(img)
-
-        self.que.put(None)
-
-    def __next__(self):
-        next_item = self.que.get()
-        if next_item is None:
-            raise StopIteration
-        return next_item
-
-    def __iter__(self):
-        return self
-
-
-class IOConsumer(threading.Thread):
-
-    def __init__(self, opt, que, qid):
-        super().__init__()
-        self._queue = que
-        self.qid = qid
-        self.opt = opt
-
-    def run(self):
-        while True:
-            msg = self._queue.get()
-            if isinstance(msg, str) and msg == 'quit':
-                break
-
-            output = msg['output']
-            save_path = msg['save_path']
-            cv2.imwrite(save_path, output)
-        print(f'IO worker {self.qid} is done.')
spaces/CVPR/MonoScene/monoscene/.ipynb_checkpoints/monoscene_model-checkpoint.py DELETED
@@ -1,22 +0,0 @@
-from transformers import PreTrainedModel
-from .config import MonoSceneConfig
-from monoscene.monoscene import MonoScene
-
-
-
-class MonoSceneModel(PreTrainedModel):
-    config_class = ResnetConfig
-
-    def __init__(self, config):
-        super().__init__(config)
-        self.model = MonoScene(
-            dataset=config.dataset,
-            n_classes=config.n_classes,
-            feature=config.feature,
-            project_scale=config.project_scale,
-            full_scene_size=config.full_scene_size
-        )
-
-
-    def forward(self, tensor):
-        return self.model.forward(tensor)
spaces/Chris4K/llms_compare/app.py DELETED
@@ -1,274 +0,0 @@
-import os, requests
-import gradio as gr
-HF_READ_API_KEY = os.environ["HF_READ_API_KEY"]
-
-### This code loads the models and undertakes inference locally ###
-
-# from transformers import GPTNeoForCausalLM, GPT2Tokenizer
-# from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, AutoModelForCausalLM
-# model = GPTNeoForCausalLM.from_pretrained("EleutherAI/gpt-neo-2.7B")
-# tokenizer = GPT2Tokenizer.from_pretrained("EleutherAI/gpt-neo-2.7B")
-# tokenizer = T5Tokenizer.from_pretrained("google/flan-t5-small")
-# model = T5ForConditionalGeneration.from_pretrained("google/flan-t5-small")
-
-model_list = ['google/flan-t5-small', 'google/flan-t5-base', 'google/flan-t5-large', 'google/flan-t5-xl', 'google/flan-t5-xxl',
-              'gpt2-medium', 'gpt2-large', 'gpt2-xl',
-              'EleutherAI/gpt-neo-1.3B', 'EleutherAI/gpt-neo-2.7B', 'EleutherAI/gpt-neo-6b', 'EleutherAI/gpt-neox-20b',
-              'bigscience/bloom-1b7', 'bigscience/bloom-3b', 'bigscience/bloom-7b1'
-              ]
-
-def load_model(model_name):
-    if model_name == 'EleutherAI/gpt-neo-2.7B' or model_name == 'gpt2-medium' or model_name == 'gpt2-large':
-        model = AutoModelForCausalLM.from_pretrained(model_name)
-    else:
-        model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
-    tokenizer = AutoTokenizer.from_pretrained(model_name)
-    tokenizer.pad_token = tokenizer.eos_token
-    # tokenizer.padding_side = "left"
-    return model, tokenizer
-
-def maybe_is_truncated(s):
-    punct = [".", "!", "?", '"']
-    if s[-1] in punct:
-        return False
-    return True
-
-def load_and_generate(model_name, prompt):
-    model, tokenizer = load_model(model_name)
-
-    temperature=0.25
-    tokens = tokenizer(prompt, return_tensors="pt")
-    max_length = len(tokens.input_ids[0])+5
-    input_ids = tokens.input_ids
-    attention_mask = tokens.attention_mask
-    # see huggingface.co/docs/transformers/main_classes/text_generation
-    gen_tokens = model.generate(
-        input_ids=input_ids,
-        attention_mask=attention_mask,
-        pad_token_id=tokenizer.eos_token_id,
-        do_sample=True,
-        temperature=temperature,
-        # max_length=max_length,
-        max_new_tokens=max_length,
-        # use_cache=False,
-        # penalty_alpha=0.1,
-        # top_k=100,
-        # early_stopping=False
-    )
-    gen_text = tokenizer.batch_decode(gen_tokens)[0]
-
-    max_times = 20
-    while maybe_is_truncated(gen_text) and max_times > 0:
-        tokens = tokenizer(gen_text, return_tensors="pt")
-        max_length = len(tokens.input_ids[0])+5
-        input_ids = tokens.input_ids
-        attention_mask = tokens.attention_mask
-
-        gen_tokens = model.generate(
-            input_ids=input_ids,
-            attention_mask=attention_mask,
-            pad_token_id=tokenizer.eos_token_id,
-            do_sample=True,
-            temperature=temperature,
-            max_length=max_length,
-            # max_new_tokens=100,
-            # use_cache=True,
-            # penalty_alpha=0.1,
-            # top_k=100,
-            # early_stopping=False
-        )
-
-        gen_text = tokenizer.batch_decode(gen_tokens)[0]
-
-        max_times -= 1
-
-    return gen_text.replace("<pad>", "").replace("</s>", "")
-
-### This code for the inference api ###
-
-def generate_from_api(query, model_name, temperature, max_tokens):
-    headers = {f"Authorization": f"Bearer {HF_READ_API_KEY}",
-               "wait_for_model": "true",
-               "temperature": str(temperature),
-               "max_tokens": str(max_tokens),
-               "max_time": str(120)}
-
-    model_api_url = f"https://api-inference.huggingface.co/models/{model_name}"
-
-    payload = {"inputs": query}
-    response = requests.post(model_api_url, headers=headers, json=payload)
-    while response.status_code != 200:
-        response = requests.post(model_api_url, headers=headers, json=payload)
-    return response.json()[0]['generated_text']
-
-def generate_from_api_check(query, model_name, temperature, max_tokens):
-    headers = {f"Authorization": f"Bearer {HF_READ_API_KEY}",
-               "wait_for_model": "true",
-               "temperature": str(temperature),
-               "max_tokens": str(max_tokens),
-               "max_time": str(120)}
-
-    model_api_url = f"https://api-inference.huggingface.co/models/{model_name}"
-
-    payload = {"inputs": query}
-    response = requests.post(model_api_url, headers=headers, json=payload)
-    while response.status_code != 200:
-        response = requests.post(model_api_url, headers=headers, json=payload)
-
-    max_times = 20
-    gen_text = response.json()[0]['generated_text']
-    while maybe_is_truncated(gen_text) and max_times > 0:
-        headers = {f"Authorization": f"Bearer {HF_READ_API_KEY}",
-                   "wait_for_model": "true",
-                   "temperature": str(temperature),
-                   "max_tokens": str(max_tokens + len(gen_text)),
-                   "max_time": str(120)}
-        payload = {"inputs": query + ' ' + gen_text}
-        response = requests.post(model_api_url, headers=headers, json=payload)
-        while response.status_code != 200:
-            response = requests.post(model_api_url, headers=headers, json=payload)
-        gen_text = response.json()[0]['generated_text']
-        max_times -= 1
-
-    return gen_text
-
-
-with gr.Blocks(css='style.css') as demo:
-    gr.HTML("""
-    <div style="text-align: center; max-width: 1240px; margin: 0 auto;">
-        <h1 style="font-weight: 200; font-size: 20px; margin-bottom:8px; margin-top:0px;">
-        Different Strokes (Prompts) for Different Folks (LLMs)
-        </h1>
-        <hr style="margin-bottom:5px; margin-top:5px;">
-        <h4 style="font-weight: 50; font-size: 14px; margin-bottom:0px; margin-top:0px;">
-        After reading <a href="https://github.com/dair-ai/Prompt-Engineering-Guide">Prompt Engineering Guide</a>, which is a good guide when starting to learn about prompts for large language models (LLMs), specifically OpenAI's LLMs, I was interested in seeing the results with for other LLMs. Hence, did up a simple demonstration of different prompts for different popular LLMs of different sizes. The prompt examples are taken from the Prompt Engineering Guide, and the LLMs that you can select below are all available on Hugging Face. If you are interested in comparing them with the prompts from OpenAI's model, you can refer to the writeup in the <a href="https://github.com/dair-ai/Prompt-Engineering-Guide">Prompt Engineering Guide</a> itself.
-        </h4>
-        <hr style="margin-bottom:5px; margin-top:5px;">
-        <h5 style="font-weight: 50; font-size: 12px; margin-bottom:0px; margin-top:0px;">
-        Note: Larger models will take a while, especially on the first run.
-        </h5>
-    </div>
-    """)
-
-    with gr.Column(elem_id="col-container"):
-        with gr.Row(variant="compact"):
-
-            model_name = gr.Dropdown(
-                model_list,
-                label="Select model",
-                value=model_list[0],
-            ).style(
-                container=False,
-            )
-
-            temperature = gr.Slider(
-                0.1, 100.0, value=1.0, label="Temperature",
-            ).style(
-                container=False,
-            )
-
-            max_tokens = gr.Slider(
-                10, 2250, step=1, value=100, label="Max. tokens (in output)",
-            ).style(
-                container=False,
-            )
-
-            check_truncated = gr.Checkbox(
-                label="Check for truncated output",
-                value=False,
-            ).style(
-                container=False,
-            )
-
-        with gr.Row(variant="compact"):
-            prompt = gr.Textbox(
-                label="Enter your prompt",
-                show_label=False,
-                # max_lines=2,
-                placeholder="Select your prompt from the examples below",
-            ).style(
-                container=False,
-            )
-            process = gr.Button("Generate").style(full_width=False)
-
-        with gr.Row():
-            output=gr.Textbox(
-                label="LLM output",
-                show_label=True)
-
-        gr.HTML("""
-        <div>
-            <h4 style="font-weight: 50; font-size: 14px; margin-bottom:0px; margin-top:0px;">
-            Prompt examples. Select the prompt you would like to test, and it will appear (properly formatted) in the input box above.
-            </h4>
-        </div>
-        """)
-        with gr.Tab("Introduction"):
-            example_set_1 = gr.Examples(label = 'Simple Prompt vs. Instruct then Prompt.',
-                examples=["The sky is ", "Complete the following sentence: The sky is ",],
-                inputs=[prompt])
-            example_set_2 = gr.Examples(label = 'Few Shot Prompt.',
-                examples=["This is awesome! // Positive\nThis is bad! // Negative\nWow that movie was rad! // Positive\nWhat a horrible show! //",],
-                inputs=[prompt])
-            example_set_3 = gr.Examples(label = 'Explicitly Specify the Instruction',
-                examples=["### Instruction ###\nTranslate the text below to Spanish:\nText: 'hello!'",],
-                inputs=[prompt])
-            example_set_4 = gr.Examples(label = 'Be Very Specific',
-                examples=["Extract the name of places in the following text.\nDesired format:\nPlace: <comma_separated_list_of_company_names>\nInput: 'Although these developments are encouraging to researchers, much is still a mystery. “We often have a black box between the brain and the effect we see in the periphery,” says Henrique Veiga-Fernandes, a neuroimmunologist at the Champalimaud Centre for the Unknown in Lisbon. “If we want to use it in the therapeutic context, we actually need to understand the mechanism.'",],
-                inputs=[prompt])
-            example_set_5 = gr.Examples(label = 'Precision',
-                examples=["Explain the concept of deep learning. Keep the explanation short, only a few sentences, and don't be too descriptive.", "Use 2-3 sentences to explain the concept of deep learning to a high school student."],
-                inputs=[prompt])
-            example_set_6 = gr.Examples(label = 'Focus on What LLM Should Do',
-                examples=["The following is an agent that recommends movies to a customer. The agent is responsible to recommend a movie from the top global trending movies. It should refrain from asking users for their preferences and avoid asking for personal information. If the agent doesn't have a movie to recommend, it should respond 'Sorry, couldn't find a movie to recommend today.'.\nCustomer: Please recommend a movie based on my interests.\nAgent:"],
-                inputs=[prompt])
-
-        with gr.Tab("Basic Tasks"):
-            example_set_7 = gr.Examples(label = 'Explain vs. Summarize',
-                examples=["Explain antibiotics.\nA:", "Antibiotics are a type of medication used to treat bacterial infections. They work by either killing the bacteria or preventing them from reproducing, allowing the body’s immune system to fight off the infection. Antibiotics are usually taken orally in the form of pills, capsules, or liquid solutions, or sometimes administered intravenously. They are not effective against viral infections, and using them inappropriately can lead to antibiotic resistance.\nExplain the above in one sentence:",],
-                inputs=[prompt])
-            example_set_8 = gr.Examples(label = 'Information Extraction',
-                examples=["Author-contribution statements and acknowledgements in research papers should state clearly and specifically whether, and to what extent, the authors used AI technologies such as ChatGPT in the preparation of their manuscript and analysis. They should also indicate which LLMs were used. This will alert editors and reviewers to scrutinize manuscripts more carefully for potential biases, inaccuracies and improper source crediting. Likewise, scientific journals should be transparent about their use of LLMs, for example when selecting submitted manuscripts.\nMention the large language model based product mentioned in the paragraph above:",],
-                inputs=[prompt])
-            example_set_9 = gr.Examples(label = 'Question and Answer',
-                examples=["Answer the question based on the context below. Keep the answer short and concise. Respond 'Unsure about answer' if not sure about the answer.\nContext: Teplizumab traces its roots to a New Jersey drug company called Ortho Pharmaceutical. There, scientists generated an early version of the antibody, dubbed OKT3. Originally sourced from mice, the molecule was able to bind to the surface of T cells and limit their cell-killing potential. In 1986, it was approved to help prevent organ rejection after kidney transplants, making it the first therapeutic antibody allowed for human use.\nQuestion: What was OKT3 originally sourced from?\nAnswer:",],
-                inputs=[prompt])
-            example_set_10 = gr.Examples(label = 'Text Classification',
-                examples=["Classify the text into neutral, negative or positive.\nText: I think the food was okay.\nSentiment:","Classify the text into neutral, negative or positive.\nText: I think the vacation is okay.\nSentiment: neutral\nText: I think the food was okay.\nSentiment:"],
-                inputs=[prompt])
-            example_set_11 = gr.Examples(label = 'Conversation',
-                examples=["The following is a conversation with an AI research assistant. The assistant tone is technical and scientific.\nHuman: Hello, who are you?\nAI: Greeting! I am an AI research assistant. How can I help you today?\nHuman: Can you tell me about the creation of blackholes?\nAI:", "The following is a conversation with an AI research assistant. The assistant answers should be easy to understand even by primary school students.\nHuman: Hello, who are you?\nAI: Greeting! I am an AI research assistant. How can I help you today?\nHuman: Can you tell me about the creation of black holes?\nAI: "],
-                inputs=[prompt])
-            example_set_12 = gr.Examples(label = 'Reasoning',
-                examples=["The odd numbers in this group add up to an even number: 15, 32, 5, 13, 82, 7, 1.\nA: ", "The odd numbers in this group add up to an even number: 15, 32, 5, 13, 82, 7, 1.\nSolve by breaking the problem into steps. First, identify the odd numbers, add them, and indicate whether the result is odd or even."],
-                inputs=[prompt])
-
-
-        with gr.Tab("Interesting Techniques"):
-            example_set_13 = gr.Examples(label = 'Zero Shot, i.e., no examples at all',
-                examples=["Classify the text into neutral, negative or positive.\nText: I think the vacation is okay.\nSentiment:",],
-                inputs=[prompt])
-            example_set_14 = gr.Examples(label = 'Few Shot, i.e., only a few examples',
-                examples=["The odd numbers in this group add up to an even number: 4, 8, 9, 15, 12, 2, 1.\nA: The answer is False.\n\nThe odd numbers in this group add up to an even number: 17, 10, 19, 4, 8, 12, 24.\nA: The answer is True.\n\nThe odd numbers in this group add up to an even number: 16, 11, 14, 4, 8, 13, 24.\nA: The answer is True.\n\nThe odd numbers in this group add up to an even number: 17, 9, 10, 12, 13, 4, 2.\nA: The answer is False.\n\nThe odd numbers in this group add up to an even number: 15, 32, 5, 13, 82, 7, 1.\nA: ",],
-                inputs=[prompt])
-            example_set_15 = gr.Examples(label = 'Chain of Thought, i.e., go through a series of rational steps',
-                examples=["The odd numbers in this group add up to an even number: 4, 8, 9, 15, 12, 2, 1.\nA: Adding all the odd numbers (9, 15, 1) gives 25. The answer is False.\n\nThe odd numbers in this group add up to an even number: 15, 32, 5, 13, 82, 7, 1.\nA:",],
-                inputs=[prompt])
-            example_set_16 = gr.Examples(label = 'Zero Shot Chain of Thought, i.e., think step by step, but no examples provided',
-                examples=["I went to the market and bought 10 apples. I gave 2 apples to the neighbor and 2 to the repairman. I then went and bought 5 more apples and ate 1. How many apples did I remain with?\nLet's think step by step.",],
-                inputs=[prompt])
-            example_set_17 = gr.Examples(label = 'Self Consistency, i.e., give examples to encourage the model to be consistent',
-                examples=["Q: There are 15 trees in the grove. Grove workers will plant trees in the grove today. After they are done,there will be 21 trees. How many trees did the grove workers plant today?\nA: We start with 15 trees. Later we have 21 trees. The difference must be the number of trees they planted.\nSo, they must have planted 21 - 15 = 6 trees. The answer is 6.\n\nQ: If there are 3 cars in the parking lot and 2 more cars arrive, how many cars are in the parking lot?\nA: There are 3 cars in the parking lot already. 2 more arrive. Now there are 3 + 2 = 5 cars. The answer is 5.\n\nQ: Olivia has $23. She bought five bagels for $3 each. How much money does she have left?\nA: She bought 5 bagels for $3 each. This means she spent 5\n\nQ: When I was 6 my sister was half my age. Now I’m 70 how old is my sister?\nA:",],
-                inputs=[prompt])
-            example_set_18 = gr.Examples(label = 'Generating Knowledge, i.e., use examples to generate knowledge',
-                examples=["Input: Greece is larger than mexico.\nKnowledge: Greece is approximately 131,957 sq km, while Mexico is approximately 1,964,375 sq km, making Mexico 1,389% larger than Greece.\n\nInput: Glasses always fog up.\nKnowledge: Condensation occurs on eyeglass lenses when water vapor from your sweat, breath, and ambient humidity lands on a cold surface, cools, and then changes into tiny drops of liquid, forming a film that you see as fog. Your lenses will be relatively cool compared to your breath, especially when the outside air is cold.\n\nInput: A fish is capable of thinking.\nKnowledge: Fish are more intelligent than they appear. In many areas, such as memory, their cognitive powers match or exceed those of ’higher’ vertebrates including non-human primates. Fish’s long-term memories help them keep track of complex social relationships.\n\nInput: A common effect of smoking lots of cigarettes in one’s lifetime is a higher than normal chance of getting lung cancer.\nKnowledge: Those who consistently averaged less than one cigarette per day over their lifetime had nine times the risk of dying from lung cancer than never smokers. Among people who smoked between one and 10 cigarettes per day, the risk of dying from lung cancer was nearly 12 times higher than that of never smokers.\n\nInput: Part of golf is trying to get a higher point total than others.\nKnowledge:",],
-                inputs=[prompt])
-
-    # process.click(load_and_generate, inputs=[model_name, prompt], outputs=[output])
-    if check_truncated:
-        process.click(generate_from_api_check, inputs=[prompt, model_name, temperature, max_tokens], outputs=[output])
-    else:
-        process.click(generate_from_api, inputs=[prompt, model_name, temperature, max_tokens], outputs=[output])
-
-# demo.launch(server_port=8080)
-demo.launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/ChristopherMarais/Andrew_AI-BB_classification-beta/mysite/andrew_alpha/static/andrew_alpha.js
DELETED
@@ -1,208 +0,0 @@
|
|
1 |
-
// Get token from cookie
|
2 |
-
function getCookie(name) {
|
3 |
-
let cookieValue = null;
|
4 |
-
if (document.cookie && document.cookie !== '') {
|
5 |
-
const cookies = document.cookie.split(';');
|
6 |
-
for (let i = 0; i < cookies.length; i++) {
|
7 |
-
const cookie = cookies[i].trim();
|
8 |
-
// Does this cookie string begin with the name we want?
|
9 |
-
if (cookie.substring(0, name.length + 1) === (name + '=')) {
|
10 |
-
cookieValue = decodeURIComponent(cookie.substring(name.length + 1));
|
11 |
-
break;
|
12 |
-
}
|
13 |
-
}
|
14 |
-
}
|
15 |
-
return cookieValue;
|
16 |
-
}
|
17 |
-
|
18 |
-
// Get the video element
|
19 |
-
const video = document.getElementById("videoElement");
|
20 |
-
const captureButton = document.getElementById("captureButton");
|
21 |
-
const uploadButton = document.getElementById("uploadButton");
|
22 |
-
const capturedFrame = document.getElementById("capturedFrame");
|
23 |
-
const webcamFeed = document.getElementById("webcamFeed");
|
24 |
-
const processedFrame = document.getElementById("processedFrame");
|
25 |
-
// Get CSRF token from cookie
|
26 |
-
const csrftoken = getCookie('csrftoken');
|
27 |
-
// Get reference to form
|
28 |
-
const form = document.getElementById('myForm');
|
29 |
-
|
30 |
-
// Check if the browser supports getUserMedia
|
31 |
-
if (navigator.mediaDevices.getUserMedia) {
|
32 |
-
// Request access to the webcam
|
33 |
-
navigator.mediaDevices
|
34 |
-
.getUserMedia({ video: true })
|
35 |
-
.then(function (stream) {
|
36 |
-
// Set the video source to the stream from the webcam
|
37 |
-
video.srcObject = stream;
|
38 |
-
})
|
39 |
-
.catch(function (error) {
|
40 |
-
console.error("Error accessing the webcam:", error);
|
41 |
-
const message = document.createElement("p");
|
42 |
-
webcamFeed.innerHTML = "No webcam detected.";
|
43 |
-
document.body.appendChild(message);
|
44 |
-
});
|
45 |
-
} else {
|
46 |
-
console.error("getUserMedia is not supported by this browser");
|
47 |
-
}
|
48 |
-
|
49 |
-
|
50 |
-
// Variable to store latest captured frame URL
|
51 |
-
let latestFrameURL;
|
52 |
-
|
53 |
-
// Add click handler to capture button
|
54 |
-
captureButton.addEventListener("click", function() {
|
55 |
-
|
56 |
-
// Remove previously displayed captured frame (if any)
|
57 |
-
while (capturedFrame.firstChild) {
|
58 |
-
capturedFrame.firstChild.remove();
|
59 |
-
}
|
60 |
-
|
61 |
-
// Clear processed image display
|
62 |
-
while (processedFrame.firstChild) {
|
63 |
-
processedFrame.firstChild.remove();
|
64 |
-
}
|
65 |
-
|
66 |
-
// Create canvas element
|
67 |
-
const canvas = document.createElement("canvas");
|
68 |
-
const context = canvas.getContext("2d");
|
69 |
-
|
70 |
-
// Set canvas dimensions to match video
|
71 |
-
canvas.width = video.videoWidth;
|
72 |
-
canvas.height = video.videoHeight;
|
73 |
-
|
74 |
-
// Draw current video frame to canvas
|
75 |
-
context.drawImage(video, 0, 0, canvas.width, canvas.height);
|
76 |
-
|
77 |
-
// Convert canvas to data URL
|
78 |
-
const dataURL = canvas.toDataURL("image/png");
|
79 |
-
|
80 |
-
// Save data URL to reuse when appending to form
|
81 |
-
latestFrameURL = dataURL;
|
82 |
-
|
83 |
-
// Create img element for captured frame
|
84 |
-
const capturedImage = document.createElement("img");
|
85 |
-
capturedImage.src = latestFrameURL;
|
86 |
-
|
87 |
-
// Append to captured frame div
|
88 |
-
capturedFrame.appendChild(capturedImage);
|
89 |
-
if (canvas) {
|
90 |
-
|
91 |
-
// Convert canvas to blob
|
92 |
-
canvas.toBlob(function(blob) {
|
93 |
-
|
94 |
-
// Create file from blob
|
95 |
-
const file = new File([blob], 'capturedImage.jpg', {type: 'image/jpeg'})
|
96 |
-
|
97 |
-
// Create FormData
|
98 |
-
const formData = new FormData();
|
99 |
-
|
100 |
-
// Append file
|
101 |
-
formData.append('image', file);
|
102 |
-
|
103 |
-
// Headers with token
|
104 |
-
const headers = new Headers();
|
105 |
-
headers.append('X-CSRFToken', csrftoken);
|
106 |
-
|
107 |
-
// Send FormData
|
108 |
-
fetch('/process_uploaded_image/', {
|
109 |
-
method: 'POST',
|
110 |
-
headers: headers,
|
111 |
-
body: formData
|
112 |
-
})
|
113 |
-
.then(response => response.blob())
|
114 |
-
.then(blob => {
|
115 |
-
|
116 |
-
// Create image from blob
|
117 |
-
const img = document.createElement('img');
|
118 |
-
img.src = URL.createObjectURL(blob);
|
119 |
-
|
120 |
-
// Replace original image with processed one
|
121 |
-
while (capturedFrame.firstChild) {
|
122 |
-
capturedFrame.firstChild.remove();
|
123 |
-
}
|
124 |
-
document.getElementById('capturedFrame').appendChild(img);
|
125 |
-
|
126 |
-
// Display processed image
|
127 |
-
// Append to DOM
|
128 |
-
// document.getElementById('processedFrame').appendChild(img);
|
129 |
-
|
130 |
-
})
|
131 |
-
.catch(error => {
|
132 |
-
console.error('Error processing image');
|
133 |
-
});
|
134 |
-
|
135 |
-
}, 'image/jpeg');
|
136 |
-
|
137 |
-
} else {
|
138 |
-
console.error("Canvas not found");
|
139 |
-
}
|
140 |
-
|
141 |
-
});
|
142 |
-
|
143 |
-
// Add event listener to upload button
|
144 |
-
uploadButton.addEventListener("click", function () {
|
145 |
-
const fileInput = document.createElement("input");
|
146 |
-
fileInput.type = "file";
|
147 |
-
|
148 |
-
fileInput.addEventListener("change", function () {
|
149 |
-
const fileReader = new FileReader();
|
150 |
-
|
151 |
-
fileReader.addEventListener("load", function () {
|
152 |
-
const uploadedImageURL = fileReader.result;
|
153 |
-
|
154 |
-
// Remove previously displayed captured frame (if any)
|
155 |
-
while (capturedFrame.firstChild) {
|
156 |
-
capturedFrame.firstChild.remove();
|
157 |
-
}
|
158 |
-
// Clear processed image display
|
159 |
-
while (processedFrame.firstChild) {
|
160 |
-
processedFrame.firstChild.remove();
|
161 |
-
}
|
162 |
-
|
163 |
-
// Create an image element for displaying uploaded image
|
164 |
-
const uploadedImage = document.createElement("img");
|
165 |
-
uploadedImage.src = uploadedImageURL;
|
166 |
-
const imageFile = fileInput.files[0];
|
167 |
-
let formData = new FormData();
|
168 |
-
formData.append('image', imageFile);
|
169 |
-
|
170 |
-
fetch('/process_uploaded_image/', {
|
171 |
-
method: 'POST',
|
172 |
-
body: formData
|
173 |
-
})
|
174 |
-
.then(response => response.blob())
|
175 |
-
.then(blob => {
|
176 |
-
|
177 |
-
// Create image from blob
|
178 |
-
const img = document.createElement('img');
|
179 |
-
img.src = URL.createObjectURL(blob);
|
180 |
-
|
181 |
-
// Replace original image with processed one
|
182 |
-
while (capturedFrame.firstChild) {
|
183 |
-
capturedFrame.firstChild.remove();
|
184 |
-
}
|
185 |
-
document.getElementById('capturedFrame').appendChild(img);
|
186 |
-
|
187 |
-
// Display processed image
|
188 |
-
// Append to DOM
|
189 |
-
// document.getElementById('processedFrame').appendChild(img);
|
190 |
-
|
191 |
-
})
|
192 |
-
.catch(error => {
|
193 |
-
console.error('Error processing image');
|
194 |
-
});
|
195 |
-
|
196 |
-
|
197 |
-
// Append uploaded image to captured frame div
|
198 |
-
capturedFrame.appendChild(uploadedImage);
|
199 |
-
|
200 |
-
});
|
201 |
-
|
202 |
-
if (fileInput.files.length > 0) {
|
203 |
-
fileReader.readAsDataURL(fileInput.files[0]);
|
204 |
-
}
|
205 |
-
});
|
206 |
-
|
207 |
-
fileInput.click();
|
208 |
-
});
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CikeyQI/Yunzai/Yunzai/plugins/ws-plugin/config/system/help_system.js
DELETED
@@ -1,84 +0,0 @@
|
|
1 |
-
export const helpCfg = {
|
2 |
-
"themeSet": false,
|
3 |
-
"title": "ws帮助",
|
4 |
-
"subTitle": "Yunzai-Bot & ws-plugin",
|
5 |
-
"colWidth": 265,
|
6 |
-
"theme": "all",
|
7 |
-
"themeExclude": [
|
8 |
-
"default"
|
9 |
-
],
|
10 |
-
"colCount": 3,
|
11 |
-
"bgBlur": true
|
12 |
-
}
|
13 |
-
export const helpList = [
|
14 |
-
{
|
15 |
-
"group": "连接管理",
|
16 |
-
"list": [
|
17 |
-
{
|
18 |
-
"icon": 80,
|
19 |
-
"title": "#ws添加连接",
|
20 |
-
"desc": "添加一个新的连接"
|
21 |
-
},
|
22 |
-
{
|
23 |
-
"icon": 63,
|
24 |
-
"title": "#ws删除连接",
|
25 |
-
"desc": "删除一个已有的连接 "
|
26 |
-
},
|
27 |
-
{
|
28 |
-
"icon": 66,
|
29 |
-
"title": "#ws关闭连接",
|
30 |
-
"desc": "不会删除已有连接,同时不进行连接"
|
31 |
-
},
|
32 |
-
{
|
33 |
-
"icon": 65,
|
34 |
-
"title": "#ws打开连接",
|
35 |
-
"desc": "打开已关闭的连接"
|
36 |
-
},
|
37 |
-
{
|
38 |
-
"icon": 79,
|
39 |
-
"title": "#ws查看连接",
|
40 |
-
"desc": "查看已有的所有连接名字和状态"
|
41 |
-
},
|
42 |
-
{
|
43 |
-
"icon": 64,
|
44 |
-
"title": "#ws重新连接",
|
45 |
-
"desc": "断开连接并重新连接"
|
46 |
-
}
|
47 |
-
]
|
48 |
-
},
|
49 |
-
{
|
50 |
-
"group": "其他设置",
|
51 |
-
"list": [
|
52 |
-
{
|
53 |
-
"icon": 81,
|
54 |
-
"title": "#ws(增加/删除)(禁用/启用)群123456",
|
55 |
-
"desc": "精确处理黑名单白名单,不带群号为当前群"
|
56 |
-
},
|
57 |
-
{
|
58 |
-
"icon": 84,
|
59 |
-
"title": "#ws(禁用/启用)群123456",
|
60 |
-
"desc": "模糊匹配,比如禁用群则优先看白名单,如果有就删除,否则添加到黑名单"
|
61 |
-
},
|
62 |
-
{
|
63 |
-
"icon": 85,
|
64 |
-
"title": "#ws查看(禁用/启用)群",
|
65 |
-
"desc": "查看当前(禁用/启用)的群聊列表"
|
66 |
-
},
|
67 |
-
]
|
68 |
-
},
|
69 |
-
{
|
70 |
-
"group": "其他说明",
|
71 |
-
"list": [
|
72 |
-
{
|
73 |
-
"icon": 71,
|
74 |
-
"title": "#ws连接说明",
|
75 |
-
"desc": "查看添加连接时的说明"
|
76 |
-
},
|
77 |
-
{
|
78 |
-
"icon": 94,
|
79 |
-
"title": "#ws设置",
|
80 |
-
"desc": "插件设置"
|
81 |
-
}
|
82 |
-
]
|
83 |
-
}
|
84 |
-
]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CikeyQI/meme-api/meme_generator/memes/dont_touch/__init__.py
DELETED
@@ -1,57 +0,0 @@
|
|
1 |
-
import random
|
2 |
-
from pathlib import Path
|
3 |
-
from typing import List, Tuple
|
4 |
-
|
5 |
-
from PIL.Image import Image as IMG
|
6 |
-
from PIL.Image import Palette
|
7 |
-
from pil_utils import BuildImage
|
8 |
-
|
9 |
-
from meme_generator import add_meme
|
10 |
-
from meme_generator.utils import make_jpg_or_gif
|
11 |
-
|
12 |
-
img_dir = Path(__file__).parent / "images"
|
13 |
-
|
14 |
-
|
15 |
-
def get_dominant_colors(img: IMG) -> List[Tuple[int, int, int]]:
|
16 |
-
img = img.convert("P", palette=Palette.ADAPTIVE, colors=20)
|
17 |
-
palette = img.getpalette()
|
18 |
-
assert palette
|
19 |
-
color_indexs = sorted(img.getcolors(), reverse=True)
|
20 |
-
colors = [tuple(palette[i * 3 : i * 3 + 3]) for _, i in color_indexs]
|
21 |
-
colors = list(
|
22 |
-
filter(lambda c: c[0] * 0.299 + c[1] * 0.578 + c[2] * 0.114 < 200, colors)
|
23 |
-
)
|
24 |
-
return colors
|
25 |
-
|
26 |
-
|
27 |
-
def dont_touch(images: List[BuildImage], texts, args):
|
28 |
-
frame = BuildImage.open(img_dir / "0.png")
|
29 |
-
mask = BuildImage.open(img_dir / "mask.png").convert("L")
|
30 |
-
|
31 |
-
def paste_random_blocks(img: BuildImage, colors: List[Tuple[int, int, int]]):
|
32 |
-
x1, y1, x2, y2 = 200, 300, 400, 650
|
33 |
-
block_locs = []
|
34 |
-
for _ in range(150):
|
35 |
-
x = random.randint(x1, x2)
|
36 |
-
y = random.randint(y1, y2)
|
37 |
-
if mask.image.getpixel((x, y)) == 0:
|
38 |
-
continue
|
39 |
-
if any(abs(x - x_) < 13 and abs(y - y_) < 13 for x_, y_ in block_locs):
|
40 |
-
continue
|
41 |
-
block_locs.append((x, y))
|
42 |
-
color = random.choice(colors)
|
43 |
-
block = BuildImage.new("RGBA", (10, 10), color)
|
44 |
-
block = block.rotate(45, expand=True)
|
45 |
-
img.paste(block, (x, y), alpha=True)
|
46 |
-
|
47 |
-
def make(img: BuildImage) -> BuildImage:
|
48 |
-
img_frame = frame.copy()
|
49 |
-
colors = get_dominant_colors(img.image)
|
50 |
-
paste_random_blocks(img_frame, colors)
|
51 |
-
img = img.convert("RGBA").resize((250, 250), keep_ratio=True, inside=True)
|
52 |
-
return img_frame.paste(img, (25, 460), alpha=True)
|
53 |
-
|
54 |
-
return make_jpg_or_gif(images[0], make)
|
55 |
-
|
56 |
-
|
57 |
-
add_meme("dont_touch", dont_touch, min_images=1, max_images=1, keywords=["别碰"])
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/altair/vegalite/v5/theme.py
DELETED
@@ -1,59 +0,0 @@
|
|
1 |
-
"""Tools for enabling and registering chart themes"""
|
2 |
-
|
3 |
-
from ...utils.theme import ThemeRegistry
|
4 |
-
|
5 |
-
VEGA_THEMES = [
|
6 |
-
"ggplot2",
|
7 |
-
"quartz",
|
8 |
-
"vox",
|
9 |
-
"fivethirtyeight",
|
10 |
-
"dark",
|
11 |
-
"latimes",
|
12 |
-
"urbaninstitute",
|
13 |
-
"excel",
|
14 |
-
"googlecharts",
|
15 |
-
"powerbi",
|
16 |
-
]
|
17 |
-
|
18 |
-
|
19 |
-
class VegaTheme:
|
20 |
-
"""Implementation of a builtin vega theme."""
|
21 |
-
|
22 |
-
def __init__(self, theme):
|
23 |
-
self.theme = theme
|
24 |
-
|
25 |
-
def __call__(self):
|
26 |
-
return {
|
27 |
-
"usermeta": {"embedOptions": {"theme": self.theme}},
|
28 |
-
"config": {"view": {"continuousWidth": 300, "continuousHeight": 300}},
|
29 |
-
}
|
30 |
-
|
31 |
-
def __repr__(self):
|
32 |
-
return "VegaTheme({!r})".format(self.theme)
|
33 |
-
|
34 |
-
|
35 |
-
# The entry point group that can be used by other packages to declare other
|
36 |
-
# renderers that will be auto-detected. Explicit registration is also
|
37 |
-
# allowed by the PluginRegistery API.
|
38 |
-
ENTRY_POINT_GROUP = "altair.vegalite.v5.theme" # type: str
|
39 |
-
themes = ThemeRegistry(entry_point_group=ENTRY_POINT_GROUP)
|
40 |
-
|
41 |
-
themes.register(
|
42 |
-
"default",
|
43 |
-
lambda: {"config": {"view": {"continuousWidth": 300, "continuousHeight": 300}}},
|
44 |
-
)
|
45 |
-
themes.register(
|
46 |
-
"opaque",
|
47 |
-
lambda: {
|
48 |
-
"config": {
|
49 |
-
"background": "white",
|
50 |
-
"view": {"continuousWidth": 300, "continuousHeight": 300},
|
51 |
-
}
|
52 |
-
},
|
53 |
-
)
|
54 |
-
themes.register("none", lambda: {})
|
55 |
-
|
56 |
-
for theme in VEGA_THEMES:
|
57 |
-
themes.register(theme, VegaTheme(theme))
|
58 |
-
|
59 |
-
themes.enable("default")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/svgLib/path/shapes.py
DELETED
@@ -1,183 +0,0 @@
|
|
1 |
-
import re
|
2 |
-
|
3 |
-
|
4 |
-
def _prefer_non_zero(*args):
|
5 |
-
for arg in args:
|
6 |
-
if arg != 0:
|
7 |
-
return arg
|
8 |
-
return 0.0
|
9 |
-
|
10 |
-
|
11 |
-
def _ntos(n):
|
12 |
-
# %f likes to add unnecessary 0's, %g isn't consistent about # decimals
|
13 |
-
return ("%.3f" % n).rstrip("0").rstrip(".")
|
14 |
-
|
15 |
-
|
16 |
-
def _strip_xml_ns(tag):
|
17 |
-
# ElementTree API doesn't provide a way to ignore XML namespaces in tags
|
18 |
-
# so we here strip them ourselves: cf. https://bugs.python.org/issue18304
|
19 |
-
return tag.split("}", 1)[1] if "}" in tag else tag
|
20 |
-
|
21 |
-
|
22 |
-
def _transform(raw_value):
|
23 |
-
# TODO assumes a 'matrix' transform.
|
24 |
-
# No other transform functions are supported at the moment.
|
25 |
-
# https://developer.mozilla.org/en-US/docs/Web/SVG/Attribute/transform
|
26 |
-
# start simple: if you aren't exactly matrix(...) then no love
|
27 |
-
match = re.match(r"matrix\((.*)\)", raw_value)
|
28 |
-
if not match:
|
29 |
-
raise NotImplementedError
|
30 |
-
matrix = tuple(float(p) for p in re.split(r"\s+|,", match.group(1)))
|
31 |
-
if len(matrix) != 6:
|
32 |
-
raise ValueError("wrong # of terms in %s" % raw_value)
|
33 |
-
return matrix
|
34 |
-
|
35 |
-
|
36 |
-
class PathBuilder(object):
|
37 |
-
def __init__(self):
|
38 |
-
self.paths = []
|
39 |
-
self.transforms = []
|
40 |
-
|
41 |
-
def _start_path(self, initial_path=""):
|
42 |
-
self.paths.append(initial_path)
|
43 |
-
self.transforms.append(None)
|
44 |
-
|
45 |
-
def _end_path(self):
|
46 |
-
self._add("z")
|
47 |
-
|
48 |
-
def _add(self, path_snippet):
|
49 |
-
path = self.paths[-1]
|
50 |
-
if path:
|
51 |
-
path += " " + path_snippet
|
52 |
-
else:
|
53 |
-
path = path_snippet
|
54 |
-
self.paths[-1] = path
|
55 |
-
|
56 |
-
def _move(self, c, x, y):
|
57 |
-
self._add("%s%s,%s" % (c, _ntos(x), _ntos(y)))
|
58 |
-
|
59 |
-
def M(self, x, y):
|
60 |
-
self._move("M", x, y)
|
61 |
-
|
62 |
-
def m(self, x, y):
|
63 |
-
self._move("m", x, y)
|
64 |
-
|
65 |
-
def _arc(self, c, rx, ry, x, y, large_arc):
|
66 |
-
self._add(
|
67 |
-
"%s%s,%s 0 %d 1 %s,%s"
|
68 |
-
% (c, _ntos(rx), _ntos(ry), large_arc, _ntos(x), _ntos(y))
|
69 |
-
)
|
70 |
-
|
71 |
-
def A(self, rx, ry, x, y, large_arc=0):
|
72 |
-
self._arc("A", rx, ry, x, y, large_arc)
|
73 |
-
|
74 |
-
def a(self, rx, ry, x, y, large_arc=0):
|
75 |
-
self._arc("a", rx, ry, x, y, large_arc)
|
76 |
-
|
77 |
-
def _vhline(self, c, x):
|
78 |
-
self._add("%s%s" % (c, _ntos(x)))
|
79 |
-
|
80 |
-
def H(self, x):
|
81 |
-
self._vhline("H", x)
|
82 |
-
|
83 |
-
def h(self, x):
|
84 |
-
self._vhline("h", x)
|
85 |
-
|
86 |
-
def V(self, y):
|
87 |
-
self._vhline("V", y)
|
88 |
-
|
89 |
-
def v(self, y):
|
90 |
-
self._vhline("v", y)
|
91 |
-
|
92 |
-
def _line(self, c, x, y):
|
93 |
-
self._add("%s%s,%s" % (c, _ntos(x), _ntos(y)))
|
94 |
-
|
95 |
-
def L(self, x, y):
|
96 |
-
self._line("L", x, y)
|
97 |
-
|
98 |
-
def l(self, x, y):
|
99 |
-
self._line("l", x, y)
|
100 |
-
|
101 |
-
def _parse_line(self, line):
|
102 |
-
x1 = float(line.attrib.get("x1", 0))
|
103 |
-
y1 = float(line.attrib.get("y1", 0))
|
104 |
-
x2 = float(line.attrib.get("x2", 0))
|
105 |
-
y2 = float(line.attrib.get("y2", 0))
|
106 |
-
|
107 |
-
self._start_path()
|
108 |
-
self.M(x1, y1)
|
109 |
-
self.L(x2, y2)
|
110 |
-
|
111 |
-
def _parse_rect(self, rect):
|
112 |
-
x = float(rect.attrib.get("x", 0))
|
113 |
-
y = float(rect.attrib.get("y", 0))
|
114 |
-
w = float(rect.attrib.get("width"))
|
115 |
-
h = float(rect.attrib.get("height"))
|
116 |
-
rx = float(rect.attrib.get("rx", 0))
|
117 |
-
ry = float(rect.attrib.get("ry", 0))
|
118 |
-
|
119 |
-
rx = _prefer_non_zero(rx, ry)
|
120 |
-
ry = _prefer_non_zero(ry, rx)
|
121 |
-
# TODO there are more rules for adjusting rx, ry
|
122 |
-
|
123 |
-
self._start_path()
|
124 |
-
self.M(x + rx, y)
|
125 |
-
self.H(x + w - rx)
|
126 |
-
if rx > 0:
|
127 |
-
self.A(rx, ry, x + w, y + ry)
|
128 |
-
self.V(y + h - ry)
|
129 |
-
if rx > 0:
|
130 |
-
self.A(rx, ry, x + w - rx, y + h)
|
131 |
-
self.H(x + rx)
|
132 |
-
if rx > 0:
|
133 |
-
self.A(rx, ry, x, y + h - ry)
|
134 |
-
self.V(y + ry)
|
135 |
-
if rx > 0:
|
136 |
-
self.A(rx, ry, x + rx, y)
|
137 |
-
self._end_path()
|
138 |
-
|
139 |
-
def _parse_path(self, path):
|
140 |
-
if "d" in path.attrib:
|
141 |
-
self._start_path(initial_path=path.attrib["d"])
|
142 |
-
|
143 |
-
def _parse_polygon(self, poly):
|
144 |
-
if "points" in poly.attrib:
|
145 |
-
self._start_path("M" + poly.attrib["points"])
|
146 |
-
self._end_path()
|
147 |
-
|
148 |
-
def _parse_polyline(self, poly):
|
149 |
-
if "points" in poly.attrib:
|
150 |
-
self._start_path("M" + poly.attrib["points"])
|
151 |
-
|
152 |
-
def _parse_circle(self, circle):
|
153 |
-
cx = float(circle.attrib.get("cx", 0))
|
154 |
-
cy = float(circle.attrib.get("cy", 0))
|
155 |
-
r = float(circle.attrib.get("r"))
|
156 |
-
|
157 |
-
# arc doesn't seem to like being a complete shape, draw two halves
|
158 |
-
self._start_path()
|
159 |
-
self.M(cx - r, cy)
|
160 |
-
self.A(r, r, cx + r, cy, large_arc=1)
|
161 |
-
self.A(r, r, cx - r, cy, large_arc=1)
|
162 |
-
|
163 |
-
def _parse_ellipse(self, ellipse):
|
164 |
-
cx = float(ellipse.attrib.get("cx", 0))
|
165 |
-
cy = float(ellipse.attrib.get("cy", 0))
|
166 |
-
rx = float(ellipse.attrib.get("rx"))
|
167 |
-
ry = float(ellipse.attrib.get("ry"))
|
168 |
-
|
169 |
-
# arc doesn't seem to like being a complete shape, draw two halves
|
170 |
-
self._start_path()
|
171 |
-
self.M(cx - rx, cy)
|
172 |
-
self.A(rx, ry, cx + rx, cy, large_arc=1)
|
173 |
-
self.A(rx, ry, cx - rx, cy, large_arc=1)
|
174 |
-
|
175 |
-
def add_path_from_element(self, el):
|
176 |
-
tag = _strip_xml_ns(el.tag)
|
177 |
-
parse_fn = getattr(self, "_parse_%s" % tag.lower(), None)
|
178 |
-
if not callable(parse_fn):
|
179 |
-
return False
|
180 |
-
parse_fn(el)
|
181 |
-
if "transform" in el.attrib:
|
182 |
-
self.transforms[-1] = _transform(el.attrib["transform"])
|
183 |
-
return True
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/varLib/__main__.py
DELETED
@@ -1,6 +0,0 @@
|
|
1 |
-
import sys
|
2 |
-
from fontTools.varLib import main
|
3 |
-
|
4 |
-
|
5 |
-
if __name__ == "__main__":
|
6 |
-
sys.exit(main())
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/index-6f7117a6.js
DELETED
@@ -1,2 +0,0 @@
|
|
1 |
-
import{S as G,e as I,s as J,G as Z,N as O,K as F,p as U,M as z,n as q,A as j,V as be,B as K,P as D,O as S,U as $,Q as ge,R as x,k as p,m as L,o as v,u as Q,v as d,y as V,z as h,x as y,F as C,a7 as de,h as he,j as we,t as ke,a9 as Ae,ab as pe,ac as ve,ad as ye,ak as k,at as Fe,au as Be,E as ze,ae as Ue,q as je,r as Ee}from"./index-3370be2a.js";import{B as Ne}from"./Button-89624748.js";import{B as ae}from"./BlockLabel-56db415e.js";import{E as Oe}from"./Empty-585389a4.js";import{F as W}from"./File-ae385ffc.js";import{U as Se}from"./Upload-f29b2460.js";import{M as Te}from"./ModifyUpload-d8fc50ab.js";import{n as ee,b as Me}from"./ModifyUpload.svelte_svelte_type_style_lang-d2acacf0.js";import{U as Pe}from"./UploadText-28892309.js";import"./Blocks-f0129fcd.js";import"./IconButton-abe5ede9.js";const Ce=t=>{let e=["B","KB","MB","GB","PB"],n=0;for(;t>1024;)t/=1024,n++;let l=e[n];return t.toFixed(1)+" "+l},le=t=>{var e;return e=t.orig_name||t.name,e.length>30?`${e.substr(0,30)}...`:e},te=t=>{var e=0;if(Array.isArray(t))for(var n of t)n.size!==void 0&&(e+=n.size);else e=t.size||0;return Ce(e)};function ne(t,e,n){const l=t.slice();return l[4]=e[n],l[6]=n,l}function Re(t){let e;return{c(){e=D("Uploading...")},m(n,l){U(n,e,l)},p:q,d(n){n&&j(e)}}}function qe(t){let e,n,l,s;return{c(){e=O("a"),n=D("Download"),F(e,"href",l=t[4].data),F(e,"target","_blank"),F(e,"download",s=window.__is_colab__?null:t[4].orig_name||t[4].name),F(e,"class","svelte-xrr240")},m(a,i){U(a,e,i),z(e,n)},p(a,i){i&1&&l!==(l=a[4].data)&&F(e,"href",l),i&1&&s!==(s=window.__is_colab__?null:a[4].orig_name||a[4].name)&&F(e,"download",s)},d(a){a&&j(e)}}}function se(t){let e,n,l=le(t[4])+"",s,a,i,f=te(t[4])+"",r,g,o,m,_,b;function B(c,E){return c[4].data?qe:Re}let w=B(t),A=w(t);function T(){return t[3](t[4],t[6])}return{c(){e=O("tr"),n=O("td"),s=D(l),a=S(),i=O("td"),r=D(f),g=S(),o=O("td"),A.c(),m=S(),F(n,"class","svelte-xrr240"),F(i,"class","svelte-xrr240"),F(o,"class","download svelte-xrr240"),F(e,"class","file svelte-xrr240"),$(e,"selectable",t[1])},m(c,E){U(c,e,E),z(e,n),z(n,s),z(e,a),z(e,i),z(i,r),z(e,g),z(e,o),A.m(o,null),z(e,m),_||(b=ge(e,"click",T),_=!0)},p(c,E){t=c,E&1&&l!==(l=le(t[4])+"")&&x(s,l),E&1&&f!==(f=te(t[4])+"")&&x(r,f),w===(w=B(t))&&A?A.p(t,E):(A.d(1),A=w(t),A&&(A.c(),A.m(o,null))),E&2&&$(e,"selectable",t[1])},d(c){c&&j(e),A.d(),_=!1,b()}}}function De(t){let e,n,l,s=Z(Array.isArray(t[0])?t[0]:[t[0]]),a=[];for(let i=0;i<s.length;i+=1)a[i]=se(ne(t,s,i));return{c(){e=O("div"),n=O("table"),l=O("tbody");for(let i=0;i<a.length;i+=1)a[i].c();F(n,"class","file-preview svelte-xrr240"),F(e,"class","file-preview-holder svelte-xrr240")},m(i,f){U(i,e,f),z(e,n),z(n,l);for(let r=0;r<a.length;r+=1)a[r]&&a[r].m(l,null)},p(i,[f]){if(f&7){s=Z(Array.isArray(i[0])?i[0]:[i[0]]);let r;for(r=0;r<s.length;r+=1){const g=ne(i,s,r);a[r]?a[r].p(g,f):(a[r]=se(g),a[r].c(),a[r].m(l,null))}for(;r<a.length;r+=1)a[r].d(1);a.length=s.length}},i:q,o:q,d(i){i&&j(e),be(a,i)}}}function Ge(t,e,n){const l=K();let{value:s}=e,{selectable:a=!1}=e;const i=(f,r)=>l("select",{value:f.orig_name||f.name,index:r});return t.$$set=f=>{"value"in f&&n(0,s=f.value),"selectable"in f&&n(1,a=f.selectable)},[s,a,l,i]}class ie extends G{constructor(e){super(),I(this,e,Ge,De,J,{value:0,selectable:1})}}function Ie(t){let e,n;return e=new Oe({props:{unpadded_box:!0,size:"large",$$slots:{default:[Ke]},$$scope:{ctx:t}}}),{c(){p(e.$$.fragment)},m(l,s){v(e,l,s),n=!0},p(l,s){const 
a={};s&32&&(a.$$scope={dirty:s,ctx:l}),e.$set(a)},i(l){n||(h(e.$$.fragment,l),n=!0)},o(l){d(e.$$.fragment,l),n=!1},d(l){y(e,l)}}}function Je(t){let e,n;return e=new ie({props:{selectable:t[3],value:t[0]}}),e.$on("select",t[4]),{c(){p(e.$$.fragment)},m(l,s){v(e,l,s),n=!0},p(l,s){const a={};s&8&&(a.selectable=l[3]),s&1&&(a.value=l[0]),e.$set(a)},i(l){n||(h(e.$$.fragment,l),n=!0)},o(l){d(e.$$.fragment,l),n=!1},d(l){y(e,l)}}}function Ke(t){let e,n;return e=new W({}),{c(){p(e.$$.fragment)},m(l,s){v(e,l,s),n=!0},i(l){n||(h(e.$$.fragment,l),n=!0)},o(l){d(e.$$.fragment,l),n=!1},d(l){y(e,l)}}}function Le(t){let e,n,l,s,a,i;e=new ae({props:{show_label:t[2],float:t[0]===null,Icon:W,label:t[1]||"File"}});const f=[Je,Ie],r=[];function g(o,m){return o[0]?0:1}return l=g(t),s=r[l]=f[l](t),{c(){p(e.$$.fragment),n=S(),s.c(),a=L()},m(o,m){v(e,o,m),U(o,n,m),r[l].m(o,m),U(o,a,m),i=!0},p(o,[m]){const _={};m&4&&(_.show_label=o[2]),m&1&&(_.float=o[0]===null),m&2&&(_.label=o[1]||"File"),e.$set(_);let b=l;l=g(o),l===b?r[l].p(o,m):(Q(),d(r[b],1,1,()=>{r[b]=null}),V(),s=r[l],s?s.p(o,m):(s=r[l]=f[l](o),s.c()),h(s,1),s.m(a.parentNode,a))},i(o){i||(h(e.$$.fragment,o),h(s),i=!0)},o(o){d(e.$$.fragment,o),d(s),i=!1},d(o){o&&(j(n),j(a)),y(e,o),r[l].d(o)}}}function Qe(t,e,n){let{value:l=null}=e,{label:s}=e,{show_label:a=!0}=e,{selectable:i=!1}=e;function f(r){C.call(this,t,r)}return t.$$set=r=>{"value"in r&&n(0,l=r.value),"label"in r&&n(1,s=r.label),"show_label"in r&&n(2,a=r.show_label),"selectable"in r&&n(3,i=r.selectable)},[l,s,a,i,f]}class Ve extends G{constructor(e){super(),I(this,e,Qe,Le,J,{value:0,label:1,show_label:2,selectable:3})}}function We(t){let e,n,l;function s(i){t[12](i)}let a={filetype:t[6],parse_to_data_url:!1,file_count:t[3],$$slots:{default:[Xe]},$$scope:{ctx:t}};return t[5]!==void 0&&(a.dragging=t[5]),e=new Se({props:a}),he.push(()=>we(e,"dragging",s)),e.$on("load",t[7]),{c(){p(e.$$.fragment)},m(i,f){v(e,i,f),l=!0},p(i,f){const r={};f&64&&(r.filetype=i[6]),f&8&&(r.file_count=i[3]),f&8192&&(r.$$scope={dirty:f,ctx:i}),!n&&f&32&&(n=!0,r.dragging=i[5],ke(()=>n=!1)),e.$set(r)},i(i){l||(h(e.$$.fragment,i),l=!0)},o(i){d(e.$$.fragment,i),l=!1},d(i){y(e,i)}}}function He(t){let e,n,l,s;return e=new Te({props:{absolute:!0}}),e.$on("clear",t[8]),l=new ie({props:{selectable:t[4],value:t[0]}}),l.$on("select",t[11]),{c(){p(e.$$.fragment),n=S(),p(l.$$.fragment)},m(a,i){v(e,a,i),U(a,n,i),v(l,a,i),s=!0},p(a,i){const f={};i&16&&(f.selectable=a[4]),i&1&&(f.value=a[0]),l.$set(f)},i(a){s||(h(e.$$.fragment,a),h(l.$$.fragment,a),s=!0)},o(a){d(e.$$.fragment,a),d(l.$$.fragment,a),s=!1},d(a){a&&j(n),y(e,a),y(l,a)}}}function Xe(t){let e;const n=t[10].default,l=Ae(n,t,t[13],null);return{c(){l&&l.c()},m(s,a){l&&l.m(s,a),e=!0},p(s,a){l&&l.p&&(!e||a&8192)&&pe(l,n,s,s[13],e?ye(n,s[13],a,null):ve(s[13]),null)},i(s){e||(h(l,s),e=!0)},o(s){d(l,s),e=!1},d(s){l&&l.d(s)}}}function Ye(t){let e,n,l,s,a,i;e=new ae({props:{show_label:t[2],Icon:W,float:t[0]===null,label:t[1]||"File"}});const f=[He,We],r=[];function g(o,m){return o[0]?0:1}return l=g(t),s=r[l]=f[l](t),{c(){p(e.$$.fragment),n=S(),s.c(),a=L()},m(o,m){v(e,o,m),U(o,n,m),r[l].m(o,m),U(o,a,m),i=!0},p(o,[m]){const _={};m&4&&(_.show_label=o[2]),m&1&&(_.float=o[0]===null),m&2&&(_.label=o[1]||"File"),e.$set(_);let b=l;l=g(o),l===b?r[l].p(o,m):(Q(),d(r[b],1,1,()=>{r[b]=null}),V(),s=r[l],s?s.p(o,m):(s=r[l]=f[l](o),s.c()),h(s,1),s.m(a.parentNode,a))},i(o){i||(h(e.$$.fragment,o),h(s),i=!0)},o(o){d(e.$$.fragment,o),d(s),i=!1},d(o){o&&(j(n),j(a)),y(e,o),r[l].d(o)}}}function 
Ze(t,e,n){let{$$slots:l={},$$scope:s}=e,{value:a}=e,{label:i}=e,{show_label:f=!0}=e,{file_count:r="single"}=e,{file_types:g=null}=e,{selectable:o=!1}=e;async function m({detail:c}){n(0,a=c),await de(),b("change",a),b("upload",c)}function _({detail:c}){n(0,a=null),b("change",a),b("clear")}const b=K();let B;g==null?B=null:(g=g.map(c=>c.startsWith(".")?c:c+"/*"),B=g.join(", "));let w=!1;function A(c){C.call(this,t,c)}function T(c){w=c,n(5,w)}return t.$$set=c=>{"value"in c&&n(0,a=c.value),"label"in c&&n(1,i=c.label),"show_label"in c&&n(2,f=c.show_label),"file_count"in c&&n(3,r=c.file_count),"file_types"in c&&n(9,g=c.file_types),"selectable"in c&&n(4,o=c.selectable),"$$scope"in c&&n(13,s=c.$$scope)},t.$$.update=()=>{t.$$.dirty&32&&b("drag",w)},[a,i,f,r,o,w,B,m,_,g,l,A,T,s]}class $e extends G{constructor(e){super(),I(this,e,Ze,Ye,J,{value:0,label:1,show_label:2,file_count:3,file_types:9,selectable:4})}}function xe(t){let e,n;return e=new Ve({props:{selectable:t[9],value:t[14],label:t[5],show_label:t[6]}}),e.$on("select",t[24]),{c(){p(e.$$.fragment)},m(l,s){v(e,l,s),n=!0},p(l,s){const a={};s&512&&(a.selectable=l[9]),s&16384&&(a.value=l[14]),s&32&&(a.label=l[5]),s&64&&(a.show_label=l[6]),e.$set(a)},i(l){n||(h(e.$$.fragment,l),n=!0)},o(l){d(e.$$.fragment,l),n=!1},d(l){y(e,l)}}}function el(t){let e,n;return e=new $e({props:{label:t[5],show_label:t[6],value:t[14],file_count:t[7],file_types:t[8],selectable:t[9],$$slots:{default:[ll]},$$scope:{ctx:t}}}),e.$on("change",t[20]),e.$on("drag",t[21]),e.$on("clear",t[22]),e.$on("select",t[23]),{c(){p(e.$$.fragment)},m(l,s){v(e,l,s),n=!0},p(l,s){const a={};s&32&&(a.label=l[5]),s&64&&(a.show_label=l[6]),s&16384&&(a.value=l[14]),s&128&&(a.file_count=l[7]),s&256&&(a.file_types=l[8]),s&512&&(a.selectable=l[9]),s&134217728&&(a.$$scope={dirty:s,ctx:l}),e.$set(a)},i(l){n||(h(e.$$.fragment,l),n=!0)},o(l){d(e.$$.fragment,l),n=!1},d(l){y(e,l)}}}function ll(t){let e,n;return e=new Pe({props:{type:"file"}}),{c(){p(e.$$.fragment)},m(l,s){v(e,l,s),n=!0},p:q,i(l){n||(h(e.$$.fragment,l),n=!0)},o(l){d(e.$$.fragment,l),n=!1},d(l){y(e,l)}}}function tl(t){let e,n,l,s,a,i;const f=[t[10],{status:t[16]?"generating":t[10]?.status||"complete"}];let r={};for(let _=0;_<f.length;_+=1)r=ze(r,f[_]);e=new Ue({props:r});const g=[el,xe],o=[];function m(_,b){return _[4]==="dynamic"?0:1}return l=m(t),s=o[l]=g[l](t),{c(){p(e.$$.fragment),n=S(),s.c(),a=L()},m(_,b){v(e,_,b),U(_,n,b),o[l].m(_,b),U(_,a,b),i=!0},p(_,b){const B=b&66560?je(f,[b&1024&&Ee(_[10]),{status:_[16]?"generating":_[10]?.status||"complete"}]):{};e.$set(B);let w=l;l=m(_),l===w?o[l].p(_,b):(Q(),d(o[w],1,1,()=>{o[w]=null}),V(),s=o[l],s?s.p(_,b):(s=o[l]=g[l](_),s.c()),h(s,1),s.m(a.parentNode,a))},i(_){i||(h(e.$$.fragment,_),h(s),i=!0)},o(_){d(e.$$.fragment,_),d(s),i=!1},d(_){_&&(j(n),j(a)),y(e,_),o[l].d(_)}}}function nl(t){let e,n;return e=new Ne({props:{visible:t[3],variant:t[4]==="dynamic"&&t[0]===null?"dashed":"solid",border_mode:t[15]?"focus":"base",padding:!1,elem_id:t[1],elem_classes:t[2],container:t[11],scale:t[12],min_width:t[13],$$slots:{default:[tl]},$$scope:{ctx:t}}}),{c(){p(e.$$.fragment)},m(l,s){v(e,l,s),n=!0},p(l,[s]){const 
a={};s&8&&(a.visible=l[3]),s&17&&(a.variant=l[4]==="dynamic"&&l[0]===null?"dashed":"solid"),s&32768&&(a.border_mode=l[15]?"focus":"base"),s&2&&(a.elem_id=l[1]),s&4&&(a.elem_classes=l[2]),s&2048&&(a.container=l[11]),s&4096&&(a.scale=l[12]),s&8192&&(a.min_width=l[13]),s&134334449&&(a.$$scope={dirty:s,ctx:l}),e.$set(a)},i(l){n||(h(e.$$.fragment,l),n=!0)},o(l){d(e.$$.fragment,l),n=!1},d(l){y(e,l)}}}function sl(t,e,n){let l,{elem_id:s=""}=e,{elem_classes:a=[]}=e,{visible:i=!0}=e,{value:f}=e,r,{mode:g}=e,{root:o}=e,{label:m}=e,{show_label:_}=e,{file_count:b}=e,{file_types:B=["file"]}=e,{root_url:w}=e,{selectable:A=!1}=e,{loading_status:T}=e,{container:c=!0}=e,{scale:E=null}=e,{min_width:H=void 0}=e;const re=Fe("upload_files")??Be;let X=!1,M=!1;const R=K(),oe=({detail:u})=>n(0,f=u),fe=({detail:u})=>n(15,X=u);function ue(u){C.call(this,t,u)}function _e(u){C.call(this,t,u)}function ce(u){C.call(this,t,u)}return t.$$set=u=>{"elem_id"in u&&n(1,s=u.elem_id),"elem_classes"in u&&n(2,a=u.elem_classes),"visible"in u&&n(3,i=u.visible),"value"in u&&n(0,f=u.value),"mode"in u&&n(4,g=u.mode),"root"in u&&n(17,o=u.root),"label"in u&&n(5,m=u.label),"show_label"in u&&n(6,_=u.show_label),"file_count"in u&&n(7,b=u.file_count),"file_types"in u&&n(8,B=u.file_types),"root_url"in u&&n(18,w=u.root_url),"selectable"in u&&n(9,A=u.selectable),"loading_status"in u&&n(10,T=u.loading_status),"container"in u&&n(11,c=u.container),"scale"in u&&n(12,E=u.scale),"min_width"in u&&n(13,H=u.min_width)},t.$$.update=()=>{if(t.$$.dirty&393217&&n(14,l=ee(f,o,w)),t.$$.dirty&933905&&JSON.stringify(l)!==JSON.stringify(r)){if(n(19,r=l),l===null)R("change"),n(16,M=!1);else if(!(Array.isArray(l)?l:[l]).every(u=>u.blob))n(16,M=!1),R("change");else if(g==="dynamic"){let u=(Array.isArray(l)?l:[l]).map(P=>P.blob),me=l;n(16,M=!0),re(o,u).then(P=>{me===l&&(n(16,M=!1),P.error?(Array.isArray(l)?l:[l]).forEach(async(N,Y)=>{N.data=await Me(N.blob),N.blob=void 0}):((Array.isArray(l)?l:[l]).forEach((N,Y)=>{P.files&&(N.orig_name=N.name,N.name=P.files[Y],N.is_file=!0,N.blob=void 0)}),n(19,r=n(14,l=ee(f,o,w)))),R("change"),R("upload"))})}}},[f,s,a,i,g,m,_,b,B,A,T,c,E,H,l,X,M,o,w,r,oe,fe,ue,_e,ce]}class al extends G{constructor(e){super(),I(this,e,sl,nl,J,{elem_id:1,elem_classes:2,visible:3,value:0,mode:4,root:17,label:5,show_label:6,file_count:7,file_types:8,root_url:18,selectable:9,loading_status:10,container:11,scale:12,min_width:13})}get elem_id(){return this.$$.ctx[1]}set elem_id(e){this.$$set({elem_id:e}),k()}get elem_classes(){return this.$$.ctx[2]}set elem_classes(e){this.$$set({elem_classes:e}),k()}get visible(){return this.$$.ctx[3]}set visible(e){this.$$set({visible:e}),k()}get value(){return this.$$.ctx[0]}set value(e){this.$$set({value:e}),k()}get mode(){return this.$$.ctx[4]}set mode(e){this.$$set({mode:e}),k()}get root(){return this.$$.ctx[17]}set root(e){this.$$set({root:e}),k()}get label(){return this.$$.ctx[5]}set label(e){this.$$set({label:e}),k()}get show_label(){return this.$$.ctx[6]}set show_label(e){this.$$set({show_label:e}),k()}get file_count(){return this.$$.ctx[7]}set file_count(e){this.$$set({file_count:e}),k()}get file_types(){return this.$$.ctx[8]}set file_types(e){this.$$set({file_types:e}),k()}get root_url(){return this.$$.ctx[18]}set root_url(e){this.$$set({root_url:e}),k()}get selectable(){return this.$$.ctx[9]}set selectable(e){this.$$set({selectable:e}),k()}get loading_status(){return this.$$.ctx[10]}set loading_status(e){this.$$set({loading_status:e}),k()}get container(){return this.$$.ctx[11]}set 
container(e){this.$$set({container:e}),k()}get scale(){return this.$$.ctx[12]}set scale(e){this.$$set({scale:e}),k()}get min_width(){return this.$$.ctx[13]}set min_width(e){this.$$set({min_width:e}),k()}}const hl=al,wl=["static","dynamic"],kl=t=>({type:{input_payload:"{ name: string; data: string }",response_object:"{ orig_name: string; name: string, size: number, data: string, is_file: boolean}"},description:{input_payload:"object with file name and base64 data",response_object:"object that includes path to file. The URL: {ROOT}file={name} contains the data"},example_data:{name:"zip.zip",data:"data:@file/octet-stream;base64,UEsFBgAAAAAAAAAAAAAAAAAAAAAAAA=="}});export{hl as Component,kl as document,wl as modes};
|
2 |
-
//# sourceMappingURL=index-6f7117a6.js.map
|
|
|
|
|
|
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/index-be790e2e.css
DELETED
@@ -1 +0,0 @@
|
|
1 |
-
.rangeSlider{--pip:var(--range-pip, lightslategray);--pip-text:var(--range-pip-text, var(--pip));--pip-active:var(--range-pip-active, darkslategrey);--pip-active-text:var(--range-pip-active-text, var(--pip-active));--pip-hover:var(--range-pip-hover, darkslategrey);--pip-hover-text:var(--range-pip-hover-text, var(--pip-hover));--pip-in-range:var(--range-pip-in-range, var(--pip-active));--pip-in-range-text:var(--range-pip-in-range-text, var(--pip-active-text))}.rangePips{position:absolute;height:1em;left:0;right:0;bottom:-1em}.rangePips.vertical{height:auto;width:1em;inset:0 auto 0 100%}.rangePips .pip{height:.4em;position:absolute;top:.25em;width:1px;white-space:nowrap}.rangePips.vertical .pip{height:1px;width:.4em;left:.25em;top:auto;bottom:auto}.rangePips .pipVal{position:absolute;top:.4em;transform:translate(-50%,25%)}.rangePips.vertical .pipVal{position:absolute;top:0;left:.4em;transform:translate(25%,-50%)}.rangePips .pip{transition:all .15s ease}.rangePips .pipVal{transition:all .15s ease,font-weight 0s linear}.rangePips .pip{color:#789;color:var(--pip-text);background-color:#789;background-color:var(--pip)}.rangePips .pip.selected{color:#2f4f4f;color:var(--pip-active-text);background-color:#2f4f4f;background-color:var(--pip-active)}.rangePips.hoverable:not(.disabled) .pip:hover{color:#2f4f4f;color:var(--pip-hover-text);background-color:#2f4f4f;background-color:var(--pip-hover)}.rangePips .pip.in-range{color:#2f4f4f;color:var(--pip-in-range-text);background-color:#2f4f4f;background-color:var(--pip-in-range)}.rangePips .pip.selected{height:.75em}.rangePips.vertical .pip.selected{height:1px;width:.75em}.rangePips .pip.selected .pipVal{font-weight:700;top:.75em}.rangePips.vertical .pip.selected .pipVal{top:0;left:.75em}.rangePips.hoverable:not(.disabled) .pip:not(.selected):hover{transition:none}.rangePips.hoverable:not(.disabled) .pip:not(.selected):hover .pipVal{transition:none;font-weight:700}.rangeSlider{--slider:var(--range-slider, #d7dada);--handle-inactive:var(--range-handle-inactive, #99a2a2);--handle:var(--range-handle, #838de7);--handle-focus:var(--range-handle-focus, #4a40d4);--handle-border:var(--range-handle-border, var(--handle));--range-inactive:var(--range-range-inactive, var(--handle-inactive));--range:var(--range-range, var(--handle-focus));--float-inactive:var(--range-float-inactive, var(--handle-inactive));--float:var(--range-float, var(--handle-focus));--float-text:var(--range-float-text, white)}.rangeSlider{position:relative;border-radius:100px;height:.5em;margin:1em;transition:opacity .2s ease;user-select:none}.rangeSlider *{user-select:none}.rangeSlider.pips{margin-bottom:1.8em}.rangeSlider.pip-labels{margin-bottom:2.8em}.rangeSlider.vertical{display:inline-block;border-radius:100px;width:.5em;min-height:200px}.rangeSlider.vertical.pips{margin-right:1.8em;margin-bottom:1em}.rangeSlider.vertical.pip-labels{margin-right:2.8em;margin-bottom:1em}.rangeSlider .rangeHandle{position:absolute;display:block;height:1.4em;width:1.4em;top:.25em;bottom:auto;transform:translateY(-50%) translate(-50%);z-index:2}.rangeSlider.reversed .rangeHandle{transform:translateY(-50%) translate(50%)}.rangeSlider.vertical .rangeHandle{left:.25em;top:auto;transform:translateY(50%) translate(-50%)}.rangeSlider.vertical.reversed .rangeHandle{transform:translateY(-50%) translate(-50%)}.rangeSlider .rangeNub,.rangeSlider .rangeHandle:before{position:absolute;left:0;top:0;display:block;border-radius:10em;height:100%;width:100%;transition:box-shadow .2s ease}.rangeSlider 
.rangeHandle:before{content:"";inset:1px;height:auto;width:auto;box-shadow:0 0 0 0 var(--handle-border);opacity:0}.rangeSlider.hoverable:not(.disabled) .rangeHandle:hover:before{box-shadow:0 0 0 8px var(--handle-border);opacity:.2}.rangeSlider.hoverable:not(.disabled) .rangeHandle.press:before,.rangeSlider.hoverable:not(.disabled) .rangeHandle.press:hover:before{box-shadow:0 0 0 12px var(--handle-border);opacity:.4}.rangeSlider.range:not(.min):not(.max) .rangeNub{border-radius:10em 10em 10em 1.6em}.rangeSlider.range .rangeHandle:nth-of-type(1) .rangeNub{transform:rotate(-135deg)}.rangeSlider.range .rangeHandle:nth-of-type(2) .rangeNub{transform:rotate(45deg)}.rangeSlider.range.reversed .rangeHandle:nth-of-type(1) .rangeNub{transform:rotate(45deg)}.rangeSlider.range.reversed .rangeHandle:nth-of-type(2) .rangeNub{transform:rotate(-135deg)}.rangeSlider.range.vertical .rangeHandle:nth-of-type(1) .rangeNub{transform:rotate(135deg)}.rangeSlider.range.vertical .rangeHandle:nth-of-type(2) .rangeNub{transform:rotate(-45deg)}.rangeSlider.range.vertical.reversed .rangeHandle:nth-of-type(1) .rangeNub{transform:rotate(-45deg)}.rangeSlider.range.vertical.reversed .rangeHandle:nth-of-type(2) .rangeNub{transform:rotate(135deg)}.rangeSlider .rangeFloat{display:block;position:absolute;left:50%;top:-.5em;transform:translate(-50%,-100%);font-size:1em;text-align:center;opacity:0;pointer-events:none;white-space:nowrap;transition:all .2s ease;font-size:.9em;padding:.2em .4em;border-radius:.2em}.rangeSlider .rangeHandle.active .rangeFloat,.rangeSlider.hoverable .rangeHandle:hover .rangeFloat{opacity:1;top:-.2em;transform:translate(-50%,-100%)}.rangeSlider .rangeBar{position:absolute;display:block;transition:background .2s ease;border-radius:1em;height:.5em;top:0;user-select:none;z-index:1}.rangeSlider.vertical .rangeBar{width:.5em;height:auto}.rangeSlider{background-color:#d7dada;background-color:var(--slider)}.rangeSlider .rangeBar{background-color:#99a2a2;background-color:var(--range-inactive)}.rangeSlider.focus .rangeBar{background-color:#838de7;background-color:var(--range)}.rangeSlider .rangeNub{background-color:#99a2a2;background-color:var(--handle-inactive)}.rangeSlider.focus .rangeNub{background-color:#838de7;background-color:var(--handle)}.rangeSlider .rangeHandle.active .rangeNub{background-color:#4a40d4;background-color:var(--handle-focus)}.rangeSlider .rangeFloat{color:#fff;color:var(--float-text);background-color:#99a2a2;background-color:var(--float-inactive)}.rangeSlider.focus .rangeFloat{background-color:#4a40d4;background-color:var(--float)}.rangeSlider.disabled{opacity:.5}.rangeSlider.disabled .rangeNub{background-color:#d7dada;background-color:var(--slider)}.mic-wrap.svelte-1thnwz{padding:var(--size-2)}.record-icon.svelte-1thnwz{display:flex;position:relative;margin-right:var(--size-2);width:6px;height:6px}.dot.svelte-1thnwz{display:inline-flex;position:relative;border-radius:var(--radius-full);background:var(--color-red-500);width:6px;height:6px}.pinger.svelte-1thnwz{display:inline-flex;position:absolute;opacity:.9;animation:svelte-1thnwz-ping 1s cubic-bezier(0,0,.2,1) infinite;border-radius:var(--radius-full);background:var(--color-red-500);width:var(--size-full);height:var(--size-full)}@keyframes svelte-1thnwz-ping{75%,to{transform:scale(2);opacity:0}}audio.svelte-1thnwz{padding:var(--size-2);width:var(--size-full);height:var(--size-14)}audio.svelte-1yfus5a{padding:var(--size-2);width:var(--size-full);height:var(--size-14)}.icon-button.svelte-1yfus5a{position:absolute;top:6px;right:6px}
|
|
|
|
spaces/DaleChen/AutoGPT/autogpt/processing/html.py
DELETED
@@ -1,33 +0,0 @@
|
|
1 |
-
"""HTML processing functions"""
|
2 |
-
from __future__ import annotations
|
3 |
-
|
4 |
-
from bs4 import BeautifulSoup
|
5 |
-
from requests.compat import urljoin
|
6 |
-
|
7 |
-
|
8 |
-
def extract_hyperlinks(soup: BeautifulSoup, base_url: str) -> list[tuple[str, str]]:
|
9 |
-
"""Extract hyperlinks from a BeautifulSoup object
|
10 |
-
|
11 |
-
Args:
|
12 |
-
soup (BeautifulSoup): The BeautifulSoup object
|
13 |
-
base_url (str): The base URL
|
14 |
-
|
15 |
-
Returns:
|
16 |
-
List[Tuple[str, str]]: The extracted hyperlinks
|
17 |
-
"""
|
18 |
-
return [
|
19 |
-
(link.text, urljoin(base_url, link["href"]))
|
20 |
-
for link in soup.find_all("a", href=True)
|
21 |
-
]
|
22 |
-
|
23 |
-
|
24 |
-
def format_hyperlinks(hyperlinks: list[tuple[str, str]]) -> list[str]:
|
25 |
-
"""Format hyperlinks to be displayed to the user
|
26 |
-
|
27 |
-
Args:
|
28 |
-
hyperlinks (List[Tuple[str, str]]): The hyperlinks to format
|
29 |
-
|
30 |
-
Returns:
|
31 |
-
List[str]: The formatted hyperlinks
|
32 |
-
"""
|
33 |
-
return [f"{link_text} ({link_url})" for link_text, link_url in hyperlinks]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Dantra1/CeliaSensei/models.py
DELETED
@@ -1,533 +0,0 @@
|
|
1 |
-
import math
|
2 |
-
import torch
|
3 |
-
from torch import nn
|
4 |
-
from torch.nn import functional as F
|
5 |
-
|
6 |
-
import commons
|
7 |
-
import modules
|
8 |
-
import attentions
|
9 |
-
import monotonic_align
|
10 |
-
|
11 |
-
from torch.nn import Conv1d, ConvTranspose1d, Conv2d
|
12 |
-
from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
|
13 |
-
from commons import init_weights, get_padding
|
14 |
-
|
15 |
-
|
16 |
-
class StochasticDurationPredictor(nn.Module):
|
17 |
-
def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0):
|
18 |
-
super().__init__()
|
19 |
-
filter_channels = in_channels # it needs to be removed from future version.
|
20 |
-
self.in_channels = in_channels
|
21 |
-
self.filter_channels = filter_channels
|
22 |
-
self.kernel_size = kernel_size
|
23 |
-
self.p_dropout = p_dropout
|
24 |
-
self.n_flows = n_flows
|
25 |
-
self.gin_channels = gin_channels
|
26 |
-
|
27 |
-
self.log_flow = modules.Log()
|
28 |
-
self.flows = nn.ModuleList()
|
29 |
-
self.flows.append(modules.ElementwiseAffine(2))
|
30 |
-
for i in range(n_flows):
|
31 |
-
self.flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3))
|
32 |
-
self.flows.append(modules.Flip())
|
33 |
-
|
34 |
-
self.post_pre = nn.Conv1d(1, filter_channels, 1)
|
35 |
-
self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1)
|
36 |
-
self.post_convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
|
37 |
-
self.post_flows = nn.ModuleList()
|
38 |
-
self.post_flows.append(modules.ElementwiseAffine(2))
|
39 |
-
for i in range(4):
|
40 |
-
self.post_flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3))
|
41 |
-
self.post_flows.append(modules.Flip())
|
42 |
-
|
43 |
-
self.pre = nn.Conv1d(in_channels, filter_channels, 1)
|
44 |
-
self.proj = nn.Conv1d(filter_channels, filter_channels, 1)
|
45 |
-
self.convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
|
46 |
-
if gin_channels != 0:
|
47 |
-
self.cond = nn.Conv1d(gin_channels, filter_channels, 1)
|
48 |
-
|
49 |
-
def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0):
|
50 |
-
x = torch.detach(x)
|
51 |
-
x = self.pre(x)
|
52 |
-
if g is not None:
|
53 |
-
g = torch.detach(g)
|
54 |
-
x = x + self.cond(g)
|
55 |
-
x = self.convs(x, x_mask)
|
56 |
-
x = self.proj(x) * x_mask
|
57 |
-
|
58 |
-
if not reverse:
|
59 |
-
flows = self.flows
|
60 |
-
assert w is not None
|
61 |
-
|
62 |
-
logdet_tot_q = 0
|
63 |
-
h_w = self.post_pre(w)
|
64 |
-
h_w = self.post_convs(h_w, x_mask)
|
65 |
-
h_w = self.post_proj(h_w) * x_mask
|
66 |
-
e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask
|
67 |
-
z_q = e_q
|
68 |
-
for flow in self.post_flows:
|
69 |
-
z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w))
|
70 |
-
logdet_tot_q += logdet_q
|
71 |
-
z_u, z1 = torch.split(z_q, [1, 1], 1)
|
72 |
-
u = torch.sigmoid(z_u) * x_mask
|
73 |
-
z0 = (w - u) * x_mask
|
74 |
-
logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1,2])
|
75 |
-
logq = torch.sum(-0.5 * (math.log(2*math.pi) + (e_q**2)) * x_mask, [1,2]) - logdet_tot_q
|
76 |
-
|
77 |
-
logdet_tot = 0
|
78 |
-
z0, logdet = self.log_flow(z0, x_mask)
|
79 |
-
logdet_tot += logdet
|
80 |
-
z = torch.cat([z0, z1], 1)
|
81 |
-
for flow in flows:
|
82 |
-
z, logdet = flow(z, x_mask, g=x, reverse=reverse)
|
83 |
-
logdet_tot = logdet_tot + logdet
|
84 |
-
nll = torch.sum(0.5 * (math.log(2*math.pi) + (z**2)) * x_mask, [1,2]) - logdet_tot
|
85 |
-
return nll + logq # [b]
|
86 |
-
else:
|
87 |
-
flows = list(reversed(self.flows))
|
88 |
-
flows = flows[:-2] + [flows[-1]] # remove a useless vflow
|
89 |
-
z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale
|
90 |
-
for flow in flows:
|
91 |
-
z = flow(z, x_mask, g=x, reverse=reverse)
|
92 |
-
z0, z1 = torch.split(z, [1, 1], 1)
|
93 |
-
logw = z0
|
94 |
-
return logw
|
95 |
-
|
96 |
-
|
97 |
-
class DurationPredictor(nn.Module):
|
98 |
-
def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0):
|
99 |
-
super().__init__()
|
100 |
-
|
101 |
-
self.in_channels = in_channels
|
102 |
-
self.filter_channels = filter_channels
|
103 |
-
self.kernel_size = kernel_size
|
104 |
-
self.p_dropout = p_dropout
|
105 |
-
self.gin_channels = gin_channels
|
106 |
-
|
107 |
-
self.drop = nn.Dropout(p_dropout)
|
108 |
-
self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size//2)
|
109 |
-
self.norm_1 = modules.LayerNorm(filter_channels)
|
110 |
-
self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size//2)
|
111 |
-
self.norm_2 = modules.LayerNorm(filter_channels)
|
112 |
-
self.proj = nn.Conv1d(filter_channels, 1, 1)
|
113 |
-
|
114 |
-
if gin_channels != 0:
|
115 |
-
self.cond = nn.Conv1d(gin_channels, in_channels, 1)
|
116 |
-
|
117 |
-
def forward(self, x, x_mask, g=None):
|
118 |
-
x = torch.detach(x)
|
119 |
-
if g is not None:
|
120 |
-
g = torch.detach(g)
|
121 |
-
x = x + self.cond(g)
|
122 |
-
x = self.conv_1(x * x_mask)
|
123 |
-
x = torch.relu(x)
|
124 |
-
x = self.norm_1(x)
|
125 |
-
x = self.drop(x)
|
126 |
-
x = self.conv_2(x * x_mask)
|
127 |
-
x = torch.relu(x)
|
128 |
-
x = self.norm_2(x)
|
129 |
-
x = self.drop(x)
|
130 |
-
x = self.proj(x * x_mask)
|
131 |
-
return x * x_mask
|
132 |
-
|
133 |
-
|
134 |
-
class TextEncoder(nn.Module):
  def __init__(self,
      n_vocab,
      out_channels,
      hidden_channels,
      filter_channels,
      n_heads,
      n_layers,
      kernel_size,
      p_dropout):
    super().__init__()
    self.n_vocab = n_vocab
    self.out_channels = out_channels
    self.hidden_channels = hidden_channels
    self.filter_channels = filter_channels
    self.n_heads = n_heads
    self.n_layers = n_layers
    self.kernel_size = kernel_size
    self.p_dropout = p_dropout

    self.emb = nn.Embedding(n_vocab, hidden_channels)
    nn.init.normal_(self.emb.weight, 0.0, hidden_channels**-0.5)

    self.encoder = attentions.Encoder(
      hidden_channels,
      filter_channels,
      n_heads,
      n_layers,
      kernel_size,
      p_dropout)
    self.proj= nn.Conv1d(hidden_channels, out_channels * 2, 1)

  def forward(self, x, x_lengths):
    x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h]
    x = torch.transpose(x, 1, -1) # [b, h, t]
    x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)

    x = self.encoder(x * x_mask, x_mask)
    stats = self.proj(x) * x_mask

    m, logs = torch.split(stats, self.out_channels, dim=1)
    return x, m, logs, x_mask

class ResidualCouplingBlock(nn.Module):
  def __init__(self,
      channels,
      hidden_channels,
      kernel_size,
      dilation_rate,
      n_layers,
      n_flows=4,
      gin_channels=0):
    super().__init__()
    self.channels = channels
    self.hidden_channels = hidden_channels
    self.kernel_size = kernel_size
    self.dilation_rate = dilation_rate
    self.n_layers = n_layers
    self.n_flows = n_flows
    self.gin_channels = gin_channels

    self.flows = nn.ModuleList()
    for i in range(n_flows):
      self.flows.append(modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True))
      self.flows.append(modules.Flip())

  def forward(self, x, x_mask, g=None, reverse=False):
    if not reverse:
      for flow in self.flows:
        x, _ = flow(x, x_mask, g=g, reverse=reverse)
    else:
      for flow in reversed(self.flows):
        x = flow(x, x_mask, g=g, reverse=reverse)
    return x

class PosteriorEncoder(nn.Module):
  def __init__(self,
      in_channels,
      out_channels,
      hidden_channels,
      kernel_size,
      dilation_rate,
      n_layers,
      gin_channels=0):
    super().__init__()
    self.in_channels = in_channels
    self.out_channels = out_channels
    self.hidden_channels = hidden_channels
    self.kernel_size = kernel_size
    self.dilation_rate = dilation_rate
    self.n_layers = n_layers
    self.gin_channels = gin_channels

    self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
    self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels)
    self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)

  def forward(self, x, x_lengths, g=None):
    x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
    x = self.pre(x) * x_mask
    x = self.enc(x, x_mask, g=g)
    stats = self.proj(x) * x_mask
    m, logs = torch.split(stats, self.out_channels, dim=1)
    z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
    return z, m, logs, x_mask

class Generator(torch.nn.Module):
  def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=0):
    super(Generator, self).__init__()
    self.num_kernels = len(resblock_kernel_sizes)
    self.num_upsamples = len(upsample_rates)
    self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3)
    resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2

    self.ups = nn.ModuleList()
    for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
      self.ups.append(weight_norm(
        ConvTranspose1d(upsample_initial_channel//(2**i), upsample_initial_channel//(2**(i+1)),
                        k, u, padding=(k-u)//2)))

    self.resblocks = nn.ModuleList()
    for i in range(len(self.ups)):
      ch = upsample_initial_channel//(2**(i+1))
      for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)):
        self.resblocks.append(resblock(ch, k, d))

    self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
    self.ups.apply(init_weights)

    if gin_channels != 0:
      self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)

  def forward(self, x, g=None):
    x = self.conv_pre(x)
    if g is not None:
      x = x + self.cond(g)

    for i in range(self.num_upsamples):
      x = F.leaky_relu(x, modules.LRELU_SLOPE)
      x = self.ups[i](x)
      xs = None
      for j in range(self.num_kernels):
        if xs is None:
          xs = self.resblocks[i*self.num_kernels+j](x)
        else:
          xs += self.resblocks[i*self.num_kernels+j](x)
      x = xs / self.num_kernels
    x = F.leaky_relu(x)
    x = self.conv_post(x)
    x = torch.tanh(x)

    return x

  def remove_weight_norm(self):
    print('Removing weight norm...')
    for l in self.ups:
      remove_weight_norm(l)
    for l in self.resblocks:
      l.remove_weight_norm()

class DiscriminatorP(torch.nn.Module):
  def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
    super(DiscriminatorP, self).__init__()
    self.period = period
    self.use_spectral_norm = use_spectral_norm
    norm_f = weight_norm if use_spectral_norm == False else spectral_norm
    self.convs = nn.ModuleList([
      norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
      norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
      norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
      norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
      norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))),
    ])
    self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))

  def forward(self, x):
    fmap = []

    # 1d to 2d
    b, c, t = x.shape
    if t % self.period != 0: # pad first
      n_pad = self.period - (t % self.period)
      x = F.pad(x, (0, n_pad), "reflect")
      t = t + n_pad
    x = x.view(b, c, t // self.period, self.period)

    for l in self.convs:
      x = l(x)
      x = F.leaky_relu(x, modules.LRELU_SLOPE)
      fmap.append(x)
    x = self.conv_post(x)
    fmap.append(x)
    x = torch.flatten(x, 1, -1)

    return x, fmap

class DiscriminatorS(torch.nn.Module):
  def __init__(self, use_spectral_norm=False):
    super(DiscriminatorS, self).__init__()
    norm_f = weight_norm if use_spectral_norm == False else spectral_norm
    self.convs = nn.ModuleList([
      norm_f(Conv1d(1, 16, 15, 1, padding=7)),
      norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
      norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
      norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
      norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
      norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
    ])
    self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))

  def forward(self, x):
    fmap = []

    for l in self.convs:
      x = l(x)
      x = F.leaky_relu(x, modules.LRELU_SLOPE)
      fmap.append(x)
    x = self.conv_post(x)
    fmap.append(x)
    x = torch.flatten(x, 1, -1)

    return x, fmap

class MultiPeriodDiscriminator(torch.nn.Module):
  def __init__(self, use_spectral_norm=False):
    super(MultiPeriodDiscriminator, self).__init__()
    periods = [2,3,5,7,11]

    discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
    discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods]
    self.discriminators = nn.ModuleList(discs)

  def forward(self, y, y_hat):
    y_d_rs = []
    y_d_gs = []
    fmap_rs = []
    fmap_gs = []
    for i, d in enumerate(self.discriminators):
      y_d_r, fmap_r = d(y)
      y_d_g, fmap_g = d(y_hat)
      y_d_rs.append(y_d_r)
      y_d_gs.append(y_d_g)
      fmap_rs.append(fmap_r)
      fmap_gs.append(fmap_g)

    return y_d_rs, y_d_gs, fmap_rs, fmap_gs


class SynthesizerTrn(nn.Module):
  """
  Synthesizer for Training
  """

  def __init__(self,
    n_vocab,
    spec_channels,
    segment_size,
    inter_channels,
    hidden_channels,
    filter_channels,
    n_heads,
    n_layers,
    kernel_size,
    p_dropout,
    resblock,
    resblock_kernel_sizes,
    resblock_dilation_sizes,
    upsample_rates,
    upsample_initial_channel,
    upsample_kernel_sizes,
    n_speakers=0,
    gin_channels=0,
    use_sdp=True,
    **kwargs):

    super().__init__()
    self.n_vocab = n_vocab
    self.spec_channels = spec_channels
    self.inter_channels = inter_channels
    self.hidden_channels = hidden_channels
    self.filter_channels = filter_channels
    self.n_heads = n_heads
    self.n_layers = n_layers
    self.kernel_size = kernel_size
    self.p_dropout = p_dropout
    self.resblock = resblock
    self.resblock_kernel_sizes = resblock_kernel_sizes
    self.resblock_dilation_sizes = resblock_dilation_sizes
    self.upsample_rates = upsample_rates
    self.upsample_initial_channel = upsample_initial_channel
    self.upsample_kernel_sizes = upsample_kernel_sizes
    self.segment_size = segment_size
    self.n_speakers = n_speakers
    self.gin_channels = gin_channels

    self.use_sdp = use_sdp

    self.enc_p = TextEncoder(n_vocab,
        inter_channels,
        hidden_channels,
        filter_channels,
        n_heads,
        n_layers,
        kernel_size,
        p_dropout)
    self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels)
    self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels)
    self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels)

    if use_sdp:
      self.dp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels)
    else:
      self.dp = DurationPredictor(hidden_channels, 256, 3, 0.5, gin_channels=gin_channels)

    if n_speakers > 1:
      self.emb_g = nn.Embedding(n_speakers, gin_channels)

  def forward(self, x, x_lengths, y, y_lengths, sid=None):

    x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths)
    if self.n_speakers > 0:
      g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]
    else:
      g = None

    z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
    z_p = self.flow(z, y_mask, g=g)

    with torch.no_grad():
      # negative cross-entropy
      s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t]
      neg_cent1 = torch.sum(-0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True) # [b, 1, t_s]
      neg_cent2 = torch.matmul(-0.5 * (z_p ** 2).transpose(1, 2), s_p_sq_r) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]
      neg_cent3 = torch.matmul(z_p.transpose(1, 2), (m_p * s_p_sq_r)) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]
      neg_cent4 = torch.sum(-0.5 * (m_p ** 2) * s_p_sq_r, [1], keepdim=True) # [b, 1, t_s]
      neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4

      attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
      attn = monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach()

    w = attn.sum(2)
    if self.use_sdp:
      l_length = self.dp(x, x_mask, w, g=g)
      l_length = l_length / torch.sum(x_mask)
    else:
      logw_ = torch.log(w + 1e-6) * x_mask
      logw = self.dp(x, x_mask, g=g)
      l_length = torch.sum((logw - logw_)**2, [1,2]) / torch.sum(x_mask) # for averaging

    # expand prior
    m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2)
    logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2)

    z_slice, ids_slice = commons.rand_slice_segments(z, y_lengths, self.segment_size)
    o = self.dec(z_slice, g=g)
    return o, l_length, attn, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)

  def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None):
    x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths)
    if self.n_speakers > 0:
      g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]
    else:
      g = None

    if self.use_sdp:
      logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w)
    else:
      logw = self.dp(x, x_mask, g=g)
    w = torch.exp(logw) * x_mask * length_scale
    w_ceil = torch.ceil(w)
    y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()
    y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype)
    attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
    attn = commons.generate_path(w_ceil, attn_mask)

    m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t']
    logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t']

    z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale
    z = self.flow(z_p, y_mask, g=g, reverse=True)
    o = self.dec((z * y_mask)[:,:,:max_len], g=g)
    return o, attn, y_mask, (z, z_p, m_p, logs_p)

  def voice_conversion(self, y, y_lengths, sid_src, sid_tgt):
    assert self.n_speakers > 0, "n_speakers have to be larger than 0."
    g_src = self.emb_g(sid_src).unsqueeze(-1)
    g_tgt = self.emb_g(sid_tgt).unsqueeze(-1)
    z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g_src)
    z_p = self.flow(z, y_mask, g=g_src)
    z_hat = self.flow(z_p, y_mask, g=g_tgt, reverse=True)
    o_hat = self.dec(z_hat * y_mask, g=g_tgt)
    return o_hat, y_mask, (z, z_p, z_hat)
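Note: SynthesizerTrn above is the full VITS-style synthesizer (text encoder, posterior encoder, flow, HiFi-GAN-style decoder, duration predictor). Below is a minimal, hypothetical inference sketch; the hyperparameter values and the input id tensor are illustrative assumptions rather than values from this Space's config, and it assumes the repository's supporting modules (commons, modules, attentions, monotonic_align) are importable alongside this file.

# Hypothetical usage sketch -- hyperparameters below are assumed, not taken from this repo's config.
import torch

net_g = SynthesizerTrn(
    n_vocab=178,                      # assumed symbol-table size
    spec_channels=513,                # assumed n_fft // 2 + 1
    segment_size=32,
    inter_channels=192,
    hidden_channels=192,
    filter_channels=768,
    n_heads=2,
    n_layers=6,
    kernel_size=3,
    p_dropout=0.1,
    resblock='1',
    resblock_kernel_sizes=[3, 7, 11],
    resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]],
    upsample_rates=[8, 8, 2, 2],
    upsample_initial_channel=512,
    upsample_kernel_sizes=[16, 16, 4, 4],
).eval()

x = torch.randint(0, 178, (1, 50))    # assumed phoneme-id sequence of length 50
x_lengths = torch.LongTensor([50])
with torch.no_grad():
    audio = net_g.infer(x, x_lengths, noise_scale=0.667, length_scale=1.0)[0]

With the default use_sdp=True, infer() samples durations from the stochastic duration predictor in reverse mode, expands the text-side prior along the predicted alignment, inverts the flow, and decodes the latent with the Generator.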
spaces/Datasculptor/DescriptionGPT/tools/download_cc.py
DELETED
@@ -1,47 +0,0 @@
# Copyright (c) Facebook, Inc. and its affiliates.
import os
import json
import argparse
from PIL import Image
import numpy as np

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--ann', default='datasets/cc3m/Train_GCC-training.tsv')
    parser.add_argument('--save_image_path', default='datasets/cc3m/training/')
    parser.add_argument('--cat_info', default='datasets/lvis/lvis_v1_val.json')
    parser.add_argument('--out_path', default='datasets/cc3m/train_image_info.json')
    parser.add_argument('--not_download_image', action='store_true')
    args = parser.parse_args()
    categories = json.load(open(args.cat_info, 'r'))['categories']
    images = []
    if not os.path.exists(args.save_image_path):
        os.makedirs(args.save_image_path)
    f = open(args.ann)
    for i, line in enumerate(f):
        cap, path = line[:-1].split('\t')
        print(i, cap, path)
        if not args.not_download_image:
            os.system(
                'wget {} -O {}/{}.jpg'.format(
                    path, args.save_image_path, i + 1))
        try:
            img = Image.open(
                open('{}/{}.jpg'.format(args.save_image_path, i + 1), "rb"))
            img = np.asarray(img.convert("RGB"))
            h, w = img.shape[:2]
        except:
            continue
        image_info = {
            'id': i + 1,
            'file_name': '{}.jpg'.format(i + 1),
            'height': h,
            'width': w,
            'captions': [cap],
        }
        images.append(image_info)
    data = {'categories': categories, 'images': images, 'annotations': []}
    for k, v in data.items():
        print(k, len(v))
    print('Saving to', args.out_path)
    json.dump(data, open(args.out_path, 'w'))
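Note: the script above builds a COCO-style annotation dictionary with 'categories', 'images', and 'annotations' keys and writes it to --out_path. A small sketch of reading that file back, assuming the script was run with its default arguments:

import json

# Assumes tools/download_cc.py was run with its default --out_path.
with open('datasets/cc3m/train_image_info.json') as f:
    data = json.load(f)

print(len(data['images']), 'images with captions')
first = data['images'][0]
print(first['file_name'], first['height'], first['width'], first['captions'][0])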
spaces/Datasculptor/MusicGen/tests/utils/__init__.py
DELETED
@@ -1,5 +0,0 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
spaces/Datasculptor/StyleGAN-NADA/e4e/models/psp.py
DELETED
@@ -1,99 +0,0 @@
import matplotlib

matplotlib.use('Agg')
import torch
from torch import nn
from e4e.models.encoders import psp_encoders
from e4e.models.stylegan2.model import Generator
from e4e.configs.paths_config import model_paths


def get_keys(d, name):
    if 'state_dict' in d:
        d = d['state_dict']
    d_filt = {k[len(name) + 1:]: v for k, v in d.items() if k[:len(name)] == name}
    return d_filt


class pSp(nn.Module):

    def __init__(self, opts, device):
        super(pSp, self).__init__()
        self.opts = opts
        self.device = device
        # Define architecture
        self.encoder = self.set_encoder()
        self.decoder = Generator(opts.stylegan_size, 512, 8, channel_multiplier=2)
        self.face_pool = torch.nn.AdaptiveAvgPool2d((256, 256))
        # Load weights if needed
        self.load_weights()

    def set_encoder(self):
        if self.opts.encoder_type == 'GradualStyleEncoder':
            encoder = psp_encoders.GradualStyleEncoder(50, 'ir_se', self.opts)
        elif self.opts.encoder_type == 'Encoder4Editing':
            encoder = psp_encoders.Encoder4Editing(50, 'ir_se', self.opts)
        else:
            raise Exception('{} is not a valid encoders'.format(self.opts.encoder_type))
        return encoder

    def load_weights(self):
        if self.opts.checkpoint_path is not None:
            print('Loading e4e over the pSp framework from checkpoint: {}'.format(self.opts.checkpoint_path))
            ckpt = torch.load(self.opts.checkpoint_path, map_location='cpu')
            self.encoder.load_state_dict(get_keys(ckpt, 'encoder'), strict=True)
            self.decoder.load_state_dict(get_keys(ckpt, 'decoder'), strict=True)
            self.__load_latent_avg(ckpt)
        else:
            print('Loading encoders weights from irse50!')
            encoder_ckpt = torch.load(model_paths['ir_se50'])
            self.encoder.load_state_dict(encoder_ckpt, strict=False)
            print('Loading decoder weights from pretrained!')
            ckpt = torch.load(self.opts.stylegan_weights)
            self.decoder.load_state_dict(ckpt['g_ema'], strict=False)
            self.__load_latent_avg(ckpt, repeat=self.encoder.style_count)

    def forward(self, x, resize=True, latent_mask=None, input_code=False, randomize_noise=True,
                inject_latent=None, return_latents=False, alpha=None):
        if input_code:
            codes = x
        else:
            codes = self.encoder(x)
            # normalize with respect to the center of an average face
            if self.opts.start_from_latent_avg:
                if codes.ndim == 2:
                    codes = codes + self.latent_avg.repeat(codes.shape[0], 1, 1)[:, 0, :]
                else:
                    codes = codes + self.latent_avg.repeat(codes.shape[0], 1, 1)

        if latent_mask is not None:
            for i in latent_mask:
                if inject_latent is not None:
                    if alpha is not None:
                        codes[:, i] = alpha * inject_latent[:, i] + (1 - alpha) * codes[:, i]
                    else:
                        codes[:, i] = inject_latent[:, i]
                else:
                    codes[:, i] = 0

        input_is_latent = not input_code
        images, result_latent = self.decoder([codes],
                                             input_is_latent=input_is_latent,
                                             randomize_noise=randomize_noise,
                                             return_latents=return_latents)

        if resize:
            images = self.face_pool(images)

        if return_latents:
            return images, result_latent
        else:
            return images

    def __load_latent_avg(self, ckpt, repeat=None):
        if 'latent_avg' in ckpt:
            self.latent_avg = ckpt['latent_avg'].to(self.device)
            if repeat is not None:
                self.latent_avg = self.latent_avg.repeat(repeat, 1)
        else:
            self.latent_avg = None
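Note: get_keys() above selects the checkpoint entries belonging to one sub-module ('encoder' or 'decoder') and strips that prefix so the filtered dict can be passed to load_state_dict(). A self-contained sketch of the same slicing logic on a toy dict (the keys below are made up for illustration):

# Toy illustration of the prefix-stripping done by get_keys().
def get_keys(d, name):
    if 'state_dict' in d:
        d = d['state_dict']
    return {k[len(name) + 1:]: v for k, v in d.items() if k[:len(name)] == name}

ckpt = {'state_dict': {'encoder.conv1.weight': 1,
                       'encoder.conv1.bias': 2,
                       'decoder.to_rgb.weight': 3}}
print(get_keys(ckpt, 'encoder'))   # {'conv1.weight': 1, 'conv1.bias': 2}
print(get_keys(ckpt, 'decoder'))   # {'to_rgb.weight': 3}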
spaces/DeepDrivePL/PaddleSeg-Matting/matting/model/hrnet.py
DELETED
@@ -1,835 +0,0 @@
|
|
1 |
-
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
|
2 |
-
#
|
3 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
-
# you may not use this file except in compliance with the License.
|
5 |
-
# You may obtain a copy of the License at
|
6 |
-
#
|
7 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
-
#
|
9 |
-
# Unless required by applicable law or agreed to in writing, software
|
10 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
-
# See the License for the specific language governing permissions and
|
13 |
-
# limitations under the License.
|
14 |
-
|
15 |
-
import math
|
16 |
-
|
17 |
-
import paddle
|
18 |
-
import paddle.nn as nn
|
19 |
-
import paddle.nn.functional as F
|
20 |
-
|
21 |
-
from paddleseg.cvlibs import manager, param_init
|
22 |
-
from paddleseg.models import layers
|
23 |
-
from paddleseg.utils import utils
|
24 |
-
|
25 |
-
__all__ = [
|
26 |
-
"HRNet_W18_Small_V1", "HRNet_W18_Small_V2", "HRNet_W18", "HRNet_W30",
|
27 |
-
"HRNet_W32", "HRNet_W40", "HRNet_W44", "HRNet_W48", "HRNet_W60", "HRNet_W64"
|
28 |
-
]
|
29 |
-
|
30 |
-
|
31 |
-
class HRNet(nn.Layer):
|
32 |
-
"""
|
33 |
-
The HRNet implementation based on PaddlePaddle.
|
34 |
-
|
35 |
-
The original article refers to
|
36 |
-
Jingdong Wang, et, al. "HRNet:Deep High-Resolution Representation Learning for Visual Recognition"
|
37 |
-
(https://arxiv.org/pdf/1908.07919.pdf).
|
38 |
-
|
39 |
-
Args:
|
40 |
-
pretrained (str, optional): The path of pretrained model.
|
41 |
-
stage1_num_modules (int, optional): Number of modules for stage1. Default 1.
|
42 |
-
stage1_num_blocks (list, optional): Number of blocks per module for stage1. Default (4).
|
43 |
-
stage1_num_channels (list, optional): Number of channels per branch for stage1. Default (64).
|
44 |
-
stage2_num_modules (int, optional): Number of modules for stage2. Default 1.
|
45 |
-
stage2_num_blocks (list, optional): Number of blocks per module for stage2. Default (4, 4).
|
46 |
-
stage2_num_channels (list, optional): Number of channels per branch for stage2. Default (18, 36).
|
47 |
-
stage3_num_modules (int, optional): Number of modules for stage3. Default 4.
|
48 |
-
stage3_num_blocks (list, optional): Number of blocks per module for stage3. Default (4, 4, 4).
|
49 |
-
stage3_num_channels (list, optional): Number of channels per branch for stage3. Default [18, 36, 72).
|
50 |
-
stage4_num_modules (int, optional): Number of modules for stage4. Default 3.
|
51 |
-
stage4_num_blocks (list, optional): Number of blocks per module for stage4. Default (4, 4, 4, 4).
|
52 |
-
stage4_num_channels (list, optional): Number of channels per branch for stage4. Default (18, 36, 72. 144).
|
53 |
-
has_se (bool, optional): Whether to use Squeeze-and-Excitation module. Default False.
|
54 |
-
align_corners (bool, optional): An argument of F.interpolate. It should be set to False when the feature size is even,
|
55 |
-
e.g. 1024x512, otherwise it is True, e.g. 769x769. Default: False.
|
56 |
-
"""
|
57 |
-
|
58 |
-
def __init__(self,
|
59 |
-
input_channels=3,
|
60 |
-
pretrained=None,
|
61 |
-
stage1_num_modules=1,
|
62 |
-
stage1_num_blocks=(4, ),
|
63 |
-
stage1_num_channels=(64, ),
|
64 |
-
stage2_num_modules=1,
|
65 |
-
stage2_num_blocks=(4, 4),
|
66 |
-
stage2_num_channels=(18, 36),
|
67 |
-
stage3_num_modules=4,
|
68 |
-
stage3_num_blocks=(4, 4, 4),
|
69 |
-
stage3_num_channels=(18, 36, 72),
|
70 |
-
stage4_num_modules=3,
|
71 |
-
stage4_num_blocks=(4, 4, 4, 4),
|
72 |
-
stage4_num_channels=(18, 36, 72, 144),
|
73 |
-
has_se=False,
|
74 |
-
align_corners=False,
|
75 |
-
padding_same=True):
|
76 |
-
super(HRNet, self).__init__()
|
77 |
-
self.pretrained = pretrained
|
78 |
-
self.stage1_num_modules = stage1_num_modules
|
79 |
-
self.stage1_num_blocks = stage1_num_blocks
|
80 |
-
self.stage1_num_channels = stage1_num_channels
|
81 |
-
self.stage2_num_modules = stage2_num_modules
|
82 |
-
self.stage2_num_blocks = stage2_num_blocks
|
83 |
-
self.stage2_num_channels = stage2_num_channels
|
84 |
-
self.stage3_num_modules = stage3_num_modules
|
85 |
-
self.stage3_num_blocks = stage3_num_blocks
|
86 |
-
self.stage3_num_channels = stage3_num_channels
|
87 |
-
self.stage4_num_modules = stage4_num_modules
|
88 |
-
self.stage4_num_blocks = stage4_num_blocks
|
89 |
-
self.stage4_num_channels = stage4_num_channels
|
90 |
-
self.has_se = has_se
|
91 |
-
self.align_corners = align_corners
|
92 |
-
|
93 |
-
self.feat_channels = [i for i in stage4_num_channels]
|
94 |
-
self.feat_channels = [64] + self.feat_channels
|
95 |
-
|
96 |
-
self.conv_layer1_1 = layers.ConvBNReLU(
|
97 |
-
in_channels=input_channels,
|
98 |
-
out_channels=64,
|
99 |
-
kernel_size=3,
|
100 |
-
stride=2,
|
101 |
-
padding=1 if not padding_same else 'same',
|
102 |
-
bias_attr=False)
|
103 |
-
|
104 |
-
self.conv_layer1_2 = layers.ConvBNReLU(
|
105 |
-
in_channels=64,
|
106 |
-
out_channels=64,
|
107 |
-
kernel_size=3,
|
108 |
-
stride=2,
|
109 |
-
padding=1 if not padding_same else 'same',
|
110 |
-
bias_attr=False)
|
111 |
-
|
112 |
-
self.la1 = Layer1(
|
113 |
-
num_channels=64,
|
114 |
-
num_blocks=self.stage1_num_blocks[0],
|
115 |
-
num_filters=self.stage1_num_channels[0],
|
116 |
-
has_se=has_se,
|
117 |
-
name="layer2",
|
118 |
-
padding_same=padding_same)
|
119 |
-
|
120 |
-
self.tr1 = TransitionLayer(
|
121 |
-
in_channels=[self.stage1_num_channels[0] * 4],
|
122 |
-
out_channels=self.stage2_num_channels,
|
123 |
-
name="tr1",
|
124 |
-
padding_same=padding_same)
|
125 |
-
|
126 |
-
self.st2 = Stage(
|
127 |
-
num_channels=self.stage2_num_channels,
|
128 |
-
num_modules=self.stage2_num_modules,
|
129 |
-
num_blocks=self.stage2_num_blocks,
|
130 |
-
num_filters=self.stage2_num_channels,
|
131 |
-
has_se=self.has_se,
|
132 |
-
name="st2",
|
133 |
-
align_corners=align_corners,
|
134 |
-
padding_same=padding_same)
|
135 |
-
|
136 |
-
self.tr2 = TransitionLayer(
|
137 |
-
in_channels=self.stage2_num_channels,
|
138 |
-
out_channels=self.stage3_num_channels,
|
139 |
-
name="tr2",
|
140 |
-
padding_same=padding_same)
|
141 |
-
self.st3 = Stage(
|
142 |
-
num_channels=self.stage3_num_channels,
|
143 |
-
num_modules=self.stage3_num_modules,
|
144 |
-
num_blocks=self.stage3_num_blocks,
|
145 |
-
num_filters=self.stage3_num_channels,
|
146 |
-
has_se=self.has_se,
|
147 |
-
name="st3",
|
148 |
-
align_corners=align_corners,
|
149 |
-
padding_same=padding_same)
|
150 |
-
|
151 |
-
self.tr3 = TransitionLayer(
|
152 |
-
in_channels=self.stage3_num_channels,
|
153 |
-
out_channels=self.stage4_num_channels,
|
154 |
-
name="tr3",
|
155 |
-
padding_same=padding_same)
|
156 |
-
self.st4 = Stage(
|
157 |
-
num_channels=self.stage4_num_channels,
|
158 |
-
num_modules=self.stage4_num_modules,
|
159 |
-
num_blocks=self.stage4_num_blocks,
|
160 |
-
num_filters=self.stage4_num_channels,
|
161 |
-
has_se=self.has_se,
|
162 |
-
name="st4",
|
163 |
-
align_corners=align_corners,
|
164 |
-
padding_same=padding_same)
|
165 |
-
|
166 |
-
self.init_weight()
|
167 |
-
|
168 |
-
def forward(self, x):
|
169 |
-
feat_list = []
|
170 |
-
conv1 = self.conv_layer1_1(x)
|
171 |
-
feat_list.append(conv1)
|
172 |
-
conv2 = self.conv_layer1_2(conv1)
|
173 |
-
|
174 |
-
la1 = self.la1(conv2)
|
175 |
-
|
176 |
-
tr1 = self.tr1([la1])
|
177 |
-
st2 = self.st2(tr1)
|
178 |
-
|
179 |
-
tr2 = self.tr2(st2)
|
180 |
-
st3 = self.st3(tr2)
|
181 |
-
|
182 |
-
tr3 = self.tr3(st3)
|
183 |
-
st4 = self.st4(tr3)
|
184 |
-
|
185 |
-
feat_list = feat_list + st4
|
186 |
-
|
187 |
-
return feat_list
|
188 |
-
|
189 |
-
def init_weight(self):
|
190 |
-
for layer in self.sublayers():
|
191 |
-
if isinstance(layer, nn.Conv2D):
|
192 |
-
param_init.normal_init(layer.weight, std=0.001)
|
193 |
-
elif isinstance(layer, (nn.BatchNorm, nn.SyncBatchNorm)):
|
194 |
-
param_init.constant_init(layer.weight, value=1.0)
|
195 |
-
param_init.constant_init(layer.bias, value=0.0)
|
196 |
-
if self.pretrained is not None:
|
197 |
-
utils.load_pretrained_model(self, self.pretrained)
|
198 |
-
|
199 |
-
|
200 |
-
class Layer1(nn.Layer):
|
201 |
-
def __init__(self,
|
202 |
-
num_channels,
|
203 |
-
num_filters,
|
204 |
-
num_blocks,
|
205 |
-
has_se=False,
|
206 |
-
name=None,
|
207 |
-
padding_same=True):
|
208 |
-
super(Layer1, self).__init__()
|
209 |
-
|
210 |
-
self.bottleneck_block_list = []
|
211 |
-
|
212 |
-
for i in range(num_blocks):
|
213 |
-
bottleneck_block = self.add_sublayer(
|
214 |
-
"bb_{}_{}".format(name, i + 1),
|
215 |
-
BottleneckBlock(
|
216 |
-
num_channels=num_channels if i == 0 else num_filters * 4,
|
217 |
-
num_filters=num_filters,
|
218 |
-
has_se=has_se,
|
219 |
-
stride=1,
|
220 |
-
downsample=True if i == 0 else False,
|
221 |
-
name=name + '_' + str(i + 1),
|
222 |
-
padding_same=padding_same))
|
223 |
-
self.bottleneck_block_list.append(bottleneck_block)
|
224 |
-
|
225 |
-
def forward(self, x):
|
226 |
-
conv = x
|
227 |
-
for block_func in self.bottleneck_block_list:
|
228 |
-
conv = block_func(conv)
|
229 |
-
return conv
|
230 |
-
|
231 |
-
|
232 |
-
class TransitionLayer(nn.Layer):
|
233 |
-
def __init__(self, in_channels, out_channels, name=None, padding_same=True):
|
234 |
-
super(TransitionLayer, self).__init__()
|
235 |
-
|
236 |
-
num_in = len(in_channels)
|
237 |
-
num_out = len(out_channels)
|
238 |
-
self.conv_bn_func_list = []
|
239 |
-
for i in range(num_out):
|
240 |
-
residual = None
|
241 |
-
if i < num_in:
|
242 |
-
if in_channels[i] != out_channels[i]:
|
243 |
-
residual = self.add_sublayer(
|
244 |
-
"transition_{}_layer_{}".format(name, i + 1),
|
245 |
-
layers.ConvBNReLU(
|
246 |
-
in_channels=in_channels[i],
|
247 |
-
out_channels=out_channels[i],
|
248 |
-
kernel_size=3,
|
249 |
-
padding=1 if not padding_same else 'same',
|
250 |
-
bias_attr=False))
|
251 |
-
else:
|
252 |
-
residual = self.add_sublayer(
|
253 |
-
"transition_{}_layer_{}".format(name, i + 1),
|
254 |
-
layers.ConvBNReLU(
|
255 |
-
in_channels=in_channels[-1],
|
256 |
-
out_channels=out_channels[i],
|
257 |
-
kernel_size=3,
|
258 |
-
stride=2,
|
259 |
-
padding=1 if not padding_same else 'same',
|
260 |
-
bias_attr=False))
|
261 |
-
self.conv_bn_func_list.append(residual)
|
262 |
-
|
263 |
-
def forward(self, x):
|
264 |
-
outs = []
|
265 |
-
for idx, conv_bn_func in enumerate(self.conv_bn_func_list):
|
266 |
-
if conv_bn_func is None:
|
267 |
-
outs.append(x[idx])
|
268 |
-
else:
|
269 |
-
if idx < len(x):
|
270 |
-
outs.append(conv_bn_func(x[idx]))
|
271 |
-
else:
|
272 |
-
outs.append(conv_bn_func(x[-1]))
|
273 |
-
return outs
|
274 |
-
|
275 |
-
|
276 |
-
class Branches(nn.Layer):
|
277 |
-
def __init__(self,
|
278 |
-
num_blocks,
|
279 |
-
in_channels,
|
280 |
-
out_channels,
|
281 |
-
has_se=False,
|
282 |
-
name=None,
|
283 |
-
padding_same=True):
|
284 |
-
super(Branches, self).__init__()
|
285 |
-
|
286 |
-
self.basic_block_list = []
|
287 |
-
|
288 |
-
for i in range(len(out_channels)):
|
289 |
-
self.basic_block_list.append([])
|
290 |
-
for j in range(num_blocks[i]):
|
291 |
-
in_ch = in_channels[i] if j == 0 else out_channels[i]
|
292 |
-
basic_block_func = self.add_sublayer(
|
293 |
-
"bb_{}_branch_layer_{}_{}".format(name, i + 1, j + 1),
|
294 |
-
BasicBlock(
|
295 |
-
num_channels=in_ch,
|
296 |
-
num_filters=out_channels[i],
|
297 |
-
has_se=has_se,
|
298 |
-
name=name + '_branch_layer_' + str(i + 1) + '_' +
|
299 |
-
str(j + 1),
|
300 |
-
padding_same=padding_same))
|
301 |
-
self.basic_block_list[i].append(basic_block_func)
|
302 |
-
|
303 |
-
def forward(self, x):
|
304 |
-
outs = []
|
305 |
-
for idx, input in enumerate(x):
|
306 |
-
conv = input
|
307 |
-
for basic_block_func in self.basic_block_list[idx]:
|
308 |
-
conv = basic_block_func(conv)
|
309 |
-
outs.append(conv)
|
310 |
-
return outs
|
311 |
-
|
312 |
-
|
313 |
-
class BottleneckBlock(nn.Layer):
|
314 |
-
def __init__(self,
|
315 |
-
num_channels,
|
316 |
-
num_filters,
|
317 |
-
has_se,
|
318 |
-
stride=1,
|
319 |
-
downsample=False,
|
320 |
-
name=None,
|
321 |
-
padding_same=True):
|
322 |
-
super(BottleneckBlock, self).__init__()
|
323 |
-
|
324 |
-
self.has_se = has_se
|
325 |
-
self.downsample = downsample
|
326 |
-
|
327 |
-
self.conv1 = layers.ConvBNReLU(
|
328 |
-
in_channels=num_channels,
|
329 |
-
out_channels=num_filters,
|
330 |
-
kernel_size=1,
|
331 |
-
bias_attr=False)
|
332 |
-
|
333 |
-
self.conv2 = layers.ConvBNReLU(
|
334 |
-
in_channels=num_filters,
|
335 |
-
out_channels=num_filters,
|
336 |
-
kernel_size=3,
|
337 |
-
stride=stride,
|
338 |
-
padding=1 if not padding_same else 'same',
|
339 |
-
bias_attr=False)
|
340 |
-
|
341 |
-
self.conv3 = layers.ConvBN(
|
342 |
-
in_channels=num_filters,
|
343 |
-
out_channels=num_filters * 4,
|
344 |
-
kernel_size=1,
|
345 |
-
bias_attr=False)
|
346 |
-
|
347 |
-
if self.downsample:
|
348 |
-
self.conv_down = layers.ConvBN(
|
349 |
-
in_channels=num_channels,
|
350 |
-
out_channels=num_filters * 4,
|
351 |
-
kernel_size=1,
|
352 |
-
bias_attr=False)
|
353 |
-
|
354 |
-
if self.has_se:
|
355 |
-
self.se = SELayer(
|
356 |
-
num_channels=num_filters * 4,
|
357 |
-
num_filters=num_filters * 4,
|
358 |
-
reduction_ratio=16,
|
359 |
-
name=name + '_fc')
|
360 |
-
|
361 |
-
self.add = layers.Add()
|
362 |
-
self.relu = layers.Activation("relu")
|
363 |
-
|
364 |
-
def forward(self, x):
|
365 |
-
residual = x
|
366 |
-
conv1 = self.conv1(x)
|
367 |
-
conv2 = self.conv2(conv1)
|
368 |
-
conv3 = self.conv3(conv2)
|
369 |
-
|
370 |
-
if self.downsample:
|
371 |
-
residual = self.conv_down(x)
|
372 |
-
|
373 |
-
if self.has_se:
|
374 |
-
conv3 = self.se(conv3)
|
375 |
-
|
376 |
-
y = self.add(conv3, residual)
|
377 |
-
y = self.relu(y)
|
378 |
-
return y
|
379 |
-
|
380 |
-
|
381 |
-
class BasicBlock(nn.Layer):
|
382 |
-
def __init__(self,
|
383 |
-
num_channels,
|
384 |
-
num_filters,
|
385 |
-
stride=1,
|
386 |
-
has_se=False,
|
387 |
-
downsample=False,
|
388 |
-
name=None,
|
389 |
-
padding_same=True):
|
390 |
-
super(BasicBlock, self).__init__()
|
391 |
-
|
392 |
-
self.has_se = has_se
|
393 |
-
self.downsample = downsample
|
394 |
-
|
395 |
-
self.conv1 = layers.ConvBNReLU(
|
396 |
-
in_channels=num_channels,
|
397 |
-
out_channels=num_filters,
|
398 |
-
kernel_size=3,
|
399 |
-
stride=stride,
|
400 |
-
padding=1 if not padding_same else 'same',
|
401 |
-
bias_attr=False)
|
402 |
-
self.conv2 = layers.ConvBN(
|
403 |
-
in_channels=num_filters,
|
404 |
-
out_channels=num_filters,
|
405 |
-
kernel_size=3,
|
406 |
-
padding=1 if not padding_same else 'same',
|
407 |
-
bias_attr=False)
|
408 |
-
|
409 |
-
if self.downsample:
|
410 |
-
self.conv_down = layers.ConvBNReLU(
|
411 |
-
in_channels=num_channels,
|
412 |
-
out_channels=num_filters,
|
413 |
-
kernel_size=1,
|
414 |
-
bias_attr=False)
|
415 |
-
|
416 |
-
if self.has_se:
|
417 |
-
self.se = SELayer(
|
418 |
-
num_channels=num_filters,
|
419 |
-
num_filters=num_filters,
|
420 |
-
reduction_ratio=16,
|
421 |
-
name=name + '_fc')
|
422 |
-
|
423 |
-
self.add = layers.Add()
|
424 |
-
self.relu = layers.Activation("relu")
|
425 |
-
|
426 |
-
def forward(self, x):
|
427 |
-
residual = x
|
428 |
-
conv1 = self.conv1(x)
|
429 |
-
conv2 = self.conv2(conv1)
|
430 |
-
|
431 |
-
if self.downsample:
|
432 |
-
residual = self.conv_down(x)
|
433 |
-
|
434 |
-
if self.has_se:
|
435 |
-
conv2 = self.se(conv2)
|
436 |
-
|
437 |
-
y = self.add(conv2, residual)
|
438 |
-
y = self.relu(y)
|
439 |
-
return y
|
440 |
-
|
441 |
-
|
442 |
-
class SELayer(nn.Layer):
|
443 |
-
def __init__(self, num_channels, num_filters, reduction_ratio, name=None):
|
444 |
-
super(SELayer, self).__init__()
|
445 |
-
|
446 |
-
self.pool2d_gap = nn.AdaptiveAvgPool2D(1)
|
447 |
-
|
448 |
-
self._num_channels = num_channels
|
449 |
-
|
450 |
-
med_ch = int(num_channels / reduction_ratio)
|
451 |
-
stdv = 1.0 / math.sqrt(num_channels * 1.0)
|
452 |
-
self.squeeze = nn.Linear(
|
453 |
-
num_channels,
|
454 |
-
med_ch,
|
455 |
-
weight_attr=paddle.ParamAttr(
|
456 |
-
initializer=nn.initializer.Uniform(-stdv, stdv)))
|
457 |
-
|
458 |
-
stdv = 1.0 / math.sqrt(med_ch * 1.0)
|
459 |
-
self.excitation = nn.Linear(
|
460 |
-
med_ch,
|
461 |
-
num_filters,
|
462 |
-
weight_attr=paddle.ParamAttr(
|
463 |
-
initializer=nn.initializer.Uniform(-stdv, stdv)))
|
464 |
-
|
465 |
-
def forward(self, x):
|
466 |
-
pool = self.pool2d_gap(x)
|
467 |
-
pool = paddle.reshape(pool, shape=[-1, self._num_channels])
|
468 |
-
squeeze = self.squeeze(pool)
|
469 |
-
squeeze = F.relu(squeeze)
|
470 |
-
excitation = self.excitation(squeeze)
|
471 |
-
excitation = F.sigmoid(excitation)
|
472 |
-
excitation = paddle.reshape(
|
473 |
-
excitation, shape=[-1, self._num_channels, 1, 1])
|
474 |
-
out = x * excitation
|
475 |
-
return out
|
476 |
-
|
477 |
-
|
478 |
-
class Stage(nn.Layer):
|
479 |
-
def __init__(self,
|
480 |
-
num_channels,
|
481 |
-
num_modules,
|
482 |
-
num_blocks,
|
483 |
-
num_filters,
|
484 |
-
has_se=False,
|
485 |
-
multi_scale_output=True,
|
486 |
-
name=None,
|
487 |
-
align_corners=False,
|
488 |
-
padding_same=True):
|
489 |
-
super(Stage, self).__init__()
|
490 |
-
|
491 |
-
self._num_modules = num_modules
|
492 |
-
|
493 |
-
self.stage_func_list = []
|
494 |
-
for i in range(num_modules):
|
495 |
-
if i == num_modules - 1 and not multi_scale_output:
|
496 |
-
stage_func = self.add_sublayer(
|
497 |
-
"stage_{}_{}".format(name, i + 1),
|
498 |
-
HighResolutionModule(
|
499 |
-
num_channels=num_channels,
|
500 |
-
num_blocks=num_blocks,
|
501 |
-
num_filters=num_filters,
|
502 |
-
has_se=has_se,
|
503 |
-
multi_scale_output=False,
|
504 |
-
name=name + '_' + str(i + 1),
|
505 |
-
align_corners=align_corners,
|
506 |
-
padding_same=padding_same))
|
507 |
-
else:
|
508 |
-
stage_func = self.add_sublayer(
|
509 |
-
"stage_{}_{}".format(name, i + 1),
|
510 |
-
HighResolutionModule(
|
511 |
-
num_channels=num_channels,
|
512 |
-
num_blocks=num_blocks,
|
513 |
-
num_filters=num_filters,
|
514 |
-
has_se=has_se,
|
515 |
-
name=name + '_' + str(i + 1),
|
516 |
-
align_corners=align_corners,
|
517 |
-
padding_same=padding_same))
|
518 |
-
|
519 |
-
self.stage_func_list.append(stage_func)
|
520 |
-
|
521 |
-
def forward(self, x):
|
522 |
-
out = x
|
523 |
-
for idx in range(self._num_modules):
|
524 |
-
out = self.stage_func_list[idx](out)
|
525 |
-
return out
|
526 |
-
|
527 |
-
|
528 |
-
class HighResolutionModule(nn.Layer):
|
529 |
-
def __init__(self,
|
530 |
-
num_channels,
|
531 |
-
num_blocks,
|
532 |
-
num_filters,
|
533 |
-
has_se=False,
|
534 |
-
multi_scale_output=True,
|
535 |
-
name=None,
|
536 |
-
align_corners=False,
|
537 |
-
padding_same=True):
|
538 |
-
super(HighResolutionModule, self).__init__()
|
539 |
-
|
540 |
-
self.branches_func = Branches(
|
541 |
-
num_blocks=num_blocks,
|
542 |
-
in_channels=num_channels,
|
543 |
-
out_channels=num_filters,
|
544 |
-
has_se=has_se,
|
545 |
-
name=name,
|
546 |
-
padding_same=padding_same)
|
547 |
-
|
548 |
-
self.fuse_func = FuseLayers(
|
549 |
-
in_channels=num_filters,
|
550 |
-
out_channels=num_filters,
|
551 |
-
multi_scale_output=multi_scale_output,
|
552 |
-
name=name,
|
553 |
-
align_corners=align_corners,
|
554 |
-
padding_same=padding_same)
|
555 |
-
|
556 |
-
def forward(self, x):
|
557 |
-
out = self.branches_func(x)
|
558 |
-
out = self.fuse_func(out)
|
559 |
-
return out
|
560 |
-
|
561 |
-
|
562 |
-
class FuseLayers(nn.Layer):
|
563 |
-
def __init__(self,
|
564 |
-
in_channels,
|
565 |
-
out_channels,
|
566 |
-
multi_scale_output=True,
|
567 |
-
name=None,
|
568 |
-
align_corners=False,
|
569 |
-
padding_same=True):
|
570 |
-
super(FuseLayers, self).__init__()
|
571 |
-
|
572 |
-
self._actual_ch = len(in_channels) if multi_scale_output else 1
|
573 |
-
self._in_channels = in_channels
|
574 |
-
self.align_corners = align_corners
|
575 |
-
|
576 |
-
self.residual_func_list = []
|
577 |
-
for i in range(self._actual_ch):
|
578 |
-
for j in range(len(in_channels)):
|
579 |
-
if j > i:
|
580 |
-
residual_func = self.add_sublayer(
|
581 |
-
"residual_{}_layer_{}_{}".format(name, i + 1, j + 1),
|
582 |
-
layers.ConvBN(
|
583 |
-
in_channels=in_channels[j],
|
584 |
-
out_channels=out_channels[i],
|
585 |
-
kernel_size=1,
|
586 |
-
bias_attr=False))
|
587 |
-
self.residual_func_list.append(residual_func)
|
588 |
-
elif j < i:
|
589 |
-
pre_num_filters = in_channels[j]
|
590 |
-
for k in range(i - j):
|
591 |
-
if k == i - j - 1:
|
592 |
-
residual_func = self.add_sublayer(
|
593 |
-
"residual_{}_layer_{}_{}_{}".format(
|
594 |
-
name, i + 1, j + 1, k + 1),
|
595 |
-
layers.ConvBN(
|
596 |
-
in_channels=pre_num_filters,
|
597 |
-
out_channels=out_channels[i],
|
598 |
-
kernel_size=3,
|
599 |
-
stride=2,
|
600 |
-
padding=1 if not padding_same else 'same',
|
601 |
-
bias_attr=False))
|
602 |
-
pre_num_filters = out_channels[i]
|
603 |
-
else:
|
604 |
-
residual_func = self.add_sublayer(
|
605 |
-
"residual_{}_layer_{}_{}_{}".format(
|
606 |
-
name, i + 1, j + 1, k + 1),
|
607 |
-
layers.ConvBNReLU(
|
608 |
-
in_channels=pre_num_filters,
|
609 |
-
out_channels=out_channels[j],
|
610 |
-
kernel_size=3,
|
611 |
-
stride=2,
|
612 |
-
padding=1 if not padding_same else 'same',
|
613 |
-
bias_attr=False))
|
614 |
-
pre_num_filters = out_channels[j]
|
615 |
-
self.residual_func_list.append(residual_func)
|
616 |
-
|
617 |
-
def forward(self, x):
|
618 |
-
outs = []
|
619 |
-
residual_func_idx = 0
|
620 |
-
for i in range(self._actual_ch):
|
621 |
-
residual = x[i]
|
622 |
-
residual_shape = paddle.shape(residual)[-2:]
|
623 |
-
for j in range(len(self._in_channels)):
|
624 |
-
if j > i:
|
625 |
-
y = self.residual_func_list[residual_func_idx](x[j])
|
626 |
-
residual_func_idx += 1
|
627 |
-
|
628 |
-
y = F.interpolate(
|
629 |
-
y,
|
630 |
-
residual_shape,
|
631 |
-
mode='bilinear',
|
632 |
-
align_corners=self.align_corners)
|
633 |
-
residual = residual + y
|
634 |
-
elif j < i:
|
635 |
-
y = x[j]
|
636 |
-
for k in range(i - j):
|
637 |
-
y = self.residual_func_list[residual_func_idx](y)
|
638 |
-
residual_func_idx += 1
|
639 |
-
|
640 |
-
residual = residual + y
|
641 |
-
|
642 |
-
residual = F.relu(residual)
|
643 |
-
outs.append(residual)
|
644 |
-
|
645 |
-
return outs
|
646 |
-
|
647 |
-
|
648 |
-
@manager.BACKBONES.add_component
|
649 |
-
def HRNet_W18_Small_V1(**kwargs):
|
650 |
-
model = HRNet(
|
651 |
-
stage1_num_modules=1,
|
652 |
-
stage1_num_blocks=[1],
|
653 |
-
stage1_num_channels=[32],
|
654 |
-
stage2_num_modules=1,
|
655 |
-
stage2_num_blocks=[2, 2],
|
656 |
-
stage2_num_channels=[16, 32],
|
657 |
-
stage3_num_modules=1,
|
658 |
-
stage3_num_blocks=[2, 2, 2],
|
659 |
-
stage3_num_channels=[16, 32, 64],
|
660 |
-
stage4_num_modules=1,
|
661 |
-
stage4_num_blocks=[2, 2, 2, 2],
|
662 |
-
stage4_num_channels=[16, 32, 64, 128],
|
663 |
-
**kwargs)
|
664 |
-
return model
|
665 |
-
|
666 |
-
|
667 |
-
@manager.BACKBONES.add_component
|
668 |
-
def HRNet_W18_Small_V2(**kwargs):
|
669 |
-
model = HRNet(
|
670 |
-
stage1_num_modules=1,
|
671 |
-
stage1_num_blocks=[2],
|
672 |
-
stage1_num_channels=[64],
|
673 |
-
stage2_num_modules=1,
|
674 |
-
stage2_num_blocks=[2, 2],
|
675 |
-
stage2_num_channels=[18, 36],
|
676 |
-
stage3_num_modules=3,
|
677 |
-
stage3_num_blocks=[2, 2, 2],
|
678 |
-
stage3_num_channels=[18, 36, 72],
|
679 |
-
stage4_num_modules=2,
|
680 |
-
stage4_num_blocks=[2, 2, 2, 2],
|
681 |
-
stage4_num_channels=[18, 36, 72, 144],
|
682 |
-
**kwargs)
|
683 |
-
return model
|
684 |
-
|
685 |
-
|
686 |
-
@manager.BACKBONES.add_component
|
687 |
-
def HRNet_W18(**kwargs):
|
688 |
-
model = HRNet(
|
689 |
-
stage1_num_modules=1,
|
690 |
-
stage1_num_blocks=[4],
|
691 |
-
stage1_num_channels=[64],
|
692 |
-
stage2_num_modules=1,
|
693 |
-
stage2_num_blocks=[4, 4],
|
694 |
-
stage2_num_channels=[18, 36],
|
695 |
-
stage3_num_modules=4,
|
696 |
-
stage3_num_blocks=[4, 4, 4],
|
697 |
-
stage3_num_channels=[18, 36, 72],
|
698 |
-
stage4_num_modules=3,
|
699 |
-
stage4_num_blocks=[4, 4, 4, 4],
|
700 |
-
stage4_num_channels=[18, 36, 72, 144],
|
701 |
-
**kwargs)
|
702 |
-
return model
|
703 |
-
|
704 |
-
|
705 |
-
@manager.BACKBONES.add_component
|
706 |
-
def HRNet_W30(**kwargs):
|
707 |
-
model = HRNet(
|
708 |
-
stage1_num_modules=1,
|
709 |
-
stage1_num_blocks=[4],
|
710 |
-
stage1_num_channels=[64],
|
711 |
-
stage2_num_modules=1,
|
712 |
-
stage2_num_blocks=[4, 4],
|
713 |
-
stage2_num_channels=[30, 60],
|
714 |
-
stage3_num_modules=4,
|
715 |
-
stage3_num_blocks=[4, 4, 4],
|
716 |
-
stage3_num_channels=[30, 60, 120],
|
717 |
-
stage4_num_modules=3,
|
718 |
-
stage4_num_blocks=[4, 4, 4, 4],
|
719 |
-
stage4_num_channels=[30, 60, 120, 240],
|
720 |
-
**kwargs)
|
721 |
-
return model
|
722 |
-
|
723 |
-
|
724 |
-
@manager.BACKBONES.add_component
|
725 |
-
def HRNet_W32(**kwargs):
|
726 |
-
model = HRNet(
|
727 |
-
stage1_num_modules=1,
|
728 |
-
stage1_num_blocks=[4],
|
729 |
-
stage1_num_channels=[64],
|
730 |
-
stage2_num_modules=1,
|
731 |
-
stage2_num_blocks=[4, 4],
|
732 |
-
stage2_num_channels=[32, 64],
|
733 |
-
stage3_num_modules=4,
|
734 |
-
stage3_num_blocks=[4, 4, 4],
|
735 |
-
stage3_num_channels=[32, 64, 128],
|
736 |
-
stage4_num_modules=3,
|
737 |
-
stage4_num_blocks=[4, 4, 4, 4],
|
738 |
-
stage4_num_channels=[32, 64, 128, 256],
|
739 |
-
**kwargs)
|
740 |
-
return model
|
741 |
-
|
742 |
-
|
743 |
-
@manager.BACKBONES.add_component
|
744 |
-
def HRNet_W40(**kwargs):
|
745 |
-
model = HRNet(
|
746 |
-
stage1_num_modules=1,
|
747 |
-
stage1_num_blocks=[4],
|
748 |
-
stage1_num_channels=[64],
|
749 |
-
stage2_num_modules=1,
|
750 |
-
stage2_num_blocks=[4, 4],
|
751 |
-
stage2_num_channels=[40, 80],
|
752 |
-
stage3_num_modules=4,
|
753 |
-
stage3_num_blocks=[4, 4, 4],
|
754 |
-
stage3_num_channels=[40, 80, 160],
|
755 |
-
stage4_num_modules=3,
|
756 |
-
stage4_num_blocks=[4, 4, 4, 4],
|
757 |
-
stage4_num_channels=[40, 80, 160, 320],
|
758 |
-
**kwargs)
|
759 |
-
return model
|
760 |
-
|
761 |
-
|
762 |
-
@manager.BACKBONES.add_component
|
763 |
-
def HRNet_W44(**kwargs):
|
764 |
-
model = HRNet(
|
765 |
-
stage1_num_modules=1,
|
766 |
-
stage1_num_blocks=[4],
|
767 |
-
stage1_num_channels=[64],
|
768 |
-
stage2_num_modules=1,
|
769 |
-
stage2_num_blocks=[4, 4],
|
770 |
-
stage2_num_channels=[44, 88],
|
771 |
-
stage3_num_modules=4,
|
772 |
-
stage3_num_blocks=[4, 4, 4],
|
773 |
-
stage3_num_channels=[44, 88, 176],
|
774 |
-
stage4_num_modules=3,
|
775 |
-
stage4_num_blocks=[4, 4, 4, 4],
|
776 |
-
stage4_num_channels=[44, 88, 176, 352],
|
777 |
-
**kwargs)
|
778 |
-
return model
|
779 |
-
|
780 |
-
|
781 |
-
@manager.BACKBONES.add_component
|
782 |
-
def HRNet_W48(**kwargs):
|
783 |
-
model = HRNet(
|
784 |
-
stage1_num_modules=1,
|
785 |
-
stage1_num_blocks=[4],
|
786 |
-
stage1_num_channels=[64],
|
787 |
-
stage2_num_modules=1,
|
788 |
-
stage2_num_blocks=[4, 4],
|
789 |
-
stage2_num_channels=[48, 96],
|
790 |
-
stage3_num_modules=4,
|
791 |
-
stage3_num_blocks=[4, 4, 4],
|
792 |
-
stage3_num_channels=[48, 96, 192],
|
793 |
-
stage4_num_modules=3,
|
794 |
-
stage4_num_blocks=[4, 4, 4, 4],
|
795 |
-
stage4_num_channels=[48, 96, 192, 384],
|
796 |
-
**kwargs)
|
797 |
-
return model
|
798 |
-
|
799 |
-
|
800 |
-
@manager.BACKBONES.add_component
|
801 |
-
def HRNet_W60(**kwargs):
|
802 |
-
model = HRNet(
|
803 |
-
stage1_num_modules=1,
|
804 |
-
stage1_num_blocks=[4],
|
805 |
-
stage1_num_channels=[64],
|
806 |
-
stage2_num_modules=1,
|
807 |
-
stage2_num_blocks=[4, 4],
|
808 |
-
stage2_num_channels=[60, 120],
|
809 |
-
stage3_num_modules=4,
|
810 |
-
stage3_num_blocks=[4, 4, 4],
|
811 |
-
stage3_num_channels=[60, 120, 240],
|
812 |
-
stage4_num_modules=3,
|
813 |
-
stage4_num_blocks=[4, 4, 4, 4],
|
814 |
-
stage4_num_channels=[60, 120, 240, 480],
|
815 |
-
**kwargs)
|
816 |
-
return model
|
817 |
-
|
818 |
-
|
819 |
-
@manager.BACKBONES.add_component
|
820 |
-
def HRNet_W64(**kwargs):
|
821 |
-
model = HRNet(
|
822 |
-
stage1_num_modules=1,
|
823 |
-
stage1_num_blocks=[4],
|
824 |
-
stage1_num_channels=[64],
|
825 |
-
stage2_num_modules=1,
|
826 |
-
stage2_num_blocks=[4, 4],
|
827 |
-
stage2_num_channels=[64, 128],
|
828 |
-
stage3_num_modules=4,
|
829 |
-
stage3_num_blocks=[4, 4, 4],
|
830 |
-
stage3_num_channels=[64, 128, 256],
|
831 |
-
stage4_num_modules=3,
|
832 |
-
stage4_num_blocks=[4, 4, 4, 4],
|
833 |
-
stage4_num_channels=[64, 128, 256, 512],
|
834 |
-
**kwargs)
|
835 |
-
return model
|
|
|
spaces/Detomo/ai-comic-generation/src/lib/useImageDimension.ts
DELETED
@@ -1,20 +0,0 @@
import { useEffect, useState } from "react"

import { ImageDimension, getImageDimension } from "./getImageDimension"

export function useImageDimension(src: string) {
  const [dimension, setDimension] = useState<ImageDimension>({
    width: 0,
    height: 0,
  })

  useEffect(() => {
    const compute = async () => {
      const newDimension = await getImageDimension(src)
      setDimension(newDimension)
    }
    compute()
  }, [src])

  return dimension
}
spaces/Dineshdc/MygenAIChatbot/README.md
DELETED
@@ -1,12 +0,0 @@
---
title: MygenAIChatbot
emoji: 📊
colorFrom: red
colorTo: yellow
sdk: gradio
sdk_version: 3.39.0
app_file: app.py
pinned: false
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
spaces/DragGan/DragGan/stylegan_human/pti/pti_models/e4e/stylegan2/model.py
DELETED
@@ -1,680 +0,0 @@
-import math
-import random
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-from .op.fused_act import FusedLeakyReLU, fused_leaky_relu
-from .op.upfirdn2d import upfirdn2d
-
-
-class PixelNorm(nn.Module):
-    def __init__(self):
-        super().__init__()
-
-    def forward(self, input):
-        return input * torch.rsqrt(torch.mean(input ** 2, dim=1, keepdim=True) + 1e-8)
-
-
-def make_kernel(k):
-    k = torch.tensor(k, dtype=torch.float32)
-
-    if k.ndim == 1:
-        k = k[None, :] * k[:, None]
-
-    k /= k.sum()
-
-    return k
-
-
-class Upsample(nn.Module):
-    def __init__(self, kernel, factor=2):
-        super().__init__()
-
-        self.factor = factor
-        kernel = make_kernel(kernel) * (factor ** 2)
-        self.register_buffer('kernel', kernel)
-
-        p = kernel.shape[0] - factor
-
-        pad0 = (p + 1) // 2 + factor - 1
-        pad1 = p // 2
-
-        self.pad = (pad0, pad1)
-
-    def forward(self, input):
-        out = upfirdn2d(input, self.kernel, up=self.factor, down=1, pad=self.pad)
-
-        return out
-
-
-class Downsample(nn.Module):
-    def __init__(self, kernel, factor=2):
-        super().__init__()
-
-        self.factor = factor
-        kernel = make_kernel(kernel)
-        self.register_buffer('kernel', kernel)
-
-        p = kernel.shape[0] - factor
-
-        pad0 = (p + 1) // 2
-        pad1 = p // 2
-
-        self.pad = (pad0, pad1)
-
-    def forward(self, input):
-        out = upfirdn2d(input, self.kernel, up=1, down=self.factor, pad=self.pad)
-
-        return out
-
-
-class Blur(nn.Module):
-    def __init__(self, kernel, pad, upsample_factor=1):
-        super().__init__()
-
-        kernel = make_kernel(kernel)
-
-        if upsample_factor > 1:
-            kernel = kernel * (upsample_factor ** 2)
-
-        self.register_buffer('kernel', kernel)
-
-        self.pad = pad
-
-    def forward(self, input):
-        out = upfirdn2d(input, self.kernel, pad=self.pad)
-
-        return out
-
-
-class EqualConv2d(nn.Module):
-    def __init__(
-        self, in_channel, out_channel, kernel_size, stride=1, padding=0, bias=True
-    ):
-        super().__init__()
-
-        self.weight = nn.Parameter(
-            torch.randn(out_channel, in_channel, kernel_size, kernel_size)
-        )
-        self.scale = 1 / math.sqrt(in_channel * kernel_size ** 2)
-
-        self.stride = stride
-        self.padding = padding
-
-        if bias:
-            self.bias = nn.Parameter(torch.zeros(out_channel))
-
-        else:
-            self.bias = None
-
-    def forward(self, input):
-        out = F.conv2d(
-            input,
-            self.weight * self.scale,
-            bias=self.bias,
-            stride=self.stride,
-            padding=self.padding,
-        )
-
-        return out
-
-    def __repr__(self):
-        return (
-            f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]},'
-            f' {self.weight.shape[2]}, stride={self.stride}, padding={self.padding})'
-        )
-
-
-class EqualLinear(nn.Module):
-    def __init__(
-        self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None
-    ):
-        super().__init__()
-
-        self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul))
-
-        if bias:
-            self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init))
-
-        else:
-            self.bias = None
-
-        self.activation = activation
-
-        self.scale = (1 / math.sqrt(in_dim)) * lr_mul
-        self.lr_mul = lr_mul
-
-    def forward(self, input):
-        if self.activation:
-            out = F.linear(input, self.weight * self.scale)
-            out = fused_leaky_relu(out, self.bias * self.lr_mul)
-
-        else:
-            out = F.linear(
-                input, self.weight * self.scale, bias=self.bias * self.lr_mul
-            )
-
-        return out
-
-    def __repr__(self):
-        return (
-            f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})'
-        )
-
-
-class ScaledLeakyReLU(nn.Module):
-    def __init__(self, negative_slope=0.2):
-        super().__init__()
-
-        self.negative_slope = negative_slope
-
-    def forward(self, input):
-        out = F.leaky_relu(input, negative_slope=self.negative_slope)
-
-        return out * math.sqrt(2)
-
-
-class ModulatedConv2d(nn.Module):
-    def __init__(
-        self,
-        in_channel,
-        out_channel,
-        kernel_size,
-        style_dim,
-        demodulate=True,
-        upsample=False,
-        downsample=False,
-        blur_kernel=[1, 3, 3, 1],
-    ):
-        super().__init__()
-
-        self.eps = 1e-8
-        self.kernel_size = kernel_size
-        self.in_channel = in_channel
-        self.out_channel = out_channel
-        self.upsample = upsample
-        self.downsample = downsample
-
-        if upsample:
-            factor = 2
-            p = (len(blur_kernel) - factor) - (kernel_size - 1)
-            pad0 = (p + 1) // 2 + factor - 1
-            pad1 = p // 2 + 1
-
-            self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor=factor)
-
-        if downsample:
-            factor = 2
-            p = (len(blur_kernel) - factor) + (kernel_size - 1)
-            pad0 = (p + 1) // 2
-            pad1 = p // 2
-
-            self.blur = Blur(blur_kernel, pad=(pad0, pad1))
-
-        fan_in = in_channel * kernel_size ** 2
-        self.scale = 1 / math.sqrt(fan_in)
-        self.padding = kernel_size // 2
-
-        self.weight = nn.Parameter(
-            torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)
-        )
-
-        self.modulation = EqualLinear(style_dim, in_channel, bias_init=1)
-
-        self.demodulate = demodulate
-
-    def __repr__(self):
-        return (
-            f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, '
-            f'upsample={self.upsample}, downsample={self.downsample})'
-        )
-
-    def forward(self, input, style):
-        batch, in_channel, height, width = input.shape
-
-        style = self.modulation(style).view(batch, 1, in_channel, 1, 1)
-        weight = self.scale * self.weight * style
-
-        if self.demodulate:
-            demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-8)
-            weight = weight * demod.view(batch, self.out_channel, 1, 1, 1)
-
-        weight = weight.view(
-            batch * self.out_channel, in_channel, self.kernel_size, self.kernel_size
-        )
-
-        if self.upsample:
-            input = input.view(1, batch * in_channel, height, width)
-            weight = weight.view(
-                batch, self.out_channel, in_channel, self.kernel_size, self.kernel_size
-            )
-            weight = weight.transpose(1, 2).reshape(
-                batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size
-            )
-            out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch)
-            _, _, height, width = out.shape
-            out = out.view(batch, self.out_channel, height, width)
-            out = self.blur(out)
-
-        elif self.downsample:
-            input = self.blur(input)
-            _, _, height, width = input.shape
-            input = input.view(1, batch * in_channel, height, width)
-            out = F.conv2d(input, weight, padding=0, stride=2, groups=batch)
-            _, _, height, width = out.shape
-            out = out.view(batch, self.out_channel, height, width)
-
-        else:
-            input = input.view(1, batch * in_channel, height, width)
-            out = F.conv2d(input, weight, padding=self.padding, groups=batch)
-            _, _, height, width = out.shape
-            out = out.view(batch, self.out_channel, height, width)
-
-        return out
-
-
-class NoiseInjection(nn.Module):
-    def __init__(self):
-        super().__init__()
-
-        self.weight = nn.Parameter(torch.zeros(1))
-
-    def forward(self, image, noise=None):
-        if noise is None:
-            batch, _, height, width = image.shape
-            noise = image.new_empty(batch, 1, height, width).normal_()
-
-        return image + self.weight * noise
-
-
-class ConstantInput(nn.Module):
-    def __init__(self, channel, size=4):
-        super().__init__()
-
-        self.input = nn.Parameter(torch.randn(1, channel, size, size // 2))
-
-    def forward(self, input):
-        batch = input.shape[0]
-        out = self.input.repeat(batch, 1, 1, 1)
-
-        return out
-
-
-class StyledConv(nn.Module):
-    def __init__(
-        self,
-        in_channel,
-        out_channel,
-        kernel_size,
-        style_dim,
-        upsample=False,
-        blur_kernel=[1, 3, 3, 1],
-        demodulate=True,
-    ):
-        super().__init__()
-
-        self.conv = ModulatedConv2d(
-            in_channel,
-            out_channel,
-            kernel_size,
-            style_dim,
-            upsample=upsample,
-            blur_kernel=blur_kernel,
-            demodulate=demodulate,
-        )
-
-        self.noise = NoiseInjection()
-        # self.bias = nn.Parameter(torch.zeros(1, out_channel, 1, 1))
-        # self.activate = ScaledLeakyReLU(0.2)
-        self.activate = FusedLeakyReLU(out_channel)
-
-    def forward(self, input, style, noise=None):
-        out = self.conv(input, style)
-        out = self.noise(out, noise=noise)
-        # out = out + self.bias
-        out = self.activate(out)
-
-        return out
-
-
-class ToRGB(nn.Module):
-    def __init__(self, in_channel, style_dim, upsample=True, blur_kernel=[1, 3, 3, 1]):
-        super().__init__()
-
-        if upsample:
-            self.upsample = Upsample(blur_kernel)
-
-        self.conv = ModulatedConv2d(in_channel, 3, 1, style_dim, demodulate=False)
-        self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1))
-
-    def forward(self, input, style, skip=None):
-        out = self.conv(input, style)
-        out = out + self.bias
-
-        if skip is not None:
-            skip = self.upsample(skip)
-
-            out = out + skip
-
-        return out
-
-
-class Generator(nn.Module):
-    def __init__(
-        self,
-        size,
-        style_dim,
-        n_mlp,
-        channel_multiplier=2,
-        blur_kernel=[1, 3, 3, 1],
-        lr_mlp=0.01,
-    ):
-        super().__init__()
-
-        self.size = size
-
-        self.style_dim = style_dim
-
-        layers = [PixelNorm()]
-
-        for i in range(n_mlp):
-            layers.append(
-                EqualLinear(
-                    style_dim, style_dim, lr_mul=lr_mlp, activation='fused_lrelu'
-                )
-            )
-
-        self.style = nn.Sequential(*layers)
-
-        self.channels = {
-            4: 512,
-            8: 512,
-            16: 512,
-            32: 512,
-            64: 256 * channel_multiplier,
-            128: 128 * channel_multiplier,
-            256: 64 * channel_multiplier,
-            512: 32 * channel_multiplier,
-            1024: 16 * channel_multiplier,
-        }
-
-        self.input = ConstantInput(self.channels[4])
-        self.conv1 = StyledConv(
-            self.channels[4], self.channels[4], 3, style_dim, blur_kernel=blur_kernel
-        )
-        self.to_rgb1 = ToRGB(self.channels[4], style_dim, upsample=False)
-
-        self.log_size = int(math.log(size, 2))
-        self.num_layers = (self.log_size - 2) * 2 + 1
-
-        self.convs = nn.ModuleList()
-        self.upsamples = nn.ModuleList()
-        self.to_rgbs = nn.ModuleList()
-        self.noises = nn.Module()
-
-        in_channel = self.channels[4]
-
-        for layer_idx in range(self.num_layers):
-            res = (layer_idx + 5) // 2
-            shape = [1, 1, 2 ** res, 2 ** res // 2]
-            self.noises.register_buffer(
-                "noise_{}".format(layer_idx), torch.randn(*shape)
-            )
-
-        for i in range(3, self.log_size + 1):
-            out_channel = self.channels[2 ** i]
-
-            self.convs.append(
-                StyledConv(
-                    in_channel,
-                    out_channel,
-                    3,
-                    style_dim,
-                    upsample=True,
-                    blur_kernel=blur_kernel,
-                )
-            )
-
-            self.convs.append(
-                StyledConv(
-                    out_channel, out_channel, 3, style_dim, blur_kernel=blur_kernel
-                )
-            )
-
-            self.to_rgbs.append(ToRGB(out_channel, style_dim))
-
-            in_channel = out_channel
-
-        self.n_latent = self.log_size * 2 - 2
-
-    def make_noise(self):
-        device = self.input.input.device
-
-        noises = [torch.randn(1, 1, 2 ** 2, 2 ** 2 // 2, device=device)]
-
-        for i in range(3, self.log_size + 1):
-            for _ in range(2):
-                noises.append(torch.randn(1, 1, 2 ** i, 2 ** i // 2, device=device))
-
-        return noises
-
-    def mean_latent(self, n_latent):
-        latent_in = torch.randn(
-            n_latent, self.style_dim, device=self.input.input.device
-        )
-        latent = self.style(latent_in).mean(0, keepdim=True)
-
-        return latent
-
-    def get_latent(self, input):
-        return self.style(input)
-
-    def forward(
-        self,
-        styles,
-        return_latents=False,
-        return_features=False,
-        inject_index=None,
-        truncation=1,
-        truncation_latent=None,
-        input_is_latent=False,
-        noise=None,
-        randomize_noise=True,
-    ):
-        if not input_is_latent:
-            styles = [self.style(s) for s in styles]
-
-        if noise is None:
-            if randomize_noise:
-                noise = [None] * self.num_layers
-            else:
-                noise = [
-                    getattr(self.noises, f'noise_{i}') for i in range(self.num_layers)
-                ]
-
-        if truncation < 1:
-            style_t = []
-
-            for style in styles:
-                style_t.append(
-                    truncation_latent + truncation * (style - truncation_latent)
-                )
-
-            styles = style_t
-
-        if len(styles) < 2:
-            inject_index = self.n_latent
-            if styles[0].ndim < 3:
-                latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
-            else:
-                latent = styles[0]
-
-        else:
-            if inject_index is None:
-                inject_index = random.randint(1, self.n_latent - 1)
-
-            # latent = styles[0].unsqueeze(0)
-            # if latent.shape[1] == 1:
-            #     latent = latent.repeat(1, inject_index, 1)
-            # else:
-            #     latent = latent[:, :inject_index, :]
-            latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
-            latent2 = styles[1].unsqueeze(1).repeat(1, self.n_latent - inject_index, 1)
-            # latent = styles[0][:, :inject_index, :]
-            # latent2 = styles[1][:, inject_index:, :]
-            latent = torch.cat([latent, latent2], 1)
-        out = self.input(latent)
-        out = self.conv1(out, latent[:, 0], noise=noise[0])
-
-        skip = self.to_rgb1(out, latent[:, 1])
-
-        i = 1
-        for conv1, conv2, noise1, noise2, to_rgb in zip(
-            self.convs[::2], self.convs[1::2], noise[1::2], noise[2::2], self.to_rgbs
-        ):
-            out = conv1(out, latent[:, i], noise=noise1)
-            out = conv2(out, latent[:, i + 1], noise=noise2)
-            skip = to_rgb(out, latent[:, i + 2], skip)
-
-            i += 2
-
-        image = skip
-
-        if return_latents:
-            return image, latent
-        elif return_features:
-            return image, out
-        else:
-            return image, None
-
-
-class ConvLayer(nn.Sequential):
-    def __init__(
-        self,
-        in_channel,
-        out_channel,
-        kernel_size,
-        downsample=False,
-        blur_kernel=[1, 3, 3, 1],
-        bias=True,
-        activate=True,
-    ):
-        layers = []
-
-        if downsample:
-            factor = 2
-            p = (len(blur_kernel) - factor) + (kernel_size - 1)
-            pad0 = (p + 1) // 2
-            pad1 = p // 2
-
-            layers.append(Blur(blur_kernel, pad=(pad0, pad1)))
-
-            stride = 2
-            self.padding = 0
-
-        else:
-            stride = 1
-            self.padding = kernel_size // 2
-
-        layers.append(
-            EqualConv2d(
-                in_channel,
-                out_channel,
-                kernel_size,
-                padding=self.padding,
-                stride=stride,
-                bias=bias and not activate,
-            )
-        )
-
-        if activate:
-            if bias:
-                layers.append(FusedLeakyReLU(out_channel))
-
-            else:
-                layers.append(ScaledLeakyReLU(0.2))
-
-        super().__init__(*layers)
-
-
-class ResBlock(nn.Module):
-    def __init__(self, in_channel, out_channel, blur_kernel=[1, 3, 3, 1]):
-        super().__init__()
-
-        self.conv1 = ConvLayer(in_channel, in_channel, 3)
-        self.conv2 = ConvLayer(in_channel, out_channel, 3, downsample=True)
-
-        self.skip = ConvLayer(
-            in_channel, out_channel, 1, downsample=True, activate=False, bias=False
-        )
-
-    def forward(self, input):
-        out = self.conv1(input)
-        out = self.conv2(out)
-
-        skip = self.skip(input)
-        out = (out + skip) / math.sqrt(2)
-
-        return out
-
-
-class Discriminator(nn.Module):
-    def __init__(self, size, channel_multiplier=2, blur_kernel=[1, 3, 3, 1]):
-        super().__init__()
-
-        channels = {
-            4: 512,
-            8: 512,
-            16: 512,
-            32: 512,
-            64: 256 * channel_multiplier,
-            128: 128 * channel_multiplier,
-            256: 64 * channel_multiplier,
-            512: 32 * channel_multiplier,
-            1024: 16 * channel_multiplier,
-        }
-
-        convs = [ConvLayer(3, channels[size], 1)]
-
-        log_size = int(math.log(size, 2))
-
-        in_channel = channels[size]
-
-        for i in range(log_size, 2, -1):
-            out_channel = channels[2 ** (i - 1)]
-
-            convs.append(ResBlock(in_channel, out_channel, blur_kernel))
-
-            in_channel = out_channel
-
-        self.convs = nn.Sequential(*convs)
-
-        self.stddev_group = 4
-        self.stddev_feat = 1
-
-        self.final_conv = ConvLayer(in_channel + 1, channels[4], 3)
-        self.final_linear = nn.Sequential(
-            EqualLinear(channels[4] * 4 * 4 // 2, channels[4], activation='fused_lrelu'),
-            EqualLinear(channels[4], 1),
-        )
-
-    def forward(self, input):
-        out = self.convs(input)
-
-        batch, channel, height, width = out.shape
-        group = min(batch, self.stddev_group)
-        stddev = out.view(
-            group, -1, self.stddev_feat, channel // self.stddev_feat, height, width
-        )
-        stddev = torch.sqrt(stddev.var(0, unbiased=False) + 1e-8)
-        stddev = stddev.mean([2, 3, 4], keepdims=True).squeeze(2)
-        stddev = stddev.repeat(group, 1, height, width)
-        out = torch.cat([out, stddev], 1)
-
-        out = self.final_conv(out)
-
-        out = out.view(batch, -1)
-        out = self.final_linear(out)
-
-        return out