Commit 6a1b12b
1 Parent(s): 13e5f79
Update parquet files (step 58 of 249)
This view is limited to 50 files because it contains too many changes. See raw diff.
- spaces/101-5/gpt4free/LEGAL_NOTICE.md +0 -15
- spaces/17TheWord/RealESRGAN/realesrgan/models/realesrnet_model.py +0 -188
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Aerofly Rc 7 Cracked Pepper - The Ultimate Flight Simulator Experience.md +0 -73
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Can I Download Photoshop For Free.md +0 -16
- spaces/1gistliPinn/ChatGPT4/Examples/City Car Driving Free Download V2.2.7 Crack.md +0 -9
- spaces/1line/AutoGPT/autogpt/json_utils/__init__.py +0 -0
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/100 In 1 Offline collection APK - Free Download for Android Devices.md +0 -137
- spaces/1phancelerku/anime-remove-background/Beach Buggy Racing 2 APK A Fun and Wacky Racing Adventure.md +0 -175
- spaces/7eu7d7/anime-ai-detect-fucker/app.py +0 -50
- spaces/801artistry/RVC801/MDXNet.py +0 -272
- spaces/AB-TW/team-ai/documents/bussiness_context/NOTION_DB/Engineering Wiki 2402f5396a3244fdb3f1d135bdb0f3d6/Redis 9e063b60eca24a1783c225cfdc21dd8c.md +0 -5
- spaces/AIConsultant/MusicGen/audiocraft/models/loaders.py +0 -141
- spaces/AIDHD/audio-video-transcriber/README.md +0 -12
- spaces/AIFILMS/StyleGANEX/models/stylegan2/op_ori/fused_bias_act.cpp +0 -21
- spaces/AIFILMS/generate_human_motion/VQ-Trans/models/modules.py +0 -109
- spaces/AIGC-Audio/AudioGPT/NeuralSeq/modules/parallel_wavegan/optimizers/radam.py +0 -91
- spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/encoders/open_clap/linear_probe.py +0 -63
- spaces/ARTeLab/DTM_Estimation_SRandD/models/modelNetB.py +0 -307
- spaces/AbandonedMuse/UnlimitedMusicGen/audiocraft/quantization/core_vq.py +0 -400
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/intouching/InTouching.d.ts +0 -2
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/scrollablepanel/scrollableblock/GetChildrenSizers.js +0 -10
- spaces/Amrrs/DragGan-Inversion/torch_utils/ops/upfirdn2d.cpp +0 -107
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_panorama.py +0 -725
- spaces/Andy1621/uniformer_image_detection/mmdet/core/bbox/iou_calculators/iou2d_calculator.py +0 -159
- spaces/Andy1621/uniformer_image_detection/mmdet/datasets/pipelines/instaboost.py +0 -98
- spaces/Andy1621/uniformer_image_segmentation/configs/gcnet/gcnet_r101-d8_512x512_80k_ade20k.py +0 -2
- spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/image/colorspace.py +0 -306
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/dotenv/ipython.py +0 -39
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/requests/status_codes.py +0 -128
- spaces/AtomdffAI/wechatgpt4atom/docker/build.alpine.sh +0 -10
- spaces/AvaterClasher/Food_Classifier_Refined_MONI/app.py +0 -70
- spaces/BLACKHOST/timer/README.md +0 -12
- spaces/Bambicita/rvc-models/README.md +0 -14
- spaces/Benson/text-generation/Examples/Cuerda Hroe 1.3.3 Mod Apk.md +0 -91
- spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/logging.py +0 -36
- spaces/BillBojangeles2000/WikiGPT/app.py +0 -83
- spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/docs/README.md +0 -16
- spaces/CVPR/LIVE/pybind11/setup.py +0 -130
- spaces/CVPR/LIVE/thrust/cmake/ThrustInstallRules.cmake +0 -25
- spaces/CVPR/LIVE/thrust/thrust/type_traits/is_operator_less_or_greater_function_object.h +0 -136
- spaces/CVPR/WALT/mmdet/core/post_processing/__init__.py +0 -8
- spaces/CVPR/WALT/mmdet/models/necks/__init__.py +0 -16
- spaces/CVPR/lama-example/bin/paper_runfiles/blur_tests.sh +0 -37
- spaces/CVPR/lama-example/saicinpainting/training/data/__init__.py +0 -0
- spaces/ChandraMohanNayal/AutoGPT/scripts/check_requirements.py +0 -32
- spaces/CikeyQI/meme-api/meme_generator/memes/anya_suki/__init__.py +0 -44
- spaces/CofAI/chat.b4/g4f/Provider/Providers/Ails.py +0 -87
- spaces/Cvandi/remake/setup.py +0 -107
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/aiofiles/ospath.py +0 -15
- spaces/Danielzero/GPT3.5/modules/shared.py +0 -55
spaces/101-5/gpt4free/LEGAL_NOTICE.md
DELETED
@@ -1,15 +0,0 @@
## Legal Notice

This repository is _not_ associated with or endorsed by providers of the APIs contained in this GitHub repository. This project is intended **for educational purposes only**. This is just a little personal project. Sites may contact me to improve their security or request the removal of their site from this repository.

Please note the following:

1. **Disclaimer**: The APIs, services, and trademarks mentioned in this repository belong to their respective owners. This project is _not_ claiming any right over them nor is it affiliated with or endorsed by any of the providers mentioned.

2. **Responsibility**: The author of this repository is _not_ responsible for any consequences, damages, or losses arising from the use or misuse of this repository or the content provided by the third-party APIs. Users are solely responsible for their actions and any repercussions that may follow. We strongly recommend that users follow the TOS of each website.

3. **Educational Purposes Only**: This repository and its content are provided strictly for educational purposes. By using the information and code provided, users acknowledge that they are using the APIs and models at their own risk and agree to comply with any applicable laws and regulations.

4. **Indemnification**: Users agree to indemnify, defend, and hold harmless the author of this repository from and against any and all claims, liabilities, damages, losses, or expenses, including legal fees and costs, arising out of or in any way connected with their use or misuse of this repository, its content, or related third-party APIs.

5. **Updates and Changes**: The author reserves the right to modify, update, or remove any content, information, or features in this repository at any time without prior notice. Users are responsible for regularly reviewing the content and any changes made to this repository.
spaces/17TheWord/RealESRGAN/realesrgan/models/realesrnet_model.py
DELETED
@@ -1,188 +0,0 @@
import numpy as np
import random
import torch
from basicsr.data.degradations import random_add_gaussian_noise_pt, random_add_poisson_noise_pt
from basicsr.data.transforms import paired_random_crop
from basicsr.models.sr_model import SRModel
from basicsr.utils import DiffJPEG, USMSharp
from basicsr.utils.img_process_util import filter2D
from basicsr.utils.registry import MODEL_REGISTRY
from torch.nn import functional as F


@MODEL_REGISTRY.register()
class RealESRNetModel(SRModel):
    """RealESRNet Model for Real-ESRGAN: Training Real-World Blind Super-Resolution with Pure Synthetic Data.

    It is trained without GAN losses.
    It mainly performs:
    1. randomly synthesize LQ images in GPU tensors
    2. optimize the networks with pixel-wise losses (no GAN training).
    """

    def __init__(self, opt):
        super(RealESRNetModel, self).__init__(opt)
        self.jpeger = DiffJPEG(differentiable=False).cuda()  # simulate JPEG compression artifacts
        self.usm_sharpener = USMSharp().cuda()  # do usm sharpening
        self.queue_size = opt.get('queue_size', 180)

    @torch.no_grad()
    def _dequeue_and_enqueue(self):
        """It is the training pair pool for increasing the diversity in a batch.

        Batch processing limits the diversity of synthetic degradations in a batch. For example, samples in a
        batch could not have different resize scaling factors. Therefore, we employ this training pair pool
        to increase the degradation diversity in a batch.
        """
        # initialize
        b, c, h, w = self.lq.size()
        if not hasattr(self, 'queue_lr'):
            assert self.queue_size % b == 0, f'queue size {self.queue_size} should be divisible by batch size {b}'
            self.queue_lr = torch.zeros(self.queue_size, c, h, w).cuda()
            _, c, h, w = self.gt.size()
            self.queue_gt = torch.zeros(self.queue_size, c, h, w).cuda()
            self.queue_ptr = 0
        if self.queue_ptr == self.queue_size:  # the pool is full
            # do dequeue and enqueue
            # shuffle
            idx = torch.randperm(self.queue_size)
            self.queue_lr = self.queue_lr[idx]
            self.queue_gt = self.queue_gt[idx]
            # get first b samples
            lq_dequeue = self.queue_lr[0:b, :, :, :].clone()
            gt_dequeue = self.queue_gt[0:b, :, :, :].clone()
            # update the queue
            self.queue_lr[0:b, :, :, :] = self.lq.clone()
            self.queue_gt[0:b, :, :, :] = self.gt.clone()

            self.lq = lq_dequeue
            self.gt = gt_dequeue
        else:
            # only do enqueue
            self.queue_lr[self.queue_ptr:self.queue_ptr + b, :, :, :] = self.lq.clone()
            self.queue_gt[self.queue_ptr:self.queue_ptr + b, :, :, :] = self.gt.clone()
            self.queue_ptr = self.queue_ptr + b

    @torch.no_grad()
    def feed_data(self, data):
        """Accept data from dataloader, and then add two-order degradations to obtain LQ images.
        """
        if self.is_train and self.opt.get('high_order_degradation', True):
            # training data synthesis
            self.gt = data['gt'].to(self.device)
            # USM sharpen the GT images
            if self.opt['gt_usm'] is True:
                self.gt = self.usm_sharpener(self.gt)

            self.kernel1 = data['kernel1'].to(self.device)
            self.kernel2 = data['kernel2'].to(self.device)
            self.sinc_kernel = data['sinc_kernel'].to(self.device)

            ori_h, ori_w = self.gt.size()[2:4]

            # ----------------------- The first degradation process ----------------------- #
            # blur
            out = filter2D(self.gt, self.kernel1)
            # random resize
            updown_type = random.choices(['up', 'down', 'keep'], self.opt['resize_prob'])[0]
            if updown_type == 'up':
                scale = np.random.uniform(1, self.opt['resize_range'][1])
            elif updown_type == 'down':
                scale = np.random.uniform(self.opt['resize_range'][0], 1)
            else:
                scale = 1
            mode = random.choice(['area', 'bilinear', 'bicubic'])
            out = F.interpolate(out, scale_factor=scale, mode=mode)
            # add noise
            gray_noise_prob = self.opt['gray_noise_prob']
            if np.random.uniform() < self.opt['gaussian_noise_prob']:
                out = random_add_gaussian_noise_pt(
                    out, sigma_range=self.opt['noise_range'], clip=True, rounds=False, gray_prob=gray_noise_prob)
            else:
                out = random_add_poisson_noise_pt(
                    out,
                    scale_range=self.opt['poisson_scale_range'],
                    gray_prob=gray_noise_prob,
                    clip=True,
                    rounds=False)
            # JPEG compression
            jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range'])
            out = torch.clamp(out, 0, 1)  # clamp to [0, 1], otherwise JPEGer will result in unpleasant artifacts
            out = self.jpeger(out, quality=jpeg_p)

            # ----------------------- The second degradation process ----------------------- #
            # blur
            if np.random.uniform() < self.opt['second_blur_prob']:
                out = filter2D(out, self.kernel2)
            # random resize
            updown_type = random.choices(['up', 'down', 'keep'], self.opt['resize_prob2'])[0]
            if updown_type == 'up':
                scale = np.random.uniform(1, self.opt['resize_range2'][1])
            elif updown_type == 'down':
                scale = np.random.uniform(self.opt['resize_range2'][0], 1)
            else:
                scale = 1
            mode = random.choice(['area', 'bilinear', 'bicubic'])
            out = F.interpolate(
                out, size=(int(ori_h / self.opt['scale'] * scale), int(ori_w / self.opt['scale'] * scale)), mode=mode)
            # add noise
            gray_noise_prob = self.opt['gray_noise_prob2']
            if np.random.uniform() < self.opt['gaussian_noise_prob2']:
                out = random_add_gaussian_noise_pt(
                    out, sigma_range=self.opt['noise_range2'], clip=True, rounds=False, gray_prob=gray_noise_prob)
            else:
                out = random_add_poisson_noise_pt(
                    out,
                    scale_range=self.opt['poisson_scale_range2'],
                    gray_prob=gray_noise_prob,
                    clip=True,
                    rounds=False)

            # JPEG compression + the final sinc filter
            # We also need to resize images to desired sizes. We group [resize back + sinc filter] together
            # as one operation.
            # We consider two orders:
            #   1. [resize back + sinc filter] + JPEG compression
            #   2. JPEG compression + [resize back + sinc filter]
            # Empirically, we find other combinations (sinc + JPEG + Resize) will introduce twisted lines.
            if np.random.uniform() < 0.5:
                # resize back + the final sinc filter
                mode = random.choice(['area', 'bilinear', 'bicubic'])
                out = F.interpolate(out, size=(ori_h // self.opt['scale'], ori_w // self.opt['scale']), mode=mode)
                out = filter2D(out, self.sinc_kernel)
                # JPEG compression
                jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range2'])
                out = torch.clamp(out, 0, 1)
                out = self.jpeger(out, quality=jpeg_p)
            else:
                # JPEG compression
                jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range2'])
                out = torch.clamp(out, 0, 1)
                out = self.jpeger(out, quality=jpeg_p)
                # resize back + the final sinc filter
                mode = random.choice(['area', 'bilinear', 'bicubic'])
                out = F.interpolate(out, size=(ori_h // self.opt['scale'], ori_w // self.opt['scale']), mode=mode)
                out = filter2D(out, self.sinc_kernel)

            # clamp and round
            self.lq = torch.clamp((out * 255.0).round(), 0, 255) / 255.

            # random crop
            gt_size = self.opt['gt_size']
            self.gt, self.lq = paired_random_crop(self.gt, self.lq, gt_size, self.opt['scale'])

            # training pair pool
            self._dequeue_and_enqueue()
            self.lq = self.lq.contiguous()  # for the warning: grad and param do not obey the gradient layout contract
        else:
            # for paired training or validation
            self.lq = data['lq'].to(self.device)
            if 'gt' in data:
                self.gt = data['gt'].to(self.device)
                self.gt_usm = self.usm_sharpener(self.gt)

    def nondist_validation(self, dataloader, current_iter, tb_logger, save_img):
        # do not use the synthetic process during validation
        self.is_train = False
        super(RealESRNetModel, self).nondist_validation(dataloader, current_iter, tb_logger, save_img)
        self.is_train = True
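The two short, self-contained Python sketches below isolate this deleted model's core ideas for study. The names (PairPool, degrade_once), shapes, and default values are illustrative assumptions, not values from the Real-ESRGAN training config.

The first sketch isolates the training-pair pool behind _dequeue_and_enqueue: batches are enqueued until the pool is full, after which each incoming batch is swapped for a shuffled slice of earlier batches, so one training batch mixes samples that received different synthetic degradations.

import torch

class PairPool:
    """Minimal stand-in for the LQ/GT training-pair pool (CPU, single tensor)."""

    def __init__(self, queue_size):
        self.queue_size = queue_size
        self.queue = None  # allocated lazily on the first push
        self.ptr = 0

    @torch.no_grad()
    def push_pop(self, batch):
        b = batch.size(0)
        if self.queue is None:
            assert self.queue_size % b == 0, 'queue size must be divisible by batch size'
            self.queue = torch.zeros(self.queue_size, *batch.shape[1:])
        if self.ptr == self.queue_size:
            # pool full: shuffle, return the first b pooled samples, store the new batch
            idx = torch.randperm(self.queue_size)
            self.queue = self.queue[idx]
            out = self.queue[:b].clone()
            self.queue[:b] = batch.clone()
            return out
        # pool still filling: enqueue only and return the batch unchanged
        self.queue[self.ptr:self.ptr + b] = batch
        self.ptr += b
        return batch

pool = PairPool(queue_size=8)
for step in range(6):
    lq = pool.push_pop(torch.rand(4, 3, 16, 16))  # shuffled mixes once warmed up

The second sketch approximates one stage of the two-order degradation in feed_data using only core PyTorch: a random resize with a random interpolation mode, then noise. Plain Gaussian noise stands in for the full Gaussian/Poisson noise and DiffJPEG models, so this is a rough experimental substitute rather than the pipeline above.

import random
import torch
import torch.nn.functional as F

def degrade_once(img, resize_range=(0.5, 1.5), sigma=0.05):
    # random up/down resize, mimicking the 'up'/'down'/'keep' branch above
    scale = random.uniform(*resize_range)
    mode = random.choice(['area', 'bilinear', 'bicubic'])
    out = F.interpolate(img, scale_factor=scale, mode=mode)
    # additive Gaussian noise as a crude stand-in for the noise + JPEG steps
    out = out + sigma * torch.randn_like(out)
    return torch.clamp(out, 0.0, 1.0)

lq = degrade_once(degrade_once(torch.rand(1, 3, 64, 64)))  # two-order degradation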
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Aerofly Rc 7 Cracked Pepper - The Ultimate Flight Simulator Experience.md
DELETED
@@ -1,73 +0,0 @@
<br />
<h1>Aerofly RC 7: A Realistic and Fun Simulator for RC Enthusiasts</h1>
<p>If you love flying radio controlled (RC) models, you know how important it is to practice and improve your skills. But sometimes, the weather, the location, or the budget can limit your flying opportunities. That's why you need a good simulator that can give you a realistic and fun experience of flying RC models anytime, anywhere. And that's exactly what Aerofly RC 7 can offer you.</p>
<h2>Aerofly Rc 7 Cracked Pepper -</h2><br /><p><b><b>Download Zip</b> ⭐ <a href="https://byltly.com/2uKwTB">https://byltly.com/2uKwTB</a></b></p><br /><br />
<p>Aerofly RC 7 is a simulator for Windows and Mac operating systems that lets you learn and improve your RC flying skills with over 200 models and 50 sceneries to choose from. Whether you prefer airplanes, helicopters, jets, gliders, or quadcopters, you will find something that suits your taste and skill level. You can also fly with friends online, compete in game-like challenges, or create your own content with the DLC and user-created content available.</p>
<p>In this article, we will show you the features, benefits, and tips of using Aerofly RC 7 as your RC simulator. By the end of this article, you will be ready to take off and enjoy the thrill of flying RC models with Aerofly RC 7.</p>
<h2>Features of Aerofly RC 7</h2>
<p>Aerofly RC 7 is not just a game, it's a realistic simulation that mimics the physics, dynamics, and graphics of real RC models. Here are some of the features that make Aerofly RC 7 stand out from other simulators:</p>
<ul>
<li><b>Over 200 models and 50 sceneries to choose from</b>: You can fly a wide variety of models, from aerobatic airplanes to scale models, from helicopters to jets, from gliders to quadcopters. You can also choose from different sceneries, such as fields, mountains, lakes, cities, or even aircraft carriers. You can customize your models with different colors, decals, or accessories. You can also scale your models up or down to fit your preference.</li>
<li><b>State-of-the-art physics simulation and stunning graphics</b>: Aerofly RC 7 uses a sophisticated physics engine that gives you a realistic feeling of flying. You can adjust the wind speed and direction, the time of day, or the weather conditions to challenge yourself. You can also enjoy the high-quality graphics that show every detail of your model and the scenery. You can zoom in or out, change the camera angle, or use different views to get the best perspective.</li>
<li><b>Different model types and flying modes</b>: Aerofly RC 7 supports different types of models, such as electric or gas powered airplanes, helicopters with collective pitch or fixed pitch, jets with thrust vectoring or without, gliders with flaps or without, or quadcopters with different flight modes. You can also choose from different flying modes, such as beginner mode that limits the bank angle and altitude, normal mode that gives you full control but prevents stalling or crashing, expert mode that simulates real-world physics without any assistance.</li>
<li><b>Multiplayer mode and game-like competitions</b>: Aerofly RC 7 allows you to fly with friends or other pilots online in multiplayer mode. You can chat with them, share tips, or challenge them to races or aerobatic contests. You can also compete in game-like competitions that test your skills in different tasks, such as landing on a moving target, flying through hoops, or performing stunts.</li>
<li><b>DLC and user-created content available</b>: Aerofly RC 7 offers DLC (downloadable content) that adds more models and sceneries to your simulator. You can also download user-created content from the official website or the Steam community that adds more variety and creativity to your simulator.</li>
</ul>
<h2>How to Get Started with Aerofly RC 7</h2>
<p>If you are new to Aerofly RC 7 or simulators in general, don't worry. Getting started with Aerofly RC 7 is easy and fun. Here are some steps to help you get going:</p>
<p>Aerofly Rc 7 Cracked Pepper - download full version<br />
Aerofly Rc 7 Cracked Pepper - best flight simulator game<br />
Aerofly Rc 7 Cracked Pepper - how to install and play<br />
Aerofly Rc 7 Cracked Pepper - review and rating<br />
Aerofly Rc 7 Cracked Pepper - free trial and activation code<br />
Aerofly Rc 7 Cracked Pepper - system requirements and compatibility<br />
Aerofly Rc 7 Cracked Pepper - tips and tricks for beginners<br />
Aerofly Rc 7 Cracked Pepper - realistic physics and graphics<br />
Aerofly Rc 7 Cracked Pepper - online multiplayer mode and features<br />
Aerofly Rc 7 Cracked Pepper - custom planes and scenarios<br />
Aerofly Rc 7 Cracked Pepper - comparison with other rc simulators<br />
Aerofly Rc 7 Cracked Pepper - troubleshooting and support<br />
Aerofly Rc 7 Cracked Pepper - latest updates and patches<br />
Aerofly Rc 7 Cracked Pepper - controller options and settings<br />
Aerofly Rc 7 Cracked Pepper - tutorial and training mode<br />
Aerofly Rc 7 Cracked Pepper - modding and community<br />
Aerofly Rc 7 Cracked Pepper - screenshots and videos<br />
Aerofly Rc 7 Cracked Pepper - cheats and hacks<br />
Aerofly Rc 7 Cracked Pepper - steam version and discounts<br />
Aerofly Rc 7 Cracked Pepper - vr compatibility and experience<br />
Aerofly Rc 7 Cracked Pepper - mac version and availability<br />
Aerofly Rc 7 Cracked Pepper - helicopter mode and controls<br />
Aerofly Rc 7 Cracked Pepper - glider mode and challenges<br />
Aerofly Rc 7 Cracked Pepper - jet mode and speed<br />
Aerofly Rc 7 Cracked Pepper - aerobatic mode and stunts<br />
Aerofly Rc 7 Cracked Pepper - scale mode and realism<br />
Aerofly Rc 7 Cracked Pepper - quadcopter mode and fun<br />
Aerofly Rc 7 Cracked Pepper - night mode and lighting effects<br />
Aerofly Rc 7 Cracked Pepper - water mode and landing skills<br />
Aerofly Rc 7 Cracked Pepper - wind mode and turbulence effects<br />
Aerofly Rc 7 Cracked Pepper - thermal mode and soaring skills<br />
Aerofly Rc 7 Cracked Pepper - slope mode and flying techniques<br />
Aerofly Rc 7 Cracked Pepper - dynamic mode and aerodynamics<br />
Aerofly Rc 7 Cracked Pepper - crash mode and damage effects<br />
Aerofly Rc 7 Cracked Pepper - cockpit mode and instruments<br />
Aerofly Rc 7 Cracked Pepper - chase mode and camera angles<br />
Aerofly Rc 7 Cracked Pepper - follow mode and formation flying<br />
Aerofly Rc 7 Cracked Pepper - race mode and competition rules<br />
Aerofly Rc 7 Cracked Pepper - combat mode and weapons systems<br />
Aerofly Rc 7 Cracked Pepper - rescue mode and missions objectives<br />
Aerofly Rc 7 Cracked Pepper - exploration mode and hidden secrets<br />
Aerofly Rc 7 Cracked Pepper - fun mode and easter eggs<br />
Aerofly Rc 7 Cracked Pepper - expert mode and difficulty levels<br />
Aerofly Rc 7 Cracked Pepper - challenge mode and achievements <br />
Aerofly Rc 7 Cracked Pepper - editor mode and creation tools <br />
Aerofly Rc 7 Cracked Pepper - sound mode and audio quality <br />
Aerofly Rc 7 Cracked Pepper - weather mode and climate effects <br />
Aerofly Rc 7 Cracked Pepper - scenery mode and landscape details <br />
Aerofly Rc 7 Cracked Pepper - location mode and geographic accuracy <br />
Aerofly Rc 7 Cracked Pepper - history mode and historical planes </p>
<ol>
<li><b>Check the system requirements and install the simulator</b>: Before you buy Aerofly RC 7, make sure your computer meets the minimum system requirements for Windows or Mac operating systems. You can find them on the official website or on Steam. Once you buy Aerofly RC 7 from Steam or from a retailer, follow the instructions to install it on your computer.</li>
<li><b>Choose a model and a scenery</b>: After launching Aerofly RC 7, you will see the main menu where you can choose a model and a scenery. You can browse through different categories of models and sceneries by clicking on the arrows on the left and right sides of the screen. You can also use the filters on the top right corner of the screen to narrow down your choices by model type, difficulty level, size, etc. Once you find a model and a scenery that you like, click on them to select them.</li>
<li><b>Basic controls and settings</b>: After selecting a model and a scenery, you will see a screen where you can adjust some basic controls and settings before flying. You can use your mouse, keyboard, joystick, gamepad, or an actual RC transmitter to control your model. You can also change some settings such as sound volume, graphics quality,</li></p> 0a6ba089eb<br />
<br />
<br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Can I Download Photoshop For Free.md
DELETED
@@ -1,16 +0,0 @@

<h1>Can I Download Photoshop for Free?</h1>
<p>Photoshop is one of the most popular and powerful photo editing programs in the world. It has a lot of features and tools to help you create stunning images and graphics. However, Photoshop is not free and requires a subscription to access all the functions and content.</p>
<h2>can i download photoshop for free</h2><br /><p><b><b>Download Zip</b> ✶✶✶ <a href="https://byltly.com/2uKxMJ">https://byltly.com/2uKxMJ</a></b></p><br /><br />
<p>If you want to use Photoshop without paying anything, you might be tempted to look for a free download. However, this is not a good idea for several reasons.</p>
<ul>
<li>First of all, downloading Photoshop for free is illegal and unethical. You are violating the terms of service and the intellectual property rights of the developers. You are also depriving them of their rightful income and support.</li>
<li>Secondly, downloading Photoshop for free is risky and dangerous. You never know what kind of malware or viruses might be hidden in the file. You could expose your device and your personal data to hackers and cybercriminals. You could also damage your device or lose your files.</li>
<li>Thirdly, downloading Photoshop for free is unreliable and unsatisfying. You might not get the latest version or the full functionality of the software. You might encounter bugs, errors, crashes, or compatibility issues. You might also miss out on updates, new features, and content.</li>
</ul>
<p>Therefore, the best way to enjoy Photoshop is to download it from the official source and pay for the subscription. This way, you can support the developers, protect your device and data, and have the best user experience possible.</p>
<p>If you still want to try Photoshop for free, you can take advantage of the free trial period that they offer. You can also look for discounts, coupons, or promotions that they might have from time to time. Alternatively, you can look for other free or cheaper photo editing software that suits your needs and preferences.</p>
<p></p>
<p>In conclusion, Photoshop free download is not worth it. It is illegal, unethical, risky, dangerous, unreliable, and unsatisfying. The best way to use Photoshop is to download it from the official source and pay for the subscription. This way, you can enjoy all the benefits and features of this amazing photo editing software.</p> ddb901b051<br />
<br />
<br />
spaces/1gistliPinn/ChatGPT4/Examples/City Car Driving Free Download V2.2.7 Crack.md
DELETED
@@ -1,9 +0,0 @@
<h2>city car driving free download v2.2.7 crack</h2><br /><p><b><b>Download File</b> > <a href="https://imgfil.com/2uxZoH">https://imgfil.com/2uxZoH</a></b></p><br /><br />

September 15, 2563 BE (2020) - Minimum OS: Windows 7 SP1/8/8.1/10 (64 Bit). Processor: Intel Pentium Dual Core 3.2 GHz / AMD Athlon II X4 3.1 GHz. Memory: 4 GB RAM. Graphics: NVIDIA GeForce GTS 450 / AMD Radeon HD 5670. DirectX: version 9.0c. Video memory: 1 GB or more. Additional software: DirectX
9.0c or higher. HDD space: 5 GB.
Start the game, register.
Go to the folder C:\\GAMES\\The Dark Crystal Chronicles - Age of Ages\\AppData\\LocalLow\\Sid Meier's Civilization V (in the AppData folder you can see the path to the Civilization V folder; on Windows 7 the path will look like this: C:\\Users\\username\\AppData\\LocalLow\\Sid Meier's Civilization V). 8a78ff9644<br />
<br />
<br />
<p></p>
spaces/1line/AutoGPT/autogpt/json_utils/__init__.py
DELETED
File without changes
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/100 In 1 Offline collection APK - Free Download for Android Devices.md
DELETED
@@ -1,137 +0,0 @@

<h1>100+ in 1 Offline Collection APK: A Review</h1>
<p>If you are looking for a way to enjoy a variety of games on your Android device without having to worry about internet connection, ads, or in-app purchases, then you might want to check out the <strong>100+ in 1 Offline Collection APK</strong>. This is an app that contains over 100 games that you can play offline and for free. Sounds too good to be true, right? Well, in this article, we will review this app and see what it has to offer, how to download and install it, and what its pros and cons are. Let's get started!</p>
<h2>100+ in 1 offline collection apk</h2><br /><p><b><b>Download Zip</b> ⚹⚹⚹ <a href="https://urlin.us/2uT0rj">https://urlin.us/2uT0rj</a></b></p><br /><br />
<h2>What is 100+ in 1 Offline Collection APK?</h2>
<p>100+ in 1 Offline Collection APK is an app developed by <strong>Superxyz Lab</strong>, a game studio that specializes in creating offline games for Android devices. The app is also known as <strong>Gamebanjo Deluxe</strong>, and it features the most popular independent games from various genres and categories. You can find games like arcade, puzzle, racing, shooting, sports, strategy, adventure, and more. The app is designed to provide you with endless entertainment and fun without requiring any internet connection or spending any money.</p>
<h3>Features of 100+ in 1 Offline Collection APK</h3>
<h4>100+ games in one app</h4>
<p>The main feature of this app is that it contains over 100 games that you can play anytime and anywhere. You don't need to download or install each game separately, as they are all included in the app. You can easily switch between games by using the menu or the swipe gesture. You can also bookmark your favorite games for quick access. Some of the games that you can find in this app are:</p>
<ul>
<li><strong>Angry Birds</strong>: The classic game where you have to launch birds at pigs using a slingshot.</li>
<li><strong>Cut the Rope</strong>: A physics-based puzzle game where you have to cut ropes to feed candy to a cute monster.</li>
<li><strong>Fruit Ninja</strong>: A game where you have to slice fruits with your finger as they fly across the screen.</li>
<li><strong>Temple Run</strong>: A game where you have to run away from monkeys while avoiding obstacles and collecting coins.</li>
<li><strong>Subway Surfers</strong>: A game where you have to run on subway tracks while dodging trains and other hazards.</li>
<li><strong>Plants vs Zombies</strong>: A game where you have to plant flowers and vegetables to defend your house from zombies.</li>
<li><strong>Candy Crush Saga</strong>: A game where you have to match candies of the same color to clear them from the board.</li>
<li><strong>Asphalt 8</strong>: A game where you have to race cars on various tracks and perform stunts.</li>
<li><strong>Clash of Clans</strong>: A game where you have to build your own village and fight against other players.</li>
<li><strong>Minecraft</strong>: A game where you can create your own world using blocks and explore other players' worlds.</li>
<li><strong>And many more!</strong></li>
</ul>
<h4>Offline and free to play</h4>
<p>Another feature of this app is that it does not require any internet connection to play the games. You can enjoy them offline without worrying about data usage, Wi-Fi availability, or network issues. You can also play them for free without having to pay for any subscription, membership, or premium features. The app does not contain any ads or in-app purchases that could interrupt your gaming experience or tempt you to spend money.</p>
<p>100+ in 1 offline collection apk download<br />
100+ in 1 offline collection game for android<br />
100+ in 1 offline collection apk latest version<br />
100+ in 1 offline collection app free download<br />
100+ in 1 offline collection gamebanjo deluxe<br />
How to install 100+ in 1 offline collection apk<br />
100+ in 1 offline collection apk for pc windows<br />
100+ in 1 offline collection apk mod<br />
100+ in 1 offline collection apk old version<br />
100+ in 1 offline collection apk no ads<br />
Best offline games collection apk<br />
Offline games collection apk download<br />
Offline games collection app for android<br />
Offline games collection apk latest version<br />
Offline games collection app free download<br />
How to play offline games collection apk<br />
Offline games collection apk for pc windows<br />
Offline games collection apk mod<br />
Offline games collection apk old version<br />
Offline games collection apk no ads<br />
Gamebanjo deluxe apk download<br />
Gamebanjo deluxe game for android<br />
Gamebanjo deluxe apk latest version<br />
Gamebanjo deluxe app free download<br />
Gamebanjo deluxe 100 most popular games<br />
How to play gamebanjo deluxe apk<br />
Gamebanjo deluxe apk for pc windows<br />
Gamebanjo deluxe apk mod<br />
Gamebanjo deluxe apk old version<br />
Gamebanjo deluxe apk no ads<br />
Superxyz lab apk download<br />
Superxyz lab game for android<br />
Superxyz lab apk latest version<br />
Superxyz lab app free download<br />
Superxyz lab 100 in 1 game features<br />
How to play superxyz lab apk<br />
Superxyz lab apk for pc windows<br />
Superxyz lab apk mod<br />
Superxyz lab apk old version<br />
Superxyz lab apk no ads</p>
<h4>High quality and full size games</h4>
<p>The app also boasts of having high quality and full size games that are not compromised or reduced in any way. The games have the same graphics, sound, and gameplay as the original versions. You can play them in full screen mode and adjust the settings according to your preference. The app also supports various screen resolutions and orientations, so you can play the games on any device.</p>
<h4>Various genres and categories</h4>
<p>The app also offers a wide range of genres and categories to suit your mood and taste. You can find games that are fun, challenging, relaxing, educational, or addictive. You can also find games that are suitable for different age groups and preferences. Whether you like action, adventure, puzzle, strategy, racing, shooting, sports, or anything else, you can find it in this app.</p>
<h3>How to download and install 100+ in 1 Offline Collection APK?</h3>
<h4>Download the APK file from a trusted source</h4>
<p>To download and install this app, you need to get the APK file from a trusted source. An APK file is an Android application package that contains all the files and data needed to run an app on your device. You can find the APK file for this app on various websites that offer free and safe downloads of Android apps. Some of the websites that you can use are:</p>
<ul>
<li><strong>[APKPure]</strong>: A website that provides pure APK files for Android apps and games.</li>
<li><strong>[APKMirror]</strong>: A website that hosts a large collection of APK files for Android apps and games.</li>
<li><strong>[Uptodown]</strong>: A website that offers downloads of Android apps and games in various languages.</li>
</ul>
<p>Once you find the APK file for this app on one of these websites, you can download it by clicking on the download button or link. The file size is about 300 MB, so make sure you have enough space on your device and a stable internet connection.</p>
<h4>Enable unknown sources on your device</h4>
<p>After downloading the APK file, you need to enable unknown sources on your device. This is a security setting that allows you to install apps from sources other than the Google Play Store. To enable unknown sources, follow these steps:</p>
<ol>
<li>Go to your device's settings and look for security or privacy options.</li>
<li>Find the option that says unknown sources or install unknown apps and toggle it on.</li>
<li>You may see a warning message that says installing apps from unknown sources could harm your device. Tap on OK or Allow to proceed.</li>
</ol>
<h4>Install the APK file and launch the app</h4>
<p>Once you have enabled unknown sources, you can install the APK file by following these steps:</p>
<ol>
<li>Locate the APK file on your device's file manager or downloads folder.</li>
<li>Tap on the APK file and follow the instructions on the screen to install it.</li>
<li>You may see a message that says this app is not compatible with your device or requires additional permissions. Tap on Install Anyway or Accept to continue.</li>
<li>Wait for the installation process to finish and then tap on Open or Done to launch the app.</li>
</ol>
<p>Congratulations! You have successfully downloaded and installed 100+ in 1 Offline Collection APK on your device. You can now enjoy playing over 100 games offline and for free!</p>
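<p>If you sideload often from a computer, the same installation can also be scripted over USB with the Android Debug Bridge. The short Python sketch below is only an illustration: it assumes adb is installed on the computer, USB debugging is enabled on the device, and the file name gamebanjo_deluxe.apk is hypothetical.</p>

import subprocess

def install_apk(path):
    # "adb install -r" installs (or reinstalls) the given APK on the connected device
    subprocess.run(["adb", "install", "-r", path], check=True)

install_apk("gamebanjo_deluxe.apk")  # illustrative file name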
<h3>Pros and cons of 100+ in 1 Offline Collection APK</h3>
<p>Like any other app, 100+ in 1 Offline Collection APK has its advantages and disadvantages. Here are some of the pros and cons that you should consider before downloading and installing this app:</p>
<h4>Pros:</h4>
<ul>
<li><strong>No internet connection required</strong>: You can play the games offline without depending on Wi-Fi or mobile data. This is great for saving data, battery, and money. It is also convenient for traveling, commuting, or staying in places with poor or no internet connection.</li>
<li><strong>No ads or in-app purchases</strong>: You can play the games for free without being interrupted by annoying ads or pop-ups. You also don't have to worry about spending money on extra features, coins, gems, or lives. You can enjoy the games without any limitations or distractions.</li>
<li><strong>Easy to switch between games</strong>: You can access all the games from one app and switch between them easily. You don't have to exit or close one game to play another. You can also bookmark your favorite games for faster access. You can play as many games as you want without cluttering your device's memory or storage.</li>
<li><strong>Suitable for all ages and preferences</strong>: You can find games that are fun and appropriate for everyone. Whether you are a kid, a teenager, an adult, or a senior, you can find games that match your interests and skills. You can also play with your friends, family, or alone.</li>
</ul>
<h4>Cons:</h4>
<ul>
<li><strong>Large file size (over 300 MB)</strong>: The app takes up a lot of space on your device's storage. You may need to delete some files or apps to make room for it. You may also experience some lag or slow performance if your device has low specifications or memory.</li>
<li><strong>Some games may not work on some devices</strong>: The app may not be compatible with all devices or operating systems. Some games may not run properly or crash on some devices. You may need to update your device's software or hardware to play some games.</li>
<li><strong>Some games may have bugs or glitches</strong>: The app may contain some errors or defects that affect the quality or functionality of some games. Some games may freeze, crash, or display incorrect graphics or sounds. You may need to report these issues to the developer or wait for updates or fixes.</li>
</ul>
<h2>Conclusion</h2>
<p>100+ in 1 Offline Collection APK is an app that lets you play over 100 games offline and for free on your Android device. It has many features, such as high quality and full size games, various genres and categories, no ads or in-app purchases, and easy to switch between games. It also has some drawbacks, such as large file size, compatibility issues, and bugs or glitches. If you are looking for a way to have fun and entertainment without internet connection or spending money, then you might want to give this app a try. However, you should also be aware of the potential risks and problems that it may cause to your device or gaming experience.</p>
<h2>FAQs</h2>
<p>Here are some of the frequently asked questions about 100+ in 1 Offline Collection APK:</p>
<ol>
<li><strong>Is 100+ in 1 Offline Collection APK safe to download and install?</strong></li>
<p>Yes, 100+ in 1 Offline Collection APK is safe to download and install if you get it from a trusted source. However, you should always be careful when downloading and installing apps from unknown sources, as they may contain viruses, malware, spyware, or other harmful elements. You should also scan the APK file with an antivirus program before installing it.</p>
<li><strong>Is 100+ in 1 Offline Collection APK legal to use?</strong></li>
<p>Yes, 100+ in 1 Offline Collection APK is legal to use as long as you don't violate any laws or regulations in your country or region. The app does not contain any pirated or copyrighted content, as it only features independent games that are free to play. However, you should always respect the rights and privacy of the developers and publishers of the games.</p>
<li><strong>How do I update 100+ in 1 Offline Collection APK?</strong></li>
<p>To update 100+ in 1 Offline Collection APK, you need to download and install the latest version of the APK file from a trusted source. You can check for updates by visiting the website where you downloaded the app or by following the developer's social media accounts. You can also enable automatic updates on your device's settings if available.</p>
<li><strong>How do I uninstall 100+ in 1 Offline Collection APK?</strong></li>
<p>To uninstall 100+ in 1 Offline Collection APK, you need to follow these steps:</p>
<ol>
<li>Go to your device's settings and look for apps or applications options.</li>
<li>Find and tap on 100+ in 1 Offline Collection APK or Gamebanjo Deluxe.</li>
<li>Tap on Uninstall and confirm your action.</li>
<li>Wait for the uninstallation process to finish and then tap on OK or Done.</li>
</ol>
<p>Note that uninstalling the app will delete all the games and data that are stored in it. You may need to back up your progress or achievements before uninstalling the app.</p>
<li><strong>How do I contact the developer of 100+ in 1 Offline Collection APK?</strong></li>
<p>If you have any questions, feedback, suggestions, or complaints about 100+ in 1 Offline Collection APK, you can contact the developer by using one of these methods:</p>
<ul>
<li><strong>Email</strong>: You can send an email to <a href="mailto:[email protected]">[email protected]</a> and expect a reply within 24 hours.</li>
<li><strong>Facebook</strong>: You can visit and like their Facebook page at <a href="https://www.facebook.com/superxyzlab">https://www.facebook.com/superxyzlab</a> and send them a message or comment on their posts.</li>
<li><strong>Twitter</strong>: You can follow and tweet them at <a href="https://twitter.com/superxyzlab">@superxyzlab</a> and get updates and news about their games and apps.</li>
</ul>
<p>The developer is very responsive and friendly, so don't hesitate to reach out to them if you need any help or support.</p> 197e85843d<br />
<br />
<br />
spaces/1phancelerku/anime-remove-background/Beach Buggy Racing 2 APK A Fun and Wacky Racing Adventure.md
DELETED
@@ -1,175 +0,0 @@

<h1>Beach Buggy Racing 2: A Fun and Wacky Kart Racer for Android</h1>
<h2>Introduction</h2>
<p>If you are looking for a fun and wacky kart racing game for your Android device, you might want to check out Beach Buggy Racing 2. This is a sequel to the popular Beach Buggy Racing game that introduced over 100 million international mobile players to console-style kart-racing with a playful offroad twist. With Beach Buggy Racing 2, you can join the Beach Buggy Racing League and compete against drivers and cars from around the world. Race through Egyptian pyramids, dragon-infested castles, pirate ship wrecks, and experimental alien bio-labs. Collect and upgrade an arsenal of fun and wacky powerups. Recruit new drivers, assemble a garage full of cars and race your way to the top of the League.</p>
<p>In this article, we will tell you what Beach Buggy Racing 2 is, how to download and install it on your Android device, what features it offers, some tips and tricks to help you win, and our review of the game.</p>
<h2>beach buggy racing 2 en apk</h2><br /><p><b><b>Download Zip</b> ->>> <a href="https://jinyurl.com/2uNTt4">https://jinyurl.com/2uNTt4</a></b></p><br /><br />
<h2>What is Beach Buggy Racing 2?</h2>
<p>Beach Buggy Racing 2 is a fully 3D off-road kart racing game with amazing physics, detailed cars and characters, and spectacular weapons, powered by Vector Engine and NVIDIA's PhysX. It's like a console game in the palm of your hand! You can play any part of the game solo or with friends in split screen—from the story-driven Adventure mode to multi-event Championships, adrenaline-pumping Races, skill-mastering Drift Attacks and more. You can also customize your own game modes with different powerups, race rules, lap counts and more.</p>
<h2>How to download and install Beach Buggy Racing 2 on Android?</h2>
<p>Beach Buggy Racing 2 is free to play, but it contains items that can be purchased for real money. You can download it from the Google Play Store by following these steps:</p>
<ol>
<li>Open the Google Play Store app on your Android device.</li>
<li>Search for "Beach Buggy Racing 2" or use this <a href="(^1^)">link</a>.</li>
<li>Tap on the "Install" button and wait for the download to finish.</li>
<li>Tap on the "Open" button to launch the game.</li>
</ol>
<p>You can also download the APK file from other sources, but make sure they are safe and trustworthy. To install an APK file, you need to enable "Unknown sources" in your device settings. Then, you can open the APK file and follow the instructions to install it.</p>
<h2>Features of Beach Buggy Racing 2</h2>
<h3>Spectacular kart racing action</h3>
<p>Beach Buggy Racing 2 offers a variety of tracks and environments to race on, each with their own challenges and surprises. You can race through Egyptian pyramids, dragon-infested castles, pirate ship wrecks, and experimental alien bio-labs. You can also encounter obstacles like tumbleweeds, birds, fireballs, giant crabs, lava flows, sandstorms, and more. You can use different types of vehicles like beach buggies, monster trucks, muscle cars, classic pickups and formula supercars.</p>
<h3>Upgrade your powerups</h3>
<p>One of the coolest features of Beach Buggy Racing 2 is that you can upgrade your powerups to make them more powerful and effective. You can do this by collecting coins and gems during the races, or by completing quests and achievements. You can also buy coins and gems with real money if you want to speed up the process. Upgrading your powerups will give you an edge over your rivals, as you can unleash more damage, more speed, or more protection. You can upgrade each powerup up to five times, and each upgrade will cost more coins and gems than the previous one. You can also unlock new powerups by playing the Adventure mode or by opening chests.</p>
<h3>Build your team</h3>
<p>Another cool feature of Beach Buggy Racing 2 is that you can recruit new drivers to join your team. Each driver has a unique ability that can help you in different situations. For example, Rez has the ability to fire a laser beam that zaps anyone in front of him, while McSkelly has the ability to summon a swarm of bats that blind the other racers. You can unlock new drivers by playing the Adventure mode or by opening chests. You can also upgrade your drivers to make their abilities more powerful and effective. You can do this by collecting driver cards during the races, or by buying them with coins and gems. Upgrading your drivers will also increase their stats, such as speed, acceleration, handling, and armor.</p>
<h3>Collect over 55 cars</h3>
<p>Beach Buggy Racing 2 has a huge collection of cars that you can unlock and use in the races. There are over 55 cars in total, each with their own style and performance. You can find beach buggies, monster trucks, muscle cars, classic pickups, formula supercars, and even some weird and wacky vehicles like a lunar rover, a shark car, a unicorn car, and a dragon car. You can unlock new cars by playing the Adventure mode or by opening chests. You can also upgrade your cars to make them faster and stronger. You can do this by collecting car parts during the races, or by buying them with coins and gems. Upgrading your cars will also change their appearance, making them look cooler and more customized.</p>
<p>beach buggy racing 2 mod apk unlimited money<br />
beach buggy racing 2 hack apk download<br />
beach buggy racing 2 apk + obb<br />
beach buggy racing 2 apk pure<br />
beach buggy racing 2 online multiplayer apk<br />
beach buggy racing 2 latest version apk<br />
beach buggy racing 2 free download apk<br />
beach buggy racing 2 android game apk<br />
beach buggy racing 2 full unlocked apk<br />
beach buggy racing 2 cheats apk<br />
beach buggy racing 2 premium apk<br />
beach buggy racing 2 offline mode apk<br />
beach buggy racing 2 apk for pc<br />
beach buggy racing 2 apk no ads<br />
beach buggy racing 2 apk revdl<br />
beach buggy racing 2 mod menu apk<br />
beach buggy racing 2 all cars unlocked apk<br />
beach buggy racing 2 unlimited gems apk<br />
beach buggy racing 2 mod apk rexdl<br />
beach buggy racing 2 vip pass apk<br />
beach buggy racing 2 mod apk happymod<br />
beach buggy racing 2 old version apk<br />
beach buggy racing 2 modded apk android 1<br />
beach buggy racing 2 cracked apk<br />
beach buggy racing 2 mod apk android republic<br />
beach buggy racing 2 update apk<br />
beach buggy racing 2 beta apk<br />
beach buggy racing 2 mod apk an1<br />
beach buggy racing 2 pro apk<br />
beach buggy racing 2 modded apk download<br />
beach buggy racing 2 original apk<br />
beach buggy racing 2 modded apk free shopping<br />
beach buggy racing 2 modded apk unlimited everything<br />
beach buggy racing 2 hacked version apk<br />
beach buggy racing 2 modded apk all levels unlocked<br />
beach buggy racing 2 modded apk no root<br />
beach buggy racing 2 modded apk anti ban<br />
beach buggy racing 2 modded apk unlimited tickets<br />
beach buggy racing 2 modded apk god mode<br />
beach buggy racing 2 modded apk high damage</p>
<h3>Play against the world</h3>
<p>Beach Buggy Racing 2 is not only a solo game, but also a multiplayer game. You can play against other players from around the world in online races, tournaments, and leagues. You can also challenge your friends to a race in split-screen mode on the same device, or connect with other devices via WiFi or Bluetooth. Playing against other players will test your skills and strategies, as well as earn you rewards and trophies. You can also chat with other players in the game lobby, or join a club to team up with other racers and share tips and tricks.</p>
<h3>Customize your ride</h3>
<p>Beach Buggy Racing 2 lets you customize your ride to suit your style and personality. You can change the color of your car, add stickers and decals, change the wheels and tires, add spoilers and exhausts, and more. You can also customize your driver's appearance, such as their outfit, hairstyle, sunglasses, hat, helmet, mask, etc. You can unlock new customization options by playing the Adventure mode or by opening chests. You can also buy them with coins and gems if you want to get them faster.</p>
<h3>Awesome new game modes</h3>
<p>Beach Buggy Racing 2 offers a variety of game modes to keep you entertained and challenged. Besides the Adventure mode, which is the main story mode where you race through different worlds and events, you can also play other modes such as:</p>
<ul>
<li>Race: This is the classic mode where you race against seven other racers on any track you choose.</li>
<li>Championship: This is a series of races where you compete for points and trophies.</li>
<li>Daily Challenge: This is a special race that changes every day and has different rules and rewards.</li>
<li>Drift Attack: This is a mode where you have to drift as much as possible on a track to earn points.</li>
<li>Firework Fury: This is a mode where you have to collect rockets on a track and fire them at targets.</li>
<li>Boss Battle: This is a mode where you have to race against a boss character who has special abilities.</li>
<li>Custom Race: This is a mode where you can create your own race with different settings such as powerups, laps, opponents, etc.</li>
</ul>
<h2>Tips and tricks for Beach Buggy Racing 2</h2>
<h3>Master the drift</h3>
<p>Drifting is an essential skill in Beach Buggy Racing 2, as it allows you to take sharp turns without losing speed. To drift, you need to tap and hold the brake button while steering. You will see a yellow trail behind your car, indicating that you are drifting. The longer you drift, the more boost you will accumulate. You can use the boost by releasing the brake button and tapping the gas button. Boosting will give you a burst of speed that can help you overtake your opponents or avoid obstacles. You can also use the boost to perform a powerslide, which is a drift that goes in the opposite direction of the turn. This can help you change lanes quickly or dodge incoming attacks.</p>
<h3>Use the driver's ability at the right time</h3>
<p>As mentioned before, each driver in Beach Buggy Racing 2 has a unique ability that can give you an advantage in the race. However, you need to use it wisely and at the right time. Each ability has a cooldown time, which means that you can't use it again until it recharges. You can see the cooldown timer on the bottom left corner of the screen, next to your driver's portrait. You can also see a blue bar above your car, which indicates how much charge you have for your ability. You can charge your ability by collecting blue orbs on the track, or by hitting other racers with powerups. To use your ability, you need to tap on your driver's portrait when it is fully charged. Some abilities are offensive, such as Rez's laser beam or McSkelly's bat swarm, while some are defensive, such as Roxie's shield or Tiki's teleport. You need to use them according to the situation and your strategy.</p>
<h3>Don't fall into the trap</h3>
<p>Beach Buggy Racing 2 is full of traps and hazards that can slow you down or damage your car. You need to be careful and avoid them as much as possible. Some of the traps and hazards include:</p>
<ul>
<li>Mines: These are small explosives that are placed on the track by other racers or by the environment. They will explode when you touch them, causing you to spin out and lose speed.</li>
<li>Oil slicks: These are slippery patches of oil that are spilled on the track by other racers or by the environment. They will make you lose control and skid off course.</li>
<li>Fireballs: These are balls of fire that are shot from cannons or volcanoes on some tracks. They will hit you and set you on fire, causing you to lose health and speed.</li>
<li>Lava flows: These are streams of lava that flow across some tracks. They will burn you and damage your car if you touch them.</li>
<li>Sandstorms: These are storms of sand that blow across some tracks. They will reduce your visibility and make it harder to see where you are going.</li>
<li>Tumbleweeds: These are balls of dried plants that roll across some tracks. They will bounce off your car and slow you down if you hit them.</li>
</ul>
<p>You can avoid these traps and hazards by steering away from them, using your boost to get past them, or using your powerups to destroy them or protect yourself from them.</p>
<h3>Build the best deck of crazy powerups</h3>
<p>Beach Buggy Racing 2 has a lot of crazy powerups that you can use to spice up the race and sabotage your opponents. You can collect powerups by driving through red bubbles on the track, or by opening chests. You can also upgrade your powerups to make them more powerful and effective, as explained before. However, you can only equip four powerups at a time, so you need to choose wisely which ones to use. You can create different decks of powerups for different situations and strategies. For example, you can create a deck of offensive powerups, such as rockets, fireballs, lightning bolts, etc., to attack your opponents and slow them down. Or, you can create a deck of defensive powerups, such as shields, magnets, oil slicks, etc., to protect yourself from attacks and traps. Or, you can create a deck of utility powerups, such as boosts, teleports, springs, etc., to enhance your speed and maneuverability.</p>
<h3>Grab those fast bubbles</h3>
<p>Besides red bubbles that contain powerups, there are also green bubbles that contain coins and gems, blue bubbles that contain driver cards and car parts, and yellow bubbles that contain fast bubbles. Fast bubbles are special items that give you an instant boost of speed when you collect them. They are very useful for overtaking your opponents or escaping from danger. However, they are also very rare and hard to find. You need to keep an eye out for them and grab them whenever you see them. They are usually hidden in secret places or behind obstacles, so you need to explore the tracks and find the best routes to get them. You can also use your powerups or your driver's ability to help you reach them. For example, you can use a spring to jump over a wall, or a teleport to skip a section of the track.</p>
|
101 |
-
<h3>Choose the best controls</h3>
|
102 |
-
<p>Beach Buggy Racing 2 offers different options for controlling your car. You can choose between tilt, touch, or gamepad controls. You can also adjust the sensitivity and the layout of the buttons. You need to find the best controls that suit your preference and style. You can experiment with different settings and see which one works best for you. You can also switch between different controls during the game by pausing and going to the settings menu. Here are some pros and cons of each control option:</p>
|
103 |
-
<table>
|
104 |
-
<tr>
|
105 |
-
<th>Control option</th>
|
106 |
-
<th>Pros</th>
|
107 |
-
<th>Cons</th>
|
108 |
-
</tr>
|
109 |
-
<tr>
|
110 |
-
<td>Tilt</td>
|
111 |
-
<td>More realistic and immersive, easy to drift and powerslide, no need to touch the screen.</td>
|
112 |
-
<td>Less precise and responsive, may cause motion sickness, may not work well on some devices.</td>
|
113 |
-
</tr>
|
114 |
-
<tr>
|
115 |
-
<td>Touch</td>
|
116 |
-
<td>More precise and responsive, easy to steer and brake, works well on any device.</td>
|
117 |
-
<td>Less realistic and immersive, may block the view of the screen, may cause finger fatigue.</td>
|
118 |
-
</tr>
|
119 |
-
<tr>
|
120 |
-
<td>Gamepad</td>
|
121 |
-
<td>Most realistic and immersive, most precise and responsive, most comfortable and ergonomic.</td>
|
122 |
-
<td>Requires an external device, may not be compatible with some games or devices, may be expensive or hard to find.</td>
|
123 |
-
</tr>
|
124 |
-
</table>
|
125 |
-
<h2>Review of Beach Buggy Racing 2</h2>
|
126 |
-
<h3>Pros and cons</h3>
|
127 |
-
<p>Beach Buggy Racing 2 is a fun and wacky kart racing game that offers a lot of features and content for Android users. However, it also has some drawbacks that may affect your enjoyment of the game. Here are some pros and cons of Beach Buggy Racing 2:</p>
|
128 |
-
<table>
|
129 |
-
<tr>
|
130 |
-
<th>Pros</th>
|
131 |
-
<th>Cons</th>
|
132 |
-
</tr>
|
133 |
-
<tr>
|
134 |
-
<td>Stunning graphics and sound effects.</td>
|
135 |
-
<td>Frequent ads and pop-ups.</td>
|
136 |
-
</tr>
|
137 |
-
<tr>
|
138 |
-
<td>Varied tracks and environments.</td>
|
139 |
-
<td>Sometimes laggy or buggy.</td>
|
140 |
-
</tr>
|
141 |
-
<tr>
|
142 |
-
<td>Huge collection of cars and drivers.</td>
|
143 |
-
<td>Somewhat pay-to-win.</td>
|
144 |
-
</tr>
|
145 |
-
<tr>
|
146 |
-
<td>Crazy powerups and abilities.</td>
|
147 |
-
<td>Sometimes unfair or frustrating.</td>
|
148 |
-
</tr>
|
149 |
-
<tr>
|
150 |
-
<td>Multifaceted game modes.</td>
|
151 |
-
<td>Sometimes repetitive or boring.</td>
|
152 |
-
</tr>
|
153 |
-
<tr>
|
154 |
-
<td>Multifaceted game modes.</td>
|
155 |
-
<td>Sometimes repetitive or boring.</td></tr><tr><td>Multifaceted game modes.</td><td>Sometimes repetitive or boring.</td></tr><tr><td>Multifaceted game modes.</td><td>Sometimes repetitive or boring.</td></tr><tr><td>Multifaceted game modes.</td><td>Sometimes repetitive or boring.</td></tr></table>
|
156 |
-
<h3>Rating and verdict</h3>
|
157 |
-
<p>We give Beach Buggy Racing 2 a rating of 4 out of 5 stars. It is a fun and wacky kart racing game that will keep you entertained and challenged for hours. It has stunning graphics, varied tracks, huge collection of cars and drivers, crazy powerups and abilities, multifaceted game modes, and multiplayer options. However, it also has frequent ads, laggy performance, pay-to-win elements, unfair difficulty, and repetitive gameplay. If you are looking for a kart racing game for your Android device, you might want to give Beach Buggy Racing 2 a try. It is free to download and play, but it contains in-app purchases that can enhance your experience. You can also check out other similar games such as Mario Kart Tour, Crash Bandicoot: On the Run!, Sonic Racing Transformed, etc.</p>
|
158 |
-
<h2>Frequently Asked Questions (FAQs)</h2>
|
159 |
-
<p>Here are some frequently asked questions (FAQs) about Beach Buggy Racing 2:</p>
|
160 |
-
<ol>
|
161 |
-
<li><b>How do I unlock new cars and drivers?</b></li>
|
162 |
-
<p>You can unlock new cars and drivers by playing the Adventure mode or by opening chests. You can also buy them with coins and gems if you want to get them faster.</p>
|
163 |
-
<li><b>How do I upgrade my cars, drivers, and powerups?</b></li>
|
164 |
-
<p>You can upgrade your <p>You can upgrade your cars, drivers, and powerups by collecting coins, gems, car parts, driver cards, and powerup cards during the races, or by buying them with real money. You can also upgrade them by completing quests and achievements. Upgrading your cars, drivers, and powerups will make them more powerful and effective, as well as change their appearance.</p>
|
165 |
-
<li><b>How do I play with my friends?</b></li>
|
166 |
-
<p>You can play with your friends in split-screen mode on the same device, or connect with other devices via WiFi or Bluetooth. You can also play online with your friends or other players from around the world in races, tournaments, and leagues. You can also chat with your friends in the game lobby, or join a club to team up with other racers and share tips and tricks.</p>
|
167 |
-
<li><b>How do I get more coins and gems?</b></li>
|
168 |
-
<p>You can get more coins and gems by playing the game and collecting them during the races, or by opening chests. You can also get more coins and gems by watching ads, completing offers, or buying them with real money. Coins and gems are used to unlock and upgrade cars, drivers, powerups, and customization options.</p>
|
169 |
-
<li><b>How do I get rid of ads?</b></li>
|
170 |
-
<p>You can get rid of ads by buying any amount of coins or gems with real money. This will remove all ads from the game permanently. You can also turn off your internet connection to avoid ads, but this will also disable some features of the game such as online multiplayer, daily challenge, etc.</p>
|
171 |
-
<li><b>How do I contact the developers?</b></li>
|
172 |
-
<p>You can contact the developers of Beach Buggy Racing 2 by visiting their website at <a href="">www.vectorunit.com</a>, or by sending them an email at <a href="mailto:[email protected]">[email protected]</a>. You can also follow them on social media platforms such as Facebook, Twitter, Instagram, YouTube, etc. You can also leave a review or a comment on the Google Play Store to share your feedback and suggestions.</p>
|
173 |
-
</ol></p> 197e85843d<br />
|
174 |
-
<br />
|
175 |
-
<br />
spaces/7eu7d7/anime-ai-detect-fucker/app.py
DELETED
@@ -1,50 +0,0 @@
import gradio as gr
from attack import Attacker
import argparse

def do_attack(img, eps, step_size, steps, progress=gr.Progress()):
    args = argparse.Namespace()
    args.out_dir = './'
    args.target = 'auto'
    args.eps = eps
    args.step_size = step_size
    args.steps = steps
    args.test_atk = False

    step = progress.tqdm(range(steps))

    def pdg_prog(ori_images, images, labels):
        step.update(1)

    attacker = Attacker(args, pgd_callback=pdg_prog)
    atk_img, noise = attacker.attack_(img)
    attacker.save_image(img, noise, 'out.png')
    return 'out_atk.png'

with gr.Blocks(title="Anime AI Detect Fucker Demo", theme="dark") as demo:
    gr.HTML('<a href="https://github.com/7eu7d7/anime-ai-detect-fucker">github repo</a>')

    with gr.Row():
        with gr.Column():
            with gr.Row():
                eps = gr.Slider(label="eps (Noise intensity)", minimum=1, maximum=16, step=1, value=1)
                step_size = gr.Slider(label="Noise step size", minimum=0.001, maximum=16, step=0.001, value=0.136)
            with gr.Row():
                steps = gr.Slider(label="step count", minimum=1, maximum=100, step=1, value=20)
                model_name = gr.Dropdown(label="attack target",
                                         choices=["auto", "human", "ai"],
                                         interactive=True,
                                         value="auto", show_label=True)

            input_image = gr.Image(label="Clean Image", type="pil")

            atk_btn = gr.Button("Attack")

        with gr.Column():
            output_image = gr.Image(label="Attacked Image")

    atk_btn.click(fn=do_attack,
                  inputs=[input_image, eps, step_size, steps],
                  outputs=output_image)

demo.launch()
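# Reviewer note (added commentary, not part of the original file): two loose ends
# are visible above. The `model_name` dropdown is created but never passed to
# `do_attack`, so `args.target` is always "auto" regardless of the selection; and
# `save_image` is called with 'out.png' while the handler returns 'out_atk.png',
# which presumably relies on `Attacker.save_image` writing the attacked image
# under an "_atk" suffix (an assumption -- the `attack` module is not shown here).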
spaces/801artistry/RVC801/MDXNet.py
DELETED
@@ -1,272 +0,0 @@
import soundfile as sf
import torch, pdb, os, warnings, librosa
import numpy as np
import onnxruntime as ort
from tqdm import tqdm
import torch

dim_c = 4


class Conv_TDF_net_trim:
    def __init__(
        self, device, model_name, target_name, L, dim_f, dim_t, n_fft, hop=1024
    ):
        super(Conv_TDF_net_trim, self).__init__()

        self.dim_f = dim_f
        self.dim_t = 2**dim_t
        self.n_fft = n_fft
        self.hop = hop
        self.n_bins = self.n_fft // 2 + 1
        self.chunk_size = hop * (self.dim_t - 1)
        self.window = torch.hann_window(window_length=self.n_fft, periodic=True).to(
            device
        )
        self.target_name = target_name
        self.blender = "blender" in model_name

        out_c = dim_c * 4 if target_name == "*" else dim_c
        self.freq_pad = torch.zeros(
            [1, out_c, self.n_bins - self.dim_f, self.dim_t]
        ).to(device)

        self.n = L // 2

    def stft(self, x):
        x = x.reshape([-1, self.chunk_size])
        x = torch.stft(
            x,
            n_fft=self.n_fft,
            hop_length=self.hop,
            window=self.window,
            center=True,
            return_complex=True,
        )
        x = torch.view_as_real(x)
        x = x.permute([0, 3, 1, 2])
        x = x.reshape([-1, 2, 2, self.n_bins, self.dim_t]).reshape(
            [-1, dim_c, self.n_bins, self.dim_t]
        )
        return x[:, :, : self.dim_f]

    def istft(self, x, freq_pad=None):
        freq_pad = (
            self.freq_pad.repeat([x.shape[0], 1, 1, 1])
            if freq_pad is None
            else freq_pad
        )
        x = torch.cat([x, freq_pad], -2)
        c = 4 * 2 if self.target_name == "*" else 2
        x = x.reshape([-1, c, 2, self.n_bins, self.dim_t]).reshape(
            [-1, 2, self.n_bins, self.dim_t]
        )
        x = x.permute([0, 2, 3, 1])
        x = x.contiguous()
        x = torch.view_as_complex(x)
        x = torch.istft(
            x, n_fft=self.n_fft, hop_length=self.hop, window=self.window, center=True
        )
        return x.reshape([-1, c, self.chunk_size])


def get_models(device, dim_f, dim_t, n_fft):
    return Conv_TDF_net_trim(
        device=device,
        model_name="Conv-TDF",
        target_name="vocals",
        L=11,
        dim_f=dim_f,
        dim_t=dim_t,
        n_fft=n_fft,
    )


warnings.filterwarnings("ignore")
cpu = torch.device("cpu")
if torch.cuda.is_available():
    device = torch.device("cuda:0")
elif torch.backends.mps.is_available():
    device = torch.device("mps")
else:
    device = torch.device("cpu")


class Predictor:
    def __init__(self, args):
        self.args = args
        self.model_ = get_models(
            device=cpu, dim_f=args.dim_f, dim_t=args.dim_t, n_fft=args.n_fft
        )
        self.model = ort.InferenceSession(
            os.path.join(args.onnx, self.model_.target_name + ".onnx"),
            providers=["CUDAExecutionProvider", "CPUExecutionProvider"],
        )
        print("onnx load done")

    def demix(self, mix):
        samples = mix.shape[-1]
        margin = self.args.margin
        chunk_size = self.args.chunks * 44100
        assert not margin == 0, "margin cannot be zero!"
        if margin > chunk_size:
            margin = chunk_size

        segmented_mix = {}

        if self.args.chunks == 0 or samples < chunk_size:
            chunk_size = samples

        counter = -1
        for skip in range(0, samples, chunk_size):
            counter += 1

            s_margin = 0 if counter == 0 else margin
            end = min(skip + chunk_size + margin, samples)

            start = skip - s_margin

            segmented_mix[skip] = mix[:, start:end].copy()
            if end == samples:
                break

        sources = self.demix_base(segmented_mix, margin_size=margin)
        """
        mix:(2,big_sample)
        segmented_mix:offset->(2,small_sample)
        sources:(1,2,big_sample)
        """
        return sources

    def demix_base(self, mixes, margin_size):
        chunked_sources = []
        progress_bar = tqdm(total=len(mixes))
        progress_bar.set_description("Processing")
        for mix in mixes:
            cmix = mixes[mix]
            sources = []
            n_sample = cmix.shape[1]
            model = self.model_
            trim = model.n_fft // 2
            gen_size = model.chunk_size - 2 * trim
            pad = gen_size - n_sample % gen_size
            mix_p = np.concatenate(
                (np.zeros((2, trim)), cmix, np.zeros((2, pad)), np.zeros((2, trim))), 1
            )
            mix_waves = []
            i = 0
            while i < n_sample + pad:
                waves = np.array(mix_p[:, i : i + model.chunk_size])
                mix_waves.append(waves)
                i += gen_size
            mix_waves = torch.tensor(mix_waves, dtype=torch.float32).to(cpu)
            with torch.no_grad():
                _ort = self.model
                spek = model.stft(mix_waves)
                if self.args.denoise:
                    spec_pred = (
                        -_ort.run(None, {"input": -spek.cpu().numpy()})[0] * 0.5
                        + _ort.run(None, {"input": spek.cpu().numpy()})[0] * 0.5
                    )
                    tar_waves = model.istft(torch.tensor(spec_pred))
                else:
                    tar_waves = model.istft(
                        torch.tensor(_ort.run(None, {"input": spek.cpu().numpy()})[0])
                    )
                tar_signal = (
                    tar_waves[:, :, trim:-trim]
                    .transpose(0, 1)
                    .reshape(2, -1)
                    .numpy()[:, :-pad]
                )

                start = 0 if mix == 0 else margin_size
                end = None if mix == list(mixes.keys())[::-1][0] else -margin_size
                if margin_size == 0:
                    end = None
                sources.append(tar_signal[:, start:end])

                progress_bar.update(1)

            chunked_sources.append(sources)
        _sources = np.concatenate(chunked_sources, axis=-1)
        # del self.model
        progress_bar.close()
        return _sources

    def prediction(self, m, vocal_root, others_root, format):
        os.makedirs(vocal_root, exist_ok=True)
        os.makedirs(others_root, exist_ok=True)
        basename = os.path.basename(m)
        mix, rate = librosa.load(m, mono=False, sr=44100)
        if mix.ndim == 1:
            mix = np.asfortranarray([mix, mix])
        mix = mix.T
        sources = self.demix(mix.T)
        opt = sources[0].T
        if format in ["wav", "flac"]:
            sf.write(
                "%s/%s_main_vocal.%s" % (vocal_root, basename, format), mix - opt, rate
            )
            sf.write("%s/%s_others.%s" % (others_root, basename, format), opt, rate)
        else:
            path_vocal = "%s/%s_main_vocal.wav" % (vocal_root, basename)
            path_other = "%s/%s_others.wav" % (others_root, basename)
            sf.write(path_vocal, mix - opt, rate)
            sf.write(path_other, opt, rate)
            if os.path.exists(path_vocal):
                os.system(
                    "ffmpeg -i %s -vn %s -q:a 2 -y"
                    % (path_vocal, path_vocal[:-4] + ".%s" % format)
                )
            if os.path.exists(path_other):
                os.system(
                    "ffmpeg -i %s -vn %s -q:a 2 -y"
                    % (path_other, path_other[:-4] + ".%s" % format)
                )


class MDXNetDereverb:
    def __init__(self, chunks):
        self.onnx = "uvr5_weights/onnx_dereverb_By_FoxJoy"
        self.shifts = 10  # 'Predict with randomised equivariant stabilisation'
        self.mixing = "min_mag"  # ['default','min_mag','max_mag']
        self.chunks = chunks
        self.margin = 44100
        self.dim_t = 9
        self.dim_f = 3072
        self.n_fft = 6144
        self.denoise = True
        self.pred = Predictor(self)

    def _path_audio_(self, input, vocal_root, others_root, format):
        self.pred.prediction(input, vocal_root, others_root, format)


if __name__ == "__main__":
    dereverb = MDXNetDereverb(15)
    from time import time as ttime

    t0 = ttime()
    dereverb._path_audio_(
        "雪雪伴奏对消HP5.wav",
        "vocal",
        "others",
    )
    t1 = ttime()
    print(t1 - t0)


"""

runtime\python.exe MDXNet.py

6G:
15/9: 0.8G -> 6.8G
14:   0.8G -> 6.5G
25:   OOM (crashes)

half15:  0.7G -> 6.6G, 22.69s
fp32-15: 0.7G -> 6.6G, 20.85s

"""
spaces/AB-TW/team-ai/documents/bussiness_context/NOTION_DB/Engineering Wiki 2402f5396a3244fdb3f1d135bdb0f3d6/Redis 9e063b60eca24a1783c225cfdc21dd8c.md
DELETED
@@ -1,5 +0,0 @@
# Redis

Last edited time: March 31, 2023 1:49 PM
Owner: Anonymous
Tags: Infrastructure
spaces/AIConsultant/MusicGen/audiocraft/models/loaders.py
DELETED
@@ -1,141 +0,0 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

"""
Utility functions to load from the checkpoints.
Each checkpoint is a torch.saved dict with the following keys:
- 'xp.cfg': the hydra config as dumped during training. This should be used
    to rebuild the object using the audiocraft.models.builders functions,
- 'model_best_state': a readily loadable best state for the model, including
    the conditioner. The model obtained from `xp.cfg` should be compatible
    with this state dict. In the case of a LM, the encodec model would not be
    bundled along but instead provided separately.

Those functions also support loading from a remote location with the Torch Hub API.
They also support overriding some parameters, in particular the device and dtype
of the returned model.
"""

from pathlib import Path
from huggingface_hub import hf_hub_download
import typing as tp
import os

from omegaconf import OmegaConf, DictConfig
import torch

from . import builders
from .encodec import CompressionModel


def get_audiocraft_cache_dir() -> tp.Optional[str]:
    return os.environ.get('AUDIOCRAFT_CACHE_DIR', None)


def _get_state_dict(
    file_or_url_or_id: tp.Union[Path, str],
    filename: tp.Optional[str] = None,
    device='cpu',
    cache_dir: tp.Optional[str] = None,
):
    if cache_dir is None:
        cache_dir = get_audiocraft_cache_dir()
    # Return the state dict either from a file or url
    file_or_url_or_id = str(file_or_url_or_id)
    assert isinstance(file_or_url_or_id, str)

    if os.path.isfile(file_or_url_or_id):
        return torch.load(file_or_url_or_id, map_location=device)

    if os.path.isdir(file_or_url_or_id):
        file = f"{file_or_url_or_id}/{filename}"
        return torch.load(file, map_location=device)

    elif file_or_url_or_id.startswith('https://'):
        return torch.hub.load_state_dict_from_url(file_or_url_or_id, map_location=device, check_hash=True)

    else:
        assert filename is not None, "filename needs to be defined if using HF checkpoints"

        file = hf_hub_download(repo_id=file_or_url_or_id, filename=filename, cache_dir=cache_dir)
        return torch.load(file, map_location=device)


def load_compression_model_ckpt(file_or_url_or_id: tp.Union[Path, str], cache_dir: tp.Optional[str] = None):
    return _get_state_dict(file_or_url_or_id, filename="compression_state_dict.bin", cache_dir=cache_dir)


def load_compression_model(file_or_url_or_id: tp.Union[Path, str], device='cpu', cache_dir: tp.Optional[str] = None):
    pkg = load_compression_model_ckpt(file_or_url_or_id, cache_dir=cache_dir)
    if 'pretrained' in pkg:
        return CompressionModel.get_pretrained(pkg['pretrained'], device=device)
    cfg = OmegaConf.create(pkg['xp.cfg'])
    cfg.device = str(device)
    model = builders.get_compression_model(cfg)
    model.load_state_dict(pkg['best_state'])
    model.eval()
    return model


def load_lm_model_ckpt(file_or_url_or_id: tp.Union[Path, str], cache_dir: tp.Optional[str] = None):
    return _get_state_dict(file_or_url_or_id, filename="state_dict.bin", cache_dir=cache_dir)


def _delete_param(cfg: DictConfig, full_name: str):
    parts = full_name.split('.')
    for part in parts[:-1]:
        if part in cfg:
            cfg = cfg[part]
        else:
            return
    OmegaConf.set_struct(cfg, False)
    if parts[-1] in cfg:
        del cfg[parts[-1]]
    OmegaConf.set_struct(cfg, True)


def load_lm_model(file_or_url_or_id: tp.Union[Path, str], device='cpu', cache_dir: tp.Optional[str] = None):
    pkg = load_lm_model_ckpt(file_or_url_or_id, cache_dir=cache_dir)
    cfg = OmegaConf.create(pkg['xp.cfg'])
    cfg.device = str(device)
    if cfg.device == 'cpu':
        cfg.dtype = 'float32'
    else:
        cfg.dtype = 'float16'
    _delete_param(cfg, 'conditioners.self_wav.chroma_stem.cache_path')
    _delete_param(cfg, 'conditioners.args.merge_text_conditions_p')
    _delete_param(cfg, 'conditioners.args.drop_desc_p')
    model = builders.get_lm_model(cfg)
    model.load_state_dict(pkg['best_state'])
    model.eval()
    model.cfg = cfg
    return model


def load_mbd_ckpt(file_or_url_or_id: tp.Union[Path, str], cache_dir: tp.Optional[str] = None):
    return _get_state_dict(file_or_url_or_id, filename="all_in_one.pt", cache_dir=cache_dir)


def load_diffusion_models(file_or_url_or_id: tp.Union[Path, str], device='cpu', cache_dir: tp.Optional[str] = None):
    pkg = load_mbd_ckpt(file_or_url_or_id, cache_dir=cache_dir)
    models = []
    processors = []
    cfgs = []
    sample_rate = pkg['sample_rate']
    for i in range(pkg['n_bands']):
        cfg = pkg[i]['cfg']
        model = builders.get_diffusion_model(cfg)
        model_dict = pkg[i]['model_state']
        model.load_state_dict(model_dict)
        model.to(device)
        processor = builders.get_processor(cfg=cfg.processor, sample_rate=sample_rate)
        processor_dict = pkg[i]['processor_state']
        processor.load_state_dict(processor_dict)
        processor.to(device)
        models.append(model)
        processors.append(processor)
        cfgs.append(cfg)
    return models, processors, cfgs
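# Reviewer note (added commentary, not part of the original file): a minimal
# usage sketch for the loaders above. The repo id is illustrative -- any
# Hugging Face repo (or local path/URL) exposing "compression_state_dict.bin"
# and "state_dict.bin" in the documented format works the same way.
#
#     import torch
#     device = 'cuda' if torch.cuda.is_available() else 'cpu'
#     compression_model = load_compression_model('facebook/musicgen-small', device=device)
#     lm = load_lm_model('facebook/musicgen-small', device=device)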
spaces/AIDHD/audio-video-transcriber/README.md
DELETED
@@ -1,12 +0,0 @@
---
title: Audio Video Transcriber
emoji: 🔥
colorFrom: yellow
colorTo: blue
sdk: gradio
sdk_version: 3.14.0
app_file: app.py
pinned: false
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/AIFILMS/StyleGANEX/models/stylegan2/op_ori/fused_bias_act.cpp
DELETED
@@ -1,21 +0,0 @@
#include <torch/extension.h>


torch::Tensor fused_bias_act_op(const torch::Tensor& input, const torch::Tensor& bias, const torch::Tensor& refer,
                                int act, int grad, float alpha, float scale);

#define CHECK_CUDA(x) TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor")
#define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous")
#define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)

torch::Tensor fused_bias_act(const torch::Tensor& input, const torch::Tensor& bias, const torch::Tensor& refer,
                             int act, int grad, float alpha, float scale) {
    CHECK_CUDA(input);
    CHECK_CUDA(bias);

    return fused_bias_act_op(input, bias, refer, act, grad, alpha, scale);
}

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("fused_bias_act", &fused_bias_act, "fused bias act (CUDA)");
}
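# Reviewer note (added commentary, not part of the original file): this .cpp file
# only declares the Python binding; the CUDA kernel lives in a companion source
# (fused_bias_act_kernel.cu in the usual StyleGAN2 layout -- an assumption here).
# A typical JIT build from Python looks like:
#
#     from torch.utils.cpp_extension import load
#     fused = load(name="fused",
#                  sources=["fused_bias_act.cpp", "fused_bias_act_kernel.cu"])
#     out = fused.fused_bias_act(input, bias, refer, act, grad, alpha, scale)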
spaces/AIFILMS/generate_human_motion/VQ-Trans/models/modules.py
DELETED
@@ -1,109 +0,0 @@
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence

def init_weight(m):
    if isinstance(m, nn.Conv1d) or isinstance(m, nn.Linear) or isinstance(m, nn.ConvTranspose1d):
        nn.init.xavier_normal_(m.weight)
        # m.bias.data.fill_(0.01)
        if m.bias is not None:
            nn.init.constant_(m.bias, 0)


class MovementConvEncoder(nn.Module):
    def __init__(self, input_size, hidden_size, output_size):
        super(MovementConvEncoder, self).__init__()
        self.main = nn.Sequential(
            nn.Conv1d(input_size, hidden_size, 4, 2, 1),
            nn.Dropout(0.2, inplace=True),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv1d(hidden_size, output_size, 4, 2, 1),
            nn.Dropout(0.2, inplace=True),
            nn.LeakyReLU(0.2, inplace=True),
        )
        self.out_net = nn.Linear(output_size, output_size)
        self.main.apply(init_weight)
        self.out_net.apply(init_weight)

    def forward(self, inputs):
        inputs = inputs.permute(0, 2, 1)
        outputs = self.main(inputs).permute(0, 2, 1)
        # print(outputs.shape)
        return self.out_net(outputs)


class TextEncoderBiGRUCo(nn.Module):
    def __init__(self, word_size, pos_size, hidden_size, output_size, device):
        super(TextEncoderBiGRUCo, self).__init__()
        self.device = device

        self.pos_emb = nn.Linear(pos_size, word_size)
        self.input_emb = nn.Linear(word_size, hidden_size)
        self.gru = nn.GRU(hidden_size, hidden_size, batch_first=True, bidirectional=True)
        self.output_net = nn.Sequential(
            nn.Linear(hidden_size * 2, hidden_size),
            nn.LayerNorm(hidden_size),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(hidden_size, output_size)
        )

        self.input_emb.apply(init_weight)
        self.pos_emb.apply(init_weight)
        self.output_net.apply(init_weight)
        self.hidden_size = hidden_size
        self.hidden = nn.Parameter(torch.randn((2, 1, self.hidden_size), requires_grad=True))

    # input(batch_size, seq_len, dim)
    def forward(self, word_embs, pos_onehot, cap_lens):
        num_samples = word_embs.shape[0]

        pos_embs = self.pos_emb(pos_onehot)
        inputs = word_embs + pos_embs
        input_embs = self.input_emb(inputs)
        hidden = self.hidden.repeat(1, num_samples, 1)

        cap_lens = cap_lens.data.tolist()
        emb = pack_padded_sequence(input_embs, cap_lens, batch_first=True)

        gru_seq, gru_last = self.gru(emb, hidden)

        gru_last = torch.cat([gru_last[0], gru_last[1]], dim=-1)

        return self.output_net(gru_last)


class MotionEncoderBiGRUCo(nn.Module):
    def __init__(self, input_size, hidden_size, output_size, device):
        super(MotionEncoderBiGRUCo, self).__init__()
        self.device = device

        self.input_emb = nn.Linear(input_size, hidden_size)
        self.gru = nn.GRU(hidden_size, hidden_size, batch_first=True, bidirectional=True)
        self.output_net = nn.Sequential(
            nn.Linear(hidden_size*2, hidden_size),
            nn.LayerNorm(hidden_size),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(hidden_size, output_size)
        )

        self.input_emb.apply(init_weight)
        self.output_net.apply(init_weight)
        self.hidden_size = hidden_size
        self.hidden = nn.Parameter(torch.randn((2, 1, self.hidden_size), requires_grad=True))

    # input(batch_size, seq_len, dim)
    def forward(self, inputs, m_lens):
        num_samples = inputs.shape[0]

        input_embs = self.input_emb(inputs)
        hidden = self.hidden.repeat(1, num_samples, 1)

        cap_lens = m_lens.data.tolist()
        emb = pack_padded_sequence(input_embs, cap_lens, batch_first=True, enforce_sorted=False)

        gru_seq, gru_last = self.gru(emb, hidden)

        gru_last = torch.cat([gru_last[0], gru_last[1]], dim=-1)

        return self.output_net(gru_last)
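# Reviewer note (added commentary, not part of the original file): a smoke test
# for the motion encoder above, with made-up sizes. Note that TextEncoderBiGRUCo
# packs sequences without enforce_sorted=False, so its captions must arrive
# sorted by descending length, while MotionEncoderBiGRUCo accepts any order.
#
#     import torch
#     enc = MotionEncoderBiGRUCo(input_size=263, hidden_size=1024,
#                                output_size=512, device='cpu')
#     motions = torch.randn(4, 60, 263)        # (batch, seq_len, dim)
#     m_lens = torch.tensor([60, 55, 40, 32])  # true lengths per sample
#     emb = enc(motions, m_lens)               # -> shape (4, 512)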
spaces/AIGC-Audio/AudioGPT/NeuralSeq/modules/parallel_wavegan/optimizers/radam.py
DELETED
@@ -1,91 +0,0 @@
# -*- coding: utf-8 -*-

"""RAdam optimizer.

This code is derived from https://github.com/LiyuanLucasLiu/RAdam.
"""

import math
import torch

from torch.optim.optimizer import Optimizer


class RAdam(Optimizer):
    """Rectified Adam optimizer."""

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0):
        """Initialize RAdam optimizer."""
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
        self.buffer = [[None, None, None] for ind in range(10)]
        super(RAdam, self).__init__(params, defaults)

    def __setstate__(self, state):
        """Set state."""
        super(RAdam, self).__setstate__(state)

    def step(self, closure=None):
        """Run one step."""
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:

            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data.float()
                if grad.is_sparse:
                    raise RuntimeError('RAdam does not support sparse gradients')

                p_data_fp32 = p.data.float()

                state = self.state[p]

                if len(state) == 0:
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p_data_fp32)
                    state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
                else:
                    state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
                    state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)

                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']

                exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
                exp_avg.mul_(beta1).add_(1 - beta1, grad)

                state['step'] += 1
                buffered = self.buffer[int(state['step'] % 10)]
                if state['step'] == buffered[0]:
                    N_sma, step_size = buffered[1], buffered[2]
                else:
                    buffered[0] = state['step']
                    beta2_t = beta2 ** state['step']
                    N_sma_max = 2 / (1 - beta2) - 1
                    N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
                    buffered[1] = N_sma

                    # more conservative since it's an approximated value
                    if N_sma >= 5:
                        step_size = math.sqrt(
                            (1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (N_sma_max - 2)) / (1 - beta1 ** state['step'])  # NOQA
                    else:
                        step_size = 1.0 / (1 - beta1 ** state['step'])
                    buffered[2] = step_size

                if group['weight_decay'] != 0:
                    p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)

                # more conservative since it's an approximated value
                if N_sma >= 5:
                    denom = exp_avg_sq.sqrt().add_(group['eps'])
                    p_data_fp32.addcdiv_(-step_size * group['lr'], exp_avg, denom)
                else:
                    p_data_fp32.add_(-step_size * group['lr'], exp_avg)

                p.data.copy_(p_data_fp32)

        return loss
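# Reviewer note (added commentary, not part of the original file): standard
# torch.optim.Optimizer usage; the model below is a placeholder. Also note the
# code above uses the legacy `tensor.add_(scalar, tensor)` / `addcmul_(scalar,
# t1, t2)` call order, which newer PyTorch releases deprecate in favour of the
# `alpha=` / `value=` keyword forms.
#
#     import torch
#     import torch.nn as nn
#     model = nn.Linear(10, 1)
#     opt = RAdam(model.parameters(), lr=1e-3, betas=(0.9, 0.999), weight_decay=0)
#     loss = model(torch.randn(8, 10)).pow(2).mean()
#     opt.zero_grad(); loss.backward(); opt.step()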
spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/encoders/open_clap/linear_probe.py
DELETED
@@ -1,63 +0,0 @@
import numpy as np
import torch.nn.functional as F
from torch import nn
from .model import MLPLayers


class LinearProbe(nn.Module):
    def __init__(self, model, mlp, freeze, in_ch, out_ch, act=None):
        """
        Args:
            model: nn.Module
            mlp: bool, if True, then use the MLP layer as the linear probe module
            freeze: bool, if True, then freeze all the CLAP model's layers when training the linear probe
            in_ch: int, the output channel from CLAP model
            out_ch: int, the output channel from linear probe (class_num)
            act: torch.nn.functional, the activation function before the loss function
        """
        super().__init__()
        in_ch = 512
        self.clap_model = model
        self.clap_model.text_branch = None  # to save memory
        self.freeze = freeze
        if mlp:
            self.lp_layer = MLPLayers(units=[in_ch, in_ch * 2, out_ch])
        else:
            self.lp_layer = nn.Linear(in_ch, out_ch)

        if self.freeze:
            for param in self.clap_model.parameters():
                param.requires_grad = False

        if act == 'None':
            self.act = None
        elif act == 'relu':
            self.act = nn.ReLU()
        elif act == 'elu':
            self.act = nn.ELU()
        elif act == 'prelu':
            self.act = nn.PReLU(num_parameters=in_ch)
        elif act == 'softmax':
            self.act = nn.Softmax(dim=-1)
        elif act == 'sigmoid':
            self.act = nn.Sigmoid()

    def forward(self, x, mix_lambda=None, device=None):
        """
        Args:
            x: waveform, torch.tensor [batch, t_samples] / batch of mel_spec and longer list
            mix_lambda: torch.tensor [batch], the mixup lambda
        Returns:
            class_prob: torch.tensor [batch, class_num]
        """
        # keep batchnorm in eval mode so it does not update running stats when frozen
        if self.freeze:
            self.clap_model.eval()

        x = self.clap_model.audio_projection(
            self.clap_model.audio_branch(x, mixup_lambda=mix_lambda, device=device)["embedding"])
        out = self.lp_layer(x)
        if self.act is not None:
            out = self.act(out)
        return out
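# Reviewer note (added commentary, not part of the original file): the
# constructor above immediately overwrites its `in_ch` argument with a
# hard-coded 512 (the CLAP joint-embedding width), so the parameter is
# effectively ignored. Also, an unrecognised `act` string (or the default
# act=None) leaves `self.act` unset, and forward() would then raise an
# AttributeError; callers must pass one of the listed string values.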
spaces/ARTeLab/DTM_Estimation_SRandD/models/modelNetB.py
DELETED
@@ -1,307 +0,0 @@
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor

__all__ = [
    "ResidualDenseBlock", "ResidualResidualDenseBlock", "Generator",
    "DownSamplingNetwork"
]


class ResidualDenseBlock(nn.Module):
    """Achieves densely connected convolutional layers.
    `Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993v5.pdf>` paper.

    Args:
        channels (int): The number of channels in the input image.
        growths (int): The number of channels that increase in each layer of convolution.
    """

    def __init__(self, channels: int, growths: int) -> None:
        super(ResidualDenseBlock, self).__init__()
        self.conv1 = nn.Conv2d(channels + growths * 0, growths, (3, 3), (1, 1), (1, 1))
        self.conv2 = nn.Conv2d(channels + growths * 1, growths, (3, 3), (1, 1), (1, 1))
        self.conv3 = nn.Conv2d(channels + growths * 2, growths, (3, 3), (1, 1), (1, 1))
        self.conv4 = nn.Conv2d(channels + growths * 3, growths, (3, 3), (1, 1), (1, 1))
        self.conv5 = nn.Conv2d(channels + growths * 4, channels, (3, 3), (1, 1), (1, 1))

        self.leaky_relu = nn.LeakyReLU(0.2, True)
        self.identity = nn.Identity()

    def forward(self, x: Tensor) -> Tensor:
        identity = x

        out1 = self.leaky_relu(self.conv1(x))
        out2 = self.leaky_relu(self.conv2(torch.cat([x, out1], 1)))
        out3 = self.leaky_relu(self.conv3(torch.cat([x, out1, out2], 1)))
        out4 = self.leaky_relu(self.conv4(torch.cat([x, out1, out2, out3], 1)))
        out5 = self.identity(self.conv5(torch.cat([x, out1, out2, out3, out4], 1)))
        out = out5 * 0.2 + identity

        return out


class ResidualDenseBlock(nn.Module):
    """Achieves densely connected convolutional layers.
    `Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993v5.pdf>` paper.

    Args:
        channels (int): The number of channels in the input image.
        growths (int): The number of channels that increase in each layer of convolution.
    """

    def __init__(self, channels: int, growths: int) -> None:
        super(ResidualDenseBlock, self).__init__()
        self.conv1 = nn.Conv2d(channels + growths * 0, growths, (3, 3), (1, 1), (1, 1))
        self.conv2 = nn.Conv2d(channels + growths * 1, growths, (3, 3), (1, 1), (1, 1))
        self.conv3 = nn.Conv2d(channels + growths * 2, growths, (3, 3), (1, 1), (1, 1))
        self.conv4 = nn.Conv2d(channels + growths * 3, growths, (3, 3), (1, 1), (1, 1))
        self.conv5 = nn.Conv2d(channels + growths * 4, channels, (3, 3), (1, 1), (1, 1))

        self.leaky_relu = nn.LeakyReLU(0.2, True)
        self.identity = nn.Identity()

    def forward(self, x: Tensor) -> Tensor:
        identity = x

        out1 = self.leaky_relu(self.conv1(x))
        out2 = self.leaky_relu(self.conv2(torch.cat([x, out1], 1)))
        out3 = self.leaky_relu(self.conv3(torch.cat([x, out1, out2], 1)))
        out4 = self.leaky_relu(self.conv4(torch.cat([x, out1, out2, out3], 1)))
        out5 = self.identity(self.conv5(torch.cat([x, out1, out2, out3, out4], 1)))
        out = out5 * 0.2 + identity

        return out


class MiniResidualDenseBlock(nn.Module):
    """Achieves densely connected convolutional layers.
    `Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993v5.pdf>` paper.

    Args:
        channels (int): The number of channels in the input image.
        growths (int): The number of channels that increase in each layer of convolution.
    """

    def __init__(self, channels: int, growths: int) -> None:
        super(MiniResidualDenseBlock, self).__init__()
        self.conv1 = nn.Conv2d(channels + growths * 0, growths, (3, 3), (1, 1), (1, 1))
        self.conv2 = nn.Conv2d(channels + growths * 1, growths, (3, 3), (1, 1), (1, 1))
        self.conv3 = nn.Conv2d(channels + growths * 2, growths, (3, 3), (1, 1), (1, 1))
        self.conv4 = nn.Conv2d(channels + growths * 3, growths, (3, 3), (1, 1), (1, 1))
        self.conv5 = nn.Conv2d(channels + growths * 4, channels, (3, 3), (1, 1), (1, 1))

        self.leaky_relu = nn.LeakyReLU(0.2, True)

    def forward(self, x: Tensor) -> Tensor:
        identity = x

        out1 = self.leaky_relu(self.conv1(x))
        out2 = self.leaky_relu(self.conv2(torch.cat([x, out1], 1)))
        out3 = self.leaky_relu(self.conv3(torch.cat([x, out1, out2], 1)))
        out4 = self.leaky_relu(self.conv4(torch.cat([x, out1, out2, out3], 1)))
        out5 = self.leaky_relu(self.conv5(torch.cat([x, out1, out2, out3, out4], 1)))
        out = out5 * 0.2 + identity

        return out


class ResidualResidualDenseBlock(nn.Module):
    """Multi-layer residual dense convolution block.

    Args:
        channels (int): The number of channels in the input image.
        growths (int): The number of channels that increase in each layer of convolution.
    """

    def __init__(self, channels: int, growths: int) -> None:
        super(ResidualResidualDenseBlock, self).__init__()
        self.rdb1 = ResidualDenseBlock(channels, growths)
        self.rdb2 = ResidualDenseBlock(channels, growths)
        self.rdb3 = ResidualDenseBlock(channels, growths)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        identity = x

        out = self.rdb1(x)
        out = self.rdb2(out)
        out = self.rdb3(out)
        out = out * 0.2 + identity

        return out


class MiniResidualResidualDenseBlock(nn.Module):
    """Multi-layer residual dense convolution block.

    Args:
        channels (int): The number of channels in the input image.
        growths (int): The number of channels that increase in each layer of convolution.
    """

    def __init__(self, channels: int, growths: int) -> None:
        super(MiniResidualResidualDenseBlock, self).__init__()
        self.M_rdb1 = MiniResidualDenseBlock(channels, growths)
        self.M_rdb2 = MiniResidualDenseBlock(channels, growths)
        self.M_rdb3 = MiniResidualDenseBlock(channels, growths)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        identity = x
        out = self.M_rdb1(x)
        out = self.M_rdb2(out)
        out = self.M_rdb3(out)
        out = out * 0.2 + identity
        return out


class Generator(nn.Module):
    def __init__(self) -> None:
        super(Generator, self).__init__()

        # RLNet
        self.RLNetconv_block1 = nn.Conv2d(1, 64, (3, 3), (1, 1), (1, 1))
        RLNettrunk = []
        for _ in range(4):
            RLNettrunk += [ResidualResidualDenseBlock(64, 32)]
        self.RLNettrunk = nn.Sequential(*RLNettrunk)
        self.RLNetconv_block2 = nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1))
        self.RLNetconv_block3 = nn.Sequential(
            nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1)),
            nn.LeakyReLU(0.2, True)
        )
        self.RLNetconv_block4 = nn.Sequential(
            nn.Conv2d(64, 1, (3, 3), (1, 1), (1, 1)),
            nn.Tanh()
        )

        #############################################################################
        # Generator
        self.conv_block1 = nn.Conv2d(1, 64, (3, 3), (1, 1), (1, 1))
        trunk = []
        for _ in range(16):
            trunk += [ResidualResidualDenseBlock(64, 32)]
        self.trunk = nn.Sequential(*trunk)

        # After the feature extraction network, reconnect a layer of convolutional blocks.
        self.conv_block2 = nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1))

        # Upsampling convolutional layer.
        self.upsampling = nn.Sequential(
            nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1)),
            nn.LeakyReLU(0.2, True)
        )

        # Reconnect a layer of convolution block after upsampling.
        self.conv_block3 = nn.Sequential(
            nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1)),
            nn.LeakyReLU(0.2, True)
        )

        self.conv_block4 = nn.Sequential(
            nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1)),
            # nn.Sigmoid()
        )

        self.conv_block0_branch0 = nn.Sequential(
            nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1)),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(64, 128, (3, 3), (1, 1), (1, 1)),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(128, 128, (3, 3), (1, 1), (1, 1)),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(128, 64, (3, 3), (1, 1), (1, 1)),
            nn.Tanh()
        )

        self.conv_block0_branch1 = nn.Sequential(
            nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1)),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(64, 128, (3, 3), (1, 1), (1, 1)),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(128, 128, (3, 3), (1, 1), (1, 1)),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(128, 64, (3, 3), (1, 1), (1, 1)),
            nn.Tanh()
        )

        self.conv_block1_branch0 = nn.Sequential(
            nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1)),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(64, 1, (3, 3), (1, 1), (1, 1)),
            # nn.LeakyReLU(0.2, True),
            # nn.Conv2d(32, 1, (3, 3), (1, 1), (1, 1)),
            nn.Sigmoid()
        )

        self.conv_block1_branch1 = nn.Sequential(
            nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1)),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(64, 1, (3, 3), (1, 1), (1, 1)),
            nn.Sigmoid())

    def _forward_impl(self, x: Tensor) -> Tensor:
        # RLNet
        out1 = self.RLNetconv_block1(x)
        out = self.RLNettrunk(out1)
        out2 = self.RLNetconv_block2(out)
        out = out1 + out2
        out = self.RLNetconv_block3(out)
        out = self.RLNetconv_block4(out)
        rlNet_out = out + x

        # Generator
        out1 = self.conv_block1(rlNet_out)
        out = self.trunk(out1)
        out2 = self.conv_block2(out)
        out = out1 + out2
        out = self.upsampling(F.interpolate(out, scale_factor=2, mode="bicubic"))
        out = self.upsampling(F.interpolate(out, scale_factor=2, mode="bicubic"))
        out = self.conv_block3(out)
        #
        out = self.conv_block4(out)

        # demResidual = out[:, 1:2, :, :]
        # grayResidual = out[:, 0:1, :, :]

        # out = self.trunkRGB(out_4)
        #
        # out_dem = out[:, 3:4, :, :] * 0.2 + demResidual  # DEM images extracted
        # out_rgb = out[:, 0:3, :, :] * 0.2 + rgbResidual  # RGB images extracted

        # ra0
        # out_rgb = rgbResidual + self.conv_block0_branch0(rgbResidual)

        out_dem = out + self.conv_block0_branch1(out)   # out + tanh()
        out_gray = out + self.conv_block0_branch0(out)  # out + tanh()

        out_gray = self.conv_block1_branch0(out_gray)  # sigmoid()
        out_dem = self.conv_block1_branch1(out_dem)    # sigmoid()

        return out_gray, out_dem, rlNet_out

    def forward(self, x: Tensor) -> Tensor:
        return self._forward_impl(x)

    def _initialize_weights(self) -> None:
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
                m.weight.data *= 0.1
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                m.weight.data *= 0.1
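# Reviewer note (added commentary, not part of the original file): two
# observations on the file above -- ResidualDenseBlock is defined twice (the
# second, identical definition shadows the first), and __all__ exports
# "DownSamplingNetwork", which is never defined in this module. A quick shape
# check, with an assumed single-channel 64x64 input:
#
#     import torch
#     g = Generator()
#     gray, dem, rl = g(torch.randn(1, 1, 64, 64))
#     # two bicubic x2 upsamplings -> gray and dem are (1, 1, 256, 256);
#     # rl (the RLNet restoration output) keeps the input size (1, 1, 64, 64)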
spaces/AbandonedMuse/UnlimitedMusicGen/audiocraft/quantization/core_vq.py
DELETED
@@ -1,400 +0,0 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import typing as tp

from einops import rearrange, repeat
import flashy
import torch
from torch import nn, einsum
import torch.nn.functional as F


def exists(val: tp.Optional[tp.Any]) -> bool:
    return val is not None


def default(val: tp.Any, d: tp.Any) -> tp.Any:
    return val if exists(val) else d


def l2norm(t):
    return F.normalize(t, p=2, dim=-1)


def ema_inplace(moving_avg, new, decay: float):
    moving_avg.data.mul_(decay).add_(new, alpha=(1 - decay))


def laplace_smoothing(x, n_categories: int, epsilon: float = 1e-5):
    return (x + epsilon) / (x.sum() + n_categories * epsilon)


def uniform_init(*shape: int):
    t = torch.empty(shape)
    nn.init.kaiming_uniform_(t)
    return t


def sample_vectors(samples, num: int):
    num_samples, device = samples.shape[0], samples.device

    if num_samples >= num:
        indices = torch.randperm(num_samples, device=device)[:num]
    else:
        indices = torch.randint(0, num_samples, (num,), device=device)

    return samples[indices]


def kmeans(samples, num_clusters: int, num_iters: int = 10):
    dim, dtype = samples.shape[-1], samples.dtype

    means = sample_vectors(samples, num_clusters)

    for _ in range(num_iters):
        diffs = rearrange(samples, "n d -> n () d") - rearrange(
            means, "c d -> () c d"
        )
        dists = -(diffs ** 2).sum(dim=-1)

        buckets = dists.max(dim=-1).indices
        bins = torch.bincount(buckets, minlength=num_clusters)
        zero_mask = bins == 0
        bins_min_clamped = bins.masked_fill(zero_mask, 1)

        new_means = buckets.new_zeros(num_clusters, dim, dtype=dtype)
        new_means.scatter_add_(0, repeat(buckets, "n -> n d", d=dim), samples)
        new_means = new_means / bins_min_clamped[..., None]

        means = torch.where(zero_mask[..., None], means, new_means)

    return means, bins


def orthgonal_loss_fn(t):
    # eq (2) from https://arxiv.org/abs/2112.00384
    n = t.shape[0]
    normed_codes = l2norm(t)
    identity = torch.eye(n, device=t.device)
    cosine_sim = einsum("i d, j d -> i j", normed_codes, normed_codes)
    return ((cosine_sim - identity) ** 2).sum() / (n ** 2)

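# --- Editor's sketch, not in the original file: exercising the k-means helper
# above on random data; the function name and shapes are illustrative.
def _kmeans_usage_sketch():
    samples = torch.randn(1024, 8)                 # 1024 vectors of dimension 8
    means, bins = kmeans(samples, num_clusters=16, num_iters=10)
    assert means.shape == (16, 8)                  # one centroid per cluster
    assert int(bins.sum()) == 1024                 # every sample is assigned
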
class EuclideanCodebook(nn.Module):
    """Codebook with Euclidean distance.

    Args:
        dim (int): Dimension.
        codebook_size (int): Codebook size.
        kmeans_init (bool): Whether to use k-means to initialize the codebooks.
            If set to true, run the k-means algorithm on the first training batch and use
            the learned centroids as initialization.
        kmeans_iters (int): Number of iterations used for k-means algorithm at initialization.
        decay (float): Decay for exponential moving average over the codebooks.
        epsilon (float): Epsilon value for numerical stability.
        threshold_ema_dead_code (int): Threshold for dead code expiration. Replace any codes
            that have an exponential moving average cluster size less than the specified threshold with
            a randomly selected vector from the current batch.
    """
    def __init__(
        self,
        dim: int,
        codebook_size: int,
        kmeans_init: bool = False,
        kmeans_iters: int = 10,
        decay: float = 0.8,
        epsilon: float = 1e-5,
        threshold_ema_dead_code: int = 2,
    ):
        super().__init__()
        self.decay = decay
        init_fn: tp.Union[tp.Callable[..., torch.Tensor], tp.Any] = uniform_init if not kmeans_init else torch.zeros
        embed = init_fn(codebook_size, dim)

        self.codebook_size = codebook_size

        self.kmeans_iters = kmeans_iters
        self.epsilon = epsilon
        self.threshold_ema_dead_code = threshold_ema_dead_code

        self.register_buffer("inited", torch.Tensor([not kmeans_init]))
        self.register_buffer("cluster_size", torch.zeros(codebook_size))
        self.register_buffer("embed", embed)
        self.register_buffer("embed_avg", embed.clone())

    @torch.jit.ignore
    def init_embed_(self, data):
        if self.inited:
            return

        embed, cluster_size = kmeans(data, self.codebook_size, self.kmeans_iters)
        self.embed.data.copy_(embed)
        self.embed_avg.data.copy_(embed.clone())
        self.cluster_size.data.copy_(cluster_size)
        self.inited.data.copy_(torch.Tensor([True]))
        # Make sure all buffers across workers are in sync after initialization
        flashy.distrib.broadcast_tensors(self.buffers())

    def replace_(self, samples, mask):
        modified_codebook = torch.where(
            mask[..., None], sample_vectors(samples, self.codebook_size), self.embed
        )
        self.embed.data.copy_(modified_codebook)

    def expire_codes_(self, batch_samples):
        if self.threshold_ema_dead_code == 0:
            return

        expired_codes = self.cluster_size < self.threshold_ema_dead_code
        if not torch.any(expired_codes):
            return

        batch_samples = rearrange(batch_samples, "... d -> (...) d")
        self.replace_(batch_samples, mask=expired_codes)
        flashy.distrib.broadcast_tensors(self.buffers())

    def preprocess(self, x):
        x = rearrange(x, "... d -> (...) d")
        return x

    def quantize(self, x):
        embed = self.embed.t()
        dist = -(
            x.pow(2).sum(1, keepdim=True)
            - 2 * x @ embed
            + embed.pow(2).sum(0, keepdim=True)
        )
        embed_ind = dist.max(dim=-1).indices
        return embed_ind

    def postprocess_emb(self, embed_ind, shape):
        return embed_ind.view(*shape[:-1])

    def dequantize(self, embed_ind):
        quantize = F.embedding(embed_ind, self.embed)
        return quantize

    def encode(self, x):
        shape = x.shape
        # pre-process
        x = self.preprocess(x)
        # quantize
        embed_ind = self.quantize(x)
        # post-process
        embed_ind = self.postprocess_emb(embed_ind, shape)
        return embed_ind

    def decode(self, embed_ind):
        quantize = self.dequantize(embed_ind)
        return quantize

    def forward(self, x):
        shape, dtype = x.shape, x.dtype
        x = self.preprocess(x)
        self.init_embed_(x)

        embed_ind = self.quantize(x)
        embed_onehot = F.one_hot(embed_ind, self.codebook_size).type(dtype)
        embed_ind = self.postprocess_emb(embed_ind, shape)
        quantize = self.dequantize(embed_ind)

        if self.training:
            # We do the expiry of code at that point as buffers are in sync
            # and all the workers will take the same decision.
            self.expire_codes_(x)
            ema_inplace(self.cluster_size, embed_onehot.sum(0), self.decay)
            embed_sum = x.t() @ embed_onehot
            ema_inplace(self.embed_avg, embed_sum.t(), self.decay)
            cluster_size = (
                laplace_smoothing(self.cluster_size, self.codebook_size, self.epsilon)
                * self.cluster_size.sum()
            )
            embed_normalized = self.embed_avg / cluster_size.unsqueeze(1)
            self.embed.data.copy_(embed_normalized)

        return quantize, embed_ind

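# --- Editor's sketch, not in the original file: the codebook's encode/decode
# round trip on random vectors; the function name and shapes are illustrative.
def _codebook_usage_sketch():
    cb = EuclideanCodebook(dim=4, codebook_size=32)
    x = torch.randn(10, 4)
    codes = cb.encode(x)        # LongTensor of nearest-centroid indices, (10,)
    x_hat = cb.decode(codes)    # (10, 4): the centroid vectors looked back up
    assert codes.shape == (10,) and x_hat.shape == x.shape
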
class VectorQuantization(nn.Module):
    """Vector quantization implementation.
    Currently supports only euclidean distance.

    Args:
        dim (int): Dimension.
        codebook_size (int): Codebook size.
        codebook_dim (int): Codebook dimension. If not defined, uses the specified dimension in dim.
        decay (float): Decay for exponential moving average over the codebooks.
        epsilon (float): Epsilon value for numerical stability.
        kmeans_init (bool): Whether to use kmeans to initialize the codebooks.
        kmeans_iters (int): Number of iterations used for kmeans initialization.
        threshold_ema_dead_code (int): Threshold for dead code expiration. Replace any codes
            that have an exponential moving average cluster size less than the specified threshold with
            a randomly selected vector from the current batch.
        channels_last (bool): Channels are the last dimension in the input tensors.
        commitment_weight (float): Weight for commitment loss.
        orthogonal_reg_weight (float): Orthogonal regularization weight.
        orthogonal_reg_active_codes_only (bool): Apply orthogonal regularization only on active codes.
        orthogonal_reg_max_codes (optional int): Maximum number of codes to consider
            for orthogonal regularization.
    """
    def __init__(
        self,
        dim: int,
        codebook_size: int,
        codebook_dim: tp.Optional[int] = None,
        decay: float = 0.8,
        epsilon: float = 1e-5,
        kmeans_init: bool = False,
        kmeans_iters: int = 10,
        threshold_ema_dead_code: int = 2,
        channels_last: bool = False,
        commitment_weight: float = 1.,
        orthogonal_reg_weight: float = 0.0,
        orthogonal_reg_active_codes_only: bool = False,
        orthogonal_reg_max_codes: tp.Optional[int] = None,
    ):
        super().__init__()
        _codebook_dim: int = default(codebook_dim, dim)

        requires_projection = _codebook_dim != dim
        self.project_in = (nn.Linear(dim, _codebook_dim) if requires_projection else nn.Identity())
        self.project_out = (nn.Linear(_codebook_dim, dim) if requires_projection else nn.Identity())

        self.epsilon = epsilon
        self.commitment_weight = commitment_weight

        self.orthogonal_reg_weight = orthogonal_reg_weight
        self.orthogonal_reg_active_codes_only = orthogonal_reg_active_codes_only
        self.orthogonal_reg_max_codes = orthogonal_reg_max_codes

        self._codebook = EuclideanCodebook(dim=_codebook_dim, codebook_size=codebook_size,
                                           kmeans_init=kmeans_init, kmeans_iters=kmeans_iters,
                                           decay=decay, epsilon=epsilon,
                                           threshold_ema_dead_code=threshold_ema_dead_code)
        self.codebook_size = codebook_size

        self.channels_last = channels_last

    @property
    def codebook(self):
        return self._codebook.embed

    @property
    def inited(self):
        return self._codebook.inited

    def _preprocess(self, x):
        if not self.channels_last:
            x = rearrange(x, "b d n -> b n d")
        return x

    def _postprocess(self, quantize):
        if not self.channels_last:
            quantize = rearrange(quantize, "b n d -> b d n")
        return quantize

    def encode(self, x):
        x = self._preprocess(x)
        x = self.project_in(x)
        embed_in = self._codebook.encode(x)
        return embed_in

    def decode(self, embed_ind):
        quantize = self._codebook.decode(embed_ind)
        quantize = self.project_out(quantize)
        quantize = self._postprocess(quantize)
        return quantize

    def forward(self, x):
        device = x.device
        x = self._preprocess(x)

        x = self.project_in(x)
        quantize, embed_ind = self._codebook(x)

        if self.training:
            quantize = x + (quantize - x).detach()

        loss = torch.tensor([0.0], device=device, requires_grad=self.training)

        if self.training:
            if self.commitment_weight > 0:
                commit_loss = F.mse_loss(quantize.detach(), x)
                loss = loss + commit_loss * self.commitment_weight

            if self.orthogonal_reg_weight > 0:
                codebook = self.codebook

                if self.orthogonal_reg_active_codes_only:
                    # only calculate orthogonal loss for the activated codes for this batch
                    unique_code_ids = torch.unique(embed_ind)
                    codebook = codebook[unique_code_ids]

                num_codes = codebook.shape[0]
                if exists(self.orthogonal_reg_max_codes) and num_codes > self.orthogonal_reg_max_codes:
                    rand_ids = torch.randperm(num_codes, device=device)[:self.orthogonal_reg_max_codes]
                    codebook = codebook[rand_ids]

                orthogonal_reg_loss = orthgonal_loss_fn(codebook)
                loss = loss + orthogonal_reg_loss * self.orthogonal_reg_weight

        quantize = self.project_out(quantize)
        quantize = self._postprocess(quantize)

        return quantize, embed_ind, loss

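# --- Editor's sketch, not in the original file: the `x + (quantize - x).detach()`
# line above is the straight-through estimator; `torch.round` stands in for the
# codebook lookup. The forward value equals the quantized value, but the
# gradient flows to `x` as if quantization were the identity.
def _straight_through_sketch():
    x = torch.randn(3, requires_grad=True)
    quantize = torch.round(x)                     # non-differentiable stand-in
    st = x + (quantize - x).detach()
    assert torch.allclose(st, quantize)           # forward: quantized values
    st.sum().backward()
    assert torch.allclose(x.grad, torch.ones_like(x))  # backward: identity
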
class ResidualVectorQuantization(nn.Module):
    """Residual vector quantization implementation.

    Follows Algorithm 1. in https://arxiv.org/pdf/2107.03312.pdf
    """
    def __init__(self, *, num_quantizers, **kwargs):
        super().__init__()
        self.layers = nn.ModuleList(
            [VectorQuantization(**kwargs) for _ in range(num_quantizers)]
        )

    def forward(self, x, n_q: tp.Optional[int] = None):
        quantized_out = 0.0
        residual = x

        all_losses = []
        all_indices = []

        n_q = n_q or len(self.layers)

        for i, layer in enumerate(self.layers[:n_q]):
            quantized, indices, loss = layer(residual)
            residual = residual - quantized
            quantized_out = quantized_out + quantized
            all_indices.append(indices)
            all_losses.append(loss)

        out_losses, out_indices = map(torch.stack, (all_losses, all_indices))
        return quantized_out, out_indices, out_losses

    def encode(self, x: torch.Tensor, n_q: tp.Optional[int] = None) -> torch.Tensor:
        residual = x
        all_indices = []
        n_q = n_q or len(self.layers)
        for layer in self.layers[:n_q]:
            indices = layer.encode(residual)
            quantized = layer.decode(indices)
            residual = residual - quantized
            all_indices.append(indices)
        out_indices = torch.stack(all_indices)
        return out_indices

    def decode(self, q_indices: torch.Tensor) -> torch.Tensor:
        quantized_out = torch.tensor(0.0, device=q_indices.device)
        for i, indices in enumerate(q_indices):
            layer = self.layers[i]
            quantized = layer.decode(indices)
            quantized_out = quantized_out + quantized
        return quantized_out
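Putting the pieces together, a hedged usage sketch of the residual stack above (untrained modules, illustrative shapes): each stacked quantizer encodes the residual left by the previous one, and decode sums the per-layer centroid lookups back up.

# Assumes the classes defined above are in scope.
import torch

rvq = ResidualVectorQuantization(num_quantizers=4, dim=16, codebook_size=64)
x = torch.randn(2, 16, 50)     # VectorQuantization expects (batch, dim, time)
codes = rvq.encode(x)          # (4, 2, 50): one index map per quantizer layer
x_hat = rvq.decode(codes)      # (2, 16, 50): sum of per-layer dequantizations
assert x_hat.shape == x.shape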
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/intouching/InTouching.d.ts
DELETED
@@ -1,2 +0,0 @@
import InTouching from '../../../plugins/intouching'
export default InTouching;
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/scrollablepanel/scrollableblock/GetChildrenSizers.js
DELETED
@@ -1,10 +0,0 @@
var GetChildrenSizers = function(out) {
    if (out === undefined) {
        out = [];
    }
    if (this.child && this.child.isRexSizer) {
        out.push(this.child);
    }
    return out;
}
export default GetChildrenSizers;
spaces/Amrrs/DragGan-Inversion/torch_utils/ops/upfirdn2d.cpp
DELETED
@@ -1,107 +0,0 @@
// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.

#include <torch/extension.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/cuda/CUDAGuard.h>
#include "upfirdn2d.h"

//------------------------------------------------------------------------

static torch::Tensor upfirdn2d(torch::Tensor x, torch::Tensor f, int upx, int upy, int downx, int downy, int padx0, int padx1, int pady0, int pady1, bool flip, float gain)
{
    // Validate arguments.
    TORCH_CHECK(x.is_cuda(), "x must reside on CUDA device");
    TORCH_CHECK(f.device() == x.device(), "f must reside on the same device as x");
    TORCH_CHECK(f.dtype() == torch::kFloat, "f must be float32");
    TORCH_CHECK(x.numel() <= INT_MAX, "x is too large");
    TORCH_CHECK(f.numel() <= INT_MAX, "f is too large");
    TORCH_CHECK(x.numel() > 0, "x has zero size");
    TORCH_CHECK(f.numel() > 0, "f has zero size");
    TORCH_CHECK(x.dim() == 4, "x must be rank 4");
    TORCH_CHECK(f.dim() == 2, "f must be rank 2");
    TORCH_CHECK((x.size(0)-1)*x.stride(0) + (x.size(1)-1)*x.stride(1) + (x.size(2)-1)*x.stride(2) + (x.size(3)-1)*x.stride(3) <= INT_MAX, "x memory footprint is too large");
    TORCH_CHECK(f.size(0) >= 1 && f.size(1) >= 1, "f must be at least 1x1");
    TORCH_CHECK(upx >= 1 && upy >= 1, "upsampling factor must be at least 1");
    TORCH_CHECK(downx >= 1 && downy >= 1, "downsampling factor must be at least 1");

    // Create output tensor.
    const at::cuda::OptionalCUDAGuard device_guard(device_of(x));
    int outW = ((int)x.size(3) * upx + padx0 + padx1 - (int)f.size(1) + downx) / downx;
    int outH = ((int)x.size(2) * upy + pady0 + pady1 - (int)f.size(0) + downy) / downy;
    TORCH_CHECK(outW >= 1 && outH >= 1, "output must be at least 1x1");
    torch::Tensor y = torch::empty({x.size(0), x.size(1), outH, outW}, x.options(), x.suggest_memory_format());
    TORCH_CHECK(y.numel() <= INT_MAX, "output is too large");
    TORCH_CHECK((y.size(0)-1)*y.stride(0) + (y.size(1)-1)*y.stride(1) + (y.size(2)-1)*y.stride(2) + (y.size(3)-1)*y.stride(3) <= INT_MAX, "output memory footprint is too large");

    // Initialize CUDA kernel parameters.
    upfirdn2d_kernel_params p;
    p.x = x.data_ptr();
    p.f = f.data_ptr<float>();
    p.y = y.data_ptr();
    p.up = make_int2(upx, upy);
    p.down = make_int2(downx, downy);
    p.pad0 = make_int2(padx0, pady0);
    p.flip = (flip) ? 1 : 0;
    p.gain = gain;
    p.inSize = make_int4((int)x.size(3), (int)x.size(2), (int)x.size(1), (int)x.size(0));
    p.inStride = make_int4((int)x.stride(3), (int)x.stride(2), (int)x.stride(1), (int)x.stride(0));
    p.filterSize = make_int2((int)f.size(1), (int)f.size(0));
    p.filterStride = make_int2((int)f.stride(1), (int)f.stride(0));
    p.outSize = make_int4((int)y.size(3), (int)y.size(2), (int)y.size(1), (int)y.size(0));
    p.outStride = make_int4((int)y.stride(3), (int)y.stride(2), (int)y.stride(1), (int)y.stride(0));
    p.sizeMajor = (p.inStride.z == 1) ? p.inSize.w : p.inSize.w * p.inSize.z;
    p.sizeMinor = (p.inStride.z == 1) ? p.inSize.z : 1;

    // Choose CUDA kernel.
    upfirdn2d_kernel_spec spec;
    AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "upfirdn2d_cuda", [&]
    {
        spec = choose_upfirdn2d_kernel<scalar_t>(p);
    });

    // Set looping options.
    p.loopMajor = (p.sizeMajor - 1) / 16384 + 1;
    p.loopMinor = spec.loopMinor;
    p.loopX = spec.loopX;
    p.launchMinor = (p.sizeMinor - 1) / p.loopMinor + 1;
    p.launchMajor = (p.sizeMajor - 1) / p.loopMajor + 1;

    // Compute grid size.
    dim3 blockSize, gridSize;
    if (spec.tileOutW < 0) // large
    {
        blockSize = dim3(4, 32, 1);
        gridSize = dim3(
            ((p.outSize.y - 1) / blockSize.x + 1) * p.launchMinor,
            (p.outSize.x - 1) / (blockSize.y * p.loopX) + 1,
            p.launchMajor);
    }
    else // small
    {
        blockSize = dim3(256, 1, 1);
        gridSize = dim3(
            ((p.outSize.y - 1) / spec.tileOutH + 1) * p.launchMinor,
            (p.outSize.x - 1) / (spec.tileOutW * p.loopX) + 1,
            p.launchMajor);
    }

    // Launch CUDA kernel.
    void* args[] = {&p};
    AT_CUDA_CHECK(cudaLaunchKernel(spec.kernel, gridSize, blockSize, args, 0, at::cuda::getCurrentCUDAStream()));
    return y;
}

//------------------------------------------------------------------------

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m)
{
    m.def("upfirdn2d", &upfirdn2d);
}

//------------------------------------------------------------------------
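The `outW`/`outH` arithmetic above is the standard upfirdn output-size formula. A small Python mirror of the same integer math, with illustrative numbers for a 2x upsample through a 4-tap filter (the pad0=2, pad1=1 choice is how the Python wrapper around this op typically pads that case):

# Python mirror of the output-size computation in the C++ op above.
def upfirdn2d_out_size(in_size: int, up: int, down: int,
                       pad0: int, pad1: int, taps: int) -> int:
    return (in_size * up + pad0 + pad1 - taps + down) // down

# Illustrative: a 64-wide input, upsampled 2x through a 4-tap filter with
# pad0=2 and pad1=1, yields exactly double the width:
# (64*2 + 2 + 1 - 4 + 1) // 1 = 128
assert upfirdn2d_out_size(64, up=2, down=1, pad0=2, pad1=1, taps=4) == 128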
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_panorama.py
DELETED
@@ -1,725 +0,0 @@
# Copyright 2023 MultiDiffusion Authors and The HuggingFace Team. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy
import inspect
import warnings
from typing import Any, Callable, Dict, List, Optional, Union

import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer

from ...image_processor import VaeImageProcessor
from ...loaders import LoraLoaderMixin, TextualInversionLoaderMixin
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler
from ...utils import logging, randn_tensor, replace_example_docstring
from ..pipeline_utils import DiffusionPipeline
from . import StableDiffusionPipelineOutput
from .safety_checker import StableDiffusionSafetyChecker


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> import torch
        >>> from diffusers import StableDiffusionPanoramaPipeline, DDIMScheduler

        >>> model_ckpt = "stabilityai/stable-diffusion-2-base"
        >>> scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        >>> pipe = StableDiffusionPanoramaPipeline.from_pretrained(
        ...     model_ckpt, scheduler=scheduler, torch_dtype=torch.float16
        ... )

        >>> pipe = pipe.to("cuda")

        >>> prompt = "a photo of the dolomites"
        >>> image = pipe(prompt).images[0]
        ```
"""


class StableDiffusionPanoramaPipeline(DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin):
    r"""
    Pipeline for text-to-image generation using MultiDiffusion.

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
    implemented for all pipelines (downloading, saving, running on a particular device, etc.).

    Args:
        vae ([`AutoencoderKL`]):
            Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
        text_encoder ([`~transformers.CLIPTextModel`]):
            Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
        tokenizer ([`~transformers.CLIPTokenizer`]):
            A `CLIPTokenizer` to tokenize text.
        unet ([`UNet2DConditionModel`]):
            A `UNet2DConditionModel` to denoise the encoded image latents.
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
            [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
        safety_checker ([`StableDiffusionSafetyChecker`]):
            Classification module that estimates whether generated images could be considered offensive or harmful.
            Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
            about a model's potential harms.
        feature_extractor ([`~transformers.CLIPImageProcessor`]):
            A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
    """
    _optional_components = ["safety_checker", "feature_extractor"]

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: DDIMScheduler,
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
        requires_safety_checker: bool = True,
    ):
        super().__init__()

        if safety_checker is None and requires_safety_checker:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        if safety_checker is not None and feature_extractor is None:
            raise ValueError(
                f"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
                " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
            )

        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )
        self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
        self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
        self.register_to_config(requires_safety_checker=requires_safety_checker)

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_slicing
    def enable_vae_slicing(self):
        r"""
        Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
        compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
        """
        self.vae.enable_slicing()

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_slicing
    def disable_vae_slicing(self):
        r"""
        Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
        computing decoding in one step.
        """
        self.vae.disable_slicing()

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt
    def _encode_prompt(
        self,
        prompt,
        device,
        num_images_per_prompt,
        do_classifier_free_guidance,
        negative_prompt=None,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
        lora_scale: Optional[float] = None,
    ):
        r"""
        Encodes the prompt into text encoder hidden states.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                prompt to be encoded
            device: (`torch.device`):
                torch device
            num_images_per_prompt (`int`):
                number of images that should be generated per prompt
            do_classifier_free_guidance (`bool`):
                whether to use classifier free guidance or not
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
                less than `1`).
            prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            lora_scale (`float`, *optional*):
                A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
        """
        # set lora scale so that monkey patched LoRA
        # function of text encoder can correctly access it
        if lora_scale is not None and isinstance(self, LoraLoaderMixin):
            self._lora_scale = lora_scale

        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        if prompt_embeds is None:
            # textual inversion: process multi-vector tokens if necessary
            if isinstance(self, TextualInversionLoaderMixin):
                prompt = self.maybe_convert_prompt(prompt, self.tokenizer)

            text_inputs = self.tokenizer(
                prompt,
                padding="max_length",
                max_length=self.tokenizer.model_max_length,
                truncation=True,
                return_tensors="pt",
            )
            text_input_ids = text_inputs.input_ids
            untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids

            if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
                text_input_ids, untruncated_ids
            ):
                removed_text = self.tokenizer.batch_decode(
                    untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
                )
                logger.warning(
                    "The following part of your input was truncated because CLIP can only handle sequences up to"
                    f" {self.tokenizer.model_max_length} tokens: {removed_text}"
                )

            if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
                attention_mask = text_inputs.attention_mask.to(device)
            else:
                attention_mask = None

            prompt_embeds = self.text_encoder(
                text_input_ids.to(device),
                attention_mask=attention_mask,
            )
            prompt_embeds = prompt_embeds[0]

        prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)

        bs_embed, seq_len, _ = prompt_embeds.shape
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
        prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance and negative_prompt_embeds is None:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif prompt is not None and type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            # textual inversion: process multi-vector tokens if necessary
            if isinstance(self, TextualInversionLoaderMixin):
                uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)

            max_length = prompt_embeds.shape[1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )

            if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
                attention_mask = uncond_input.attention_mask.to(device)
            else:
                attention_mask = None

            negative_prompt_embeds = self.text_encoder(
                uncond_input.input_ids.to(device),
                attention_mask=attention_mask,
            )
            negative_prompt_embeds = negative_prompt_embeds[0]

        if do_classifier_free_guidance:
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]

            negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)

            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker
    def run_safety_checker(self, image, device, dtype):
        if self.safety_checker is None:
            has_nsfw_concept = None
        else:
            if torch.is_tensor(image):
                feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
            else:
                feature_extractor_input = self.image_processor.numpy_to_pil(image)
            safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
            image, has_nsfw_concept = self.safety_checker(
                images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
            )
        return image, has_nsfw_concept

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents
    def decode_latents(self, latents):
        warnings.warn(
            "The decode_latents method is deprecated and will be removed in a future version. Please"
            " use VaeImageProcessor instead",
            FutureWarning,
        )
        latents = 1 / self.vae.config.scaling_factor * latents
        image = self.vae.decode(latents, return_dict=False)[0]
        image = (image / 2 + 0.5).clamp(0, 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        return image

    def decode_latents_with_padding(self, latents, padding=8):
        # Add padding to latents for circular inference
        # padding is the number of latents to add on each side
        # it would slightly increase the memory usage, but remove the boundary artifacts
        latents = 1 / self.vae.config.scaling_factor * latents
        latents_left = latents[..., :padding]
        latents_right = latents[..., -padding:]
        latents = torch.cat((latents_right, latents, latents_left), axis=-1)
        image = self.vae.decode(latents, return_dict=False)[0]
        padding_pix = self.vae_scale_factor * padding
        image = image[..., padding_pix:-padding_pix]
        return image

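    # Editor's note, not in the original file: with the default padding=8 and a
    # vae_scale_factor of 8, the latent row is wrapped by 8 columns on each side
    # before decoding, and padding_pix = 8 * 8 = 64 pixels are cropped from each
    # side afterwards, so the output width matches a plain decode while the seam
    # is decoded with true wrap-around context.
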
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
    def prepare_extra_step_kwargs(self, generator, eta):
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]

        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator
        return extra_step_kwargs

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.check_inputs
    def check_inputs(
        self,
        prompt,
        height,
        width,
        callback_steps,
        negative_prompt=None,
        prompt_embeds=None,
        negative_prompt_embeds=None,
    ):
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        if prompt is not None and prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
                " only forward one of the two."
            )
        elif prompt is None and prompt_embeds is None:
            raise ValueError(
                "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
            )
        elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if negative_prompt is not None and negative_prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
            )

        if prompt_embeds is not None and negative_prompt_embeds is not None:
            if prompt_embeds.shape != negative_prompt_embeds.shape:
                raise ValueError(
                    "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
                    f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
                    f" {negative_prompt_embeds.shape}."
                )

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
    def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
        shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            latents = latents.to(device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        return latents

    def get_views(self, panorama_height, panorama_width, window_size=64, stride=8, circular_padding=False):
        # Here, we define the mappings F_i (see Eq. 7 in the MultiDiffusion paper https://arxiv.org/abs/2302.08113)
        # if panorama's height/width < window_size, num_blocks of height/width should return 1
        panorama_height /= 8
        panorama_width /= 8
        num_blocks_height = (panorama_height - window_size) // stride + 1 if panorama_height > window_size else 1
        if circular_padding:
            num_blocks_width = panorama_width // stride if panorama_width > window_size else 1
        else:
            num_blocks_width = (panorama_width - window_size) // stride + 1 if panorama_width > window_size else 1
        total_num_blocks = int(num_blocks_height * num_blocks_width)
        views = []
        for i in range(total_num_blocks):
            h_start = int((i // num_blocks_width) * stride)
            h_end = h_start + window_size
            w_start = int((i % num_blocks_width) * stride)
            w_end = w_start + window_size
            views.append((h_start, h_end, w_start, w_end))
        return views

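    # Editor's note, not in the original file: for the default 512x2048 call,
    # the arithmetic above works on the 64x256 latent grid, giving
    #   num_blocks_height = 1                        (64 is not > window_size 64)
    #   num_blocks_width  = (256 - 64) // 8 + 1 = 25
    # i.e. 25 overlapping 64x64 views, the last of which is (0, 64, 192, 256).
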
441 |
-
@torch.no_grad()
|
442 |
-
@replace_example_docstring(EXAMPLE_DOC_STRING)
|
443 |
-
def __call__(
|
444 |
-
self,
|
445 |
-
prompt: Union[str, List[str]] = None,
|
446 |
-
height: Optional[int] = 512,
|
447 |
-
width: Optional[int] = 2048,
|
448 |
-
num_inference_steps: int = 50,
|
449 |
-
guidance_scale: float = 7.5,
|
450 |
-
view_batch_size: int = 1,
|
451 |
-
negative_prompt: Optional[Union[str, List[str]]] = None,
|
452 |
-
num_images_per_prompt: Optional[int] = 1,
|
453 |
-
eta: float = 0.0,
|
454 |
-
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
|
455 |
-
latents: Optional[torch.FloatTensor] = None,
|
456 |
-
prompt_embeds: Optional[torch.FloatTensor] = None,
|
457 |
-
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
|
458 |
-
output_type: Optional[str] = "pil",
|
459 |
-
return_dict: bool = True,
|
460 |
-
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
|
461 |
-
callback_steps: Optional[int] = 1,
|
462 |
-
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
|
463 |
-
circular_padding: bool = False,
|
464 |
-
):
|
465 |
-
r"""
|
466 |
-
The call function to the pipeline for generation.
|
467 |
-
|
468 |
-
Args:
|
469 |
-
prompt (`str` or `List[str]`, *optional*):
|
470 |
-
The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
|
471 |
-
height (`int`, *optional*, defaults to 512):
|
472 |
-
The height in pixels of the generated image.
|
473 |
-
width (`int`, *optional*, defaults to 2048):
|
474 |
-
The width in pixels of the generated image. The width is kept high because the pipeline is supposed
|
475 |
-
generate panorama-like images.
|
476 |
-
num_inference_steps (`int`, *optional*, defaults to 50):
|
477 |
-
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
478 |
-
expense of slower inference.
|
479 |
-
guidance_scale (`float`, *optional*, defaults to 7.5):
|
480 |
-
A higher guidance scale value encourages the model to generate images closely linked to the text
|
481 |
-
`prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
|
482 |
-
view_batch_size (`int`, *optional*, defaults to 1):
|
483 |
-
The batch size to denoise split views. For some GPUs with high performance, higher view batch size can
|
484 |
-
speedup the generation and increase the VRAM usage.
|
485 |
-
negative_prompt (`str` or `List[str]`, *optional*):
|
486 |
-
The prompt or prompts to guide what to not include in image generation. If not defined, you need to
|
487 |
-
pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
|
488 |
-
num_images_per_prompt (`int`, *optional*, defaults to 1):
|
489 |
-
The number of images to generate per prompt.
|
490 |
-
eta (`float`, *optional*, defaults to 0.0):
|
491 |
-
Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
|
492 |
-
to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
|
493 |
-
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
|
494 |
-
A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
|
495 |
-
generation deterministic.
|
496 |
-
latents (`torch.FloatTensor`, *optional*):
|
497 |
-
Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
|
498 |
-
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
|
499 |
-
tensor is generated by sampling using the supplied random `generator`.
|
500 |
-
prompt_embeds (`torch.FloatTensor`, *optional*):
|
501 |
-
Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
|
502 |
-
provided, text embeddings are generated from the `prompt` input argument.
|
503 |
-
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
|
504 |
-
Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
|
505 |
-
not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
|
506 |
-
output_type (`str`, *optional*, defaults to `"pil"`):
|
507 |
-
The output format of the generated image. Choose between `PIL.Image` or `np.array`.
|
508 |
-
return_dict (`bool`, *optional*, defaults to `True`):
|
509 |
-
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
|
510 |
-
plain tuple.
|
511 |
-
callback (`Callable`, *optional*):
|
512 |
-
A function that calls every `callback_steps` steps during inference. The function is called with the
|
513 |
-
following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
|
514 |
-
callback_steps (`int`, *optional*, defaults to 1):
|
515 |
-
The frequency at which the `callback` function is called. If not specified, the callback is called at
|
516 |
-
every step.
|
517 |
-
cross_attention_kwargs (`dict`, *optional*):
|
518 |
-
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
|
519 |
-
`self.processor` in
|
520 |
-
[diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
|
521 |
-
circular_padding (`bool`, *optional*, defaults to `False`):
|
522 |
-
If set to `True`, circular padding is applied to ensure there are no stitching artifacts. Circular
|
523 |
-
padding allows the model to seamlessly generate a transition from the rightmost part of the image to
|
524 |
-
the leftmost part, maintaining consistency in a 360-degree sense.
|
525 |
-
|
526 |
-
Examples:
|
527 |
-
|
528 |
-
Returns:
|
529 |
-
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
|
530 |
-
If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
|
531 |
-
otherwise a `tuple` is returned where the first element is a list with the generated images and the
|
532 |
-
second element is a list of `bool`s indicating whether the corresponding generated image contains
|
533 |
-
"not-safe-for-work" (nsfw) content.
|
534 |
-
"""
|
535 |
-
# 0. Default height and width to unet
|
536 |
-
height = height or self.unet.config.sample_size * self.vae_scale_factor
|
537 |
-
width = width or self.unet.config.sample_size * self.vae_scale_factor
|
538 |
-
|
539 |
-
# 1. Check inputs. Raise error if not correct
|
540 |
-
self.check_inputs(
|
541 |
-
-            prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds
-        )
-
-        # 2. Define call parameters
-        if prompt is not None and isinstance(prompt, str):
-            batch_size = 1
-        elif prompt is not None and isinstance(prompt, list):
-            batch_size = len(prompt)
-        else:
-            batch_size = prompt_embeds.shape[0]
-
-        device = self._execution_device
-        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
-        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
-        # corresponds to doing no classifier free guidance.
-        do_classifier_free_guidance = guidance_scale > 1.0
-
-        # 3. Encode input prompt
-        text_encoder_lora_scale = (
-            cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
-        )
-        prompt_embeds = self._encode_prompt(
-            prompt,
-            device,
-            num_images_per_prompt,
-            do_classifier_free_guidance,
-            negative_prompt,
-            prompt_embeds=prompt_embeds,
-            negative_prompt_embeds=negative_prompt_embeds,
-            lora_scale=text_encoder_lora_scale,
-        )
-
-        # 4. Prepare timesteps
-        self.scheduler.set_timesteps(num_inference_steps, device=device)
-        timesteps = self.scheduler.timesteps
-
-        # 5. Prepare latent variables
-        num_channels_latents = self.unet.config.in_channels
-        latents = self.prepare_latents(
-            batch_size * num_images_per_prompt,
-            num_channels_latents,
-            height,
-            width,
-            prompt_embeds.dtype,
-            device,
-            generator,
-            latents,
-        )
-
-        # 6. Define panorama grid and initialize views for synthesis.
-        # prepare batch grid
-        views = self.get_views(height, width, circular_padding=circular_padding)
-        views_batch = [views[i : i + view_batch_size] for i in range(0, len(views), view_batch_size)]
-        views_scheduler_status = [copy.deepcopy(self.scheduler.__dict__)] * len(views_batch)
-        count = torch.zeros_like(latents)
-        value = torch.zeros_like(latents)
-
-        # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
-        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
-
-        # 8. Denoising loop
-        # Each denoising step also includes refinement of the latents with respect to the
-        # views.
-        num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
-        with self.progress_bar(total=num_inference_steps) as progress_bar:
-            for i, t in enumerate(timesteps):
-                count.zero_()
-                value.zero_()
-
-                # generate views
-                # Here, we iterate through different spatial crops of the latents and denoise them. These
-                # denoised (latent) crops are then averaged to produce the final latent
-                # for the current timestep via MultiDiffusion. Please see Sec. 4.1 in the
-                # MultiDiffusion paper for more details: https://arxiv.org/abs/2302.08113
-                # Batch views denoise
-                for j, batch_view in enumerate(views_batch):
-                    vb_size = len(batch_view)
-                    # get the latents corresponding to the current view coordinates
-                    if circular_padding:
-                        latents_for_view = []
-                        for h_start, h_end, w_start, w_end in batch_view:
-                            if w_end > latents.shape[3]:
-                                # Add circular horizontal padding
-                                latent_view = torch.cat(
-                                    (
-                                        latents[:, :, h_start:h_end, w_start:],
-                                        latents[:, :, h_start:h_end, : w_end - latents.shape[3]],
-                                    ),
-                                    axis=-1,
-                                )
-                            else:
-                                latent_view = latents[:, :, h_start:h_end, w_start:w_end]
-                            latents_for_view.append(latent_view)
-                        latents_for_view = torch.cat(latents_for_view)
-                    else:
-                        latents_for_view = torch.cat(
-                            [
-                                latents[:, :, h_start:h_end, w_start:w_end]
-                                for h_start, h_end, w_start, w_end in batch_view
-                            ]
-                        )
-
-                    # rematch block's scheduler status
-                    self.scheduler.__dict__.update(views_scheduler_status[j])
-
-                    # expand the latents if we are doing classifier free guidance
-                    latent_model_input = (
-                        latents_for_view.repeat_interleave(2, dim=0)
-                        if do_classifier_free_guidance
-                        else latents_for_view
-                    )
-                    latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
-
-                    # repeat prompt_embeds for batch
-                    prompt_embeds_input = torch.cat([prompt_embeds] * vb_size)
-
-                    # predict the noise residual
-                    noise_pred = self.unet(
-                        latent_model_input,
-                        t,
-                        encoder_hidden_states=prompt_embeds_input,
-                        cross_attention_kwargs=cross_attention_kwargs,
-                    ).sample
-
-                    # perform guidance
-                    if do_classifier_free_guidance:
-                        noise_pred_uncond, noise_pred_text = noise_pred[::2], noise_pred[1::2]
-                        noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
-
-                    # compute the previous noisy sample x_t -> x_t-1
-                    latents_denoised_batch = self.scheduler.step(
-                        noise_pred, t, latents_for_view, **extra_step_kwargs
-                    ).prev_sample
-
-                    # save views scheduler status after sample
-                    views_scheduler_status[j] = copy.deepcopy(self.scheduler.__dict__)
-
-                    # extract value from batch
-                    for latents_view_denoised, (h_start, h_end, w_start, w_end) in zip(
-                        latents_denoised_batch.chunk(vb_size), batch_view
-                    ):
-                        if circular_padding and w_end > latents.shape[3]:
-                            # Case for circular padding
-                            value[:, :, h_start:h_end, w_start:] += latents_view_denoised[
-                                :, :, h_start:h_end, : latents.shape[3] - w_start
-                            ]
-                            value[:, :, h_start:h_end, : w_end - latents.shape[3]] += latents_view_denoised[
-                                :, :, h_start:h_end, latents.shape[3] - w_start :
-                            ]
-                            count[:, :, h_start:h_end, w_start:] += 1
-                            count[:, :, h_start:h_end, : w_end - latents.shape[3]] += 1
-                        else:
-                            value[:, :, h_start:h_end, w_start:w_end] += latents_view_denoised
-                            count[:, :, h_start:h_end, w_start:w_end] += 1
-
-                # take the MultiDiffusion step. Eq. 5 in MultiDiffusion paper: https://arxiv.org/abs/2302.08113
-                latents = torch.where(count > 0, value / count, value)
-
-                # call the callback, if provided
-                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
-                    progress_bar.update()
-                    if callback is not None and i % callback_steps == 0:
-                        callback(i, t, latents)
-
-        if not output_type == "latent":
-            if circular_padding:
-                image = self.decode_latents_with_padding(latents)
-            else:
-                image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
-            image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
-        else:
-            image = latents
-            has_nsfw_concept = None
-
-        if has_nsfw_concept is None:
-            do_denormalize = [True] * image.shape[0]
-        else:
-            do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
-
-        image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
-
-        if not return_dict:
-            return (image, has_nsfw_concept)
-
-        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
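
The accumulation in the loop above is the heart of MultiDiffusion: every denoised crop is added into `value`, its footprint is counted in `count`, and the per-pixel mean becomes the next latent. A minimal standalone sketch of that averaging step (the function name and shapes are illustrative, not from the deleted file):

import torch

def multidiffusion_average(latents_shape, denoised_views, views):
    # denoised_views: one denoised latent crop per view
    # views: matching (h_start, h_end, w_start, w_end) crop coordinates
    value = torch.zeros(latents_shape)
    count = torch.zeros(latents_shape)
    for crop, (h_start, h_end, w_start, w_end) in zip(denoised_views, views):
        value[:, :, h_start:h_end, w_start:w_end] += crop
        count[:, :, h_start:h_end, w_start:w_end] += 1
    # each latent pixel becomes the mean of every crop that covered it (Eq. 5)
    return torch.where(count > 0, value / count, value)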
spaces/Andy1621/uniformer_image_detection/mmdet/core/bbox/iou_calculators/iou2d_calculator.py
DELETED
@@ -1,159 +0,0 @@
-import torch
-
-from .builder import IOU_CALCULATORS
-
-
-@IOU_CALCULATORS.register_module()
-class BboxOverlaps2D(object):
-    """2D Overlaps (e.g. IoUs, GIoUs) Calculator."""
-
-    def __call__(self, bboxes1, bboxes2, mode='iou', is_aligned=False):
-        """Calculate IoU between 2D bboxes.
-
-        Args:
-            bboxes1 (Tensor): bboxes have shape (m, 4) in <x1, y1, x2, y2>
-                format, or shape (m, 5) in <x1, y1, x2, y2, score> format.
-            bboxes2 (Tensor): bboxes have shape (m, 4) in <x1, y1, x2, y2>
-                format, shape (m, 5) in <x1, y1, x2, y2, score> format, or be
-                empty. If ``is_aligned`` is ``True``, then m and n must be
-                equal.
-            mode (str): "iou" (intersection over union), "iof" (intersection
-                over foreground), or "giou" (generalized intersection over
-                union).
-            is_aligned (bool, optional): If True, then m and n must be equal.
-                Default False.
-
-        Returns:
-            Tensor: shape (m, n) if ``is_aligned`` is False else shape (m,)
-        """
-        assert bboxes1.size(-1) in [0, 4, 5]
-        assert bboxes2.size(-1) in [0, 4, 5]
-        if bboxes2.size(-1) == 5:
-            bboxes2 = bboxes2[..., :4]
-        if bboxes1.size(-1) == 5:
-            bboxes1 = bboxes1[..., :4]
-        return bbox_overlaps(bboxes1, bboxes2, mode, is_aligned)
-
-    def __repr__(self):
-        """str: a string describing the module"""
-        repr_str = self.__class__.__name__ + '()'
-        return repr_str
-
-
-def bbox_overlaps(bboxes1, bboxes2, mode='iou', is_aligned=False, eps=1e-6):
-    """Calculate overlap between two set of bboxes.
-
-    If ``is_aligned`` is ``False``, then calculate the overlaps between each
-    bbox of bboxes1 and bboxes2, otherwise the overlaps between each aligned
-    pair of bboxes1 and bboxes2.
-
-    Args:
-        bboxes1 (Tensor): shape (B, m, 4) in <x1, y1, x2, y2> format or empty.
-        bboxes2 (Tensor): shape (B, n, 4) in <x1, y1, x2, y2> format or empty.
-            B indicates the batch dim, in shape (B1, B2, ..., Bn).
-            If ``is_aligned`` is ``True``, then m and n must be equal.
-        mode (str): "iou" (intersection over union), "iof" (intersection over
-            foreground) or "giou" (generalized intersection over union).
-            Default "iou".
-        is_aligned (bool, optional): If True, then m and n must be equal.
-            Default False.
-        eps (float, optional): A value added to the denominator for numerical
-            stability. Default 1e-6.
-
-    Returns:
-        Tensor: shape (m, n) if ``is_aligned`` is False else shape (m,)
-
-    Example:
-        >>> bboxes1 = torch.FloatTensor([
-        >>>     [0, 0, 10, 10],
-        >>>     [10, 10, 20, 20],
-        >>>     [32, 32, 38, 42],
-        >>> ])
-        >>> bboxes2 = torch.FloatTensor([
-        >>>     [0, 0, 10, 20],
-        >>>     [0, 10, 10, 19],
-        >>>     [10, 10, 20, 20],
-        >>> ])
-        >>> overlaps = bbox_overlaps(bboxes1, bboxes2)
-        >>> assert overlaps.shape == (3, 3)
-        >>> overlaps = bbox_overlaps(bboxes1, bboxes2, is_aligned=True)
-        >>> assert overlaps.shape == (3, )
-
-    Example:
-        >>> empty = torch.empty(0, 4)
-        >>> nonempty = torch.FloatTensor([[0, 0, 10, 9]])
-        >>> assert tuple(bbox_overlaps(empty, nonempty).shape) == (0, 1)
-        >>> assert tuple(bbox_overlaps(nonempty, empty).shape) == (1, 0)
-        >>> assert tuple(bbox_overlaps(empty, empty).shape) == (0, 0)
-    """
-
-    assert mode in ['iou', 'iof', 'giou'], f'Unsupported mode {mode}'
-    # Either the boxes are empty or the length of boxes' last dimension is 4
-    assert (bboxes1.size(-1) == 4 or bboxes1.size(0) == 0)
-    assert (bboxes2.size(-1) == 4 or bboxes2.size(0) == 0)
-
-    # Batch dim must be the same
-    # Batch dim: (B1, B2, ... Bn)
-    assert bboxes1.shape[:-2] == bboxes2.shape[:-2]
-    batch_shape = bboxes1.shape[:-2]
-
-    rows = bboxes1.size(-2)
-    cols = bboxes2.size(-2)
-    if is_aligned:
-        assert rows == cols
-
-    if rows * cols == 0:
-        if is_aligned:
-            return bboxes1.new(batch_shape + (rows, ))
-        else:
-            return bboxes1.new(batch_shape + (rows, cols))
-
-    area1 = (bboxes1[..., 2] - bboxes1[..., 0]) * (
-        bboxes1[..., 3] - bboxes1[..., 1])
-    area2 = (bboxes2[..., 2] - bboxes2[..., 0]) * (
-        bboxes2[..., 3] - bboxes2[..., 1])
-
-    if is_aligned:
-        lt = torch.max(bboxes1[..., :2], bboxes2[..., :2])  # [B, rows, 2]
-        rb = torch.min(bboxes1[..., 2:], bboxes2[..., 2:])  # [B, rows, 2]
-
-        wh = (rb - lt).clamp(min=0)  # [B, rows, 2]
-        overlap = wh[..., 0] * wh[..., 1]
-
-        if mode in ['iou', 'giou']:
-            union = area1 + area2 - overlap
-        else:
-            union = area1
-        if mode == 'giou':
-            enclosed_lt = torch.min(bboxes1[..., :2], bboxes2[..., :2])
-            enclosed_rb = torch.max(bboxes1[..., 2:], bboxes2[..., 2:])
-    else:
-        lt = torch.max(bboxes1[..., :, None, :2],
-                       bboxes2[..., None, :, :2])  # [B, rows, cols, 2]
-        rb = torch.min(bboxes1[..., :, None, 2:],
-                       bboxes2[..., None, :, 2:])  # [B, rows, cols, 2]
-
-        wh = (rb - lt).clamp(min=0)  # [B, rows, cols, 2]
-        overlap = wh[..., 0] * wh[..., 1]
-
-        if mode in ['iou', 'giou']:
-            union = area1[..., None] + area2[..., None, :] - overlap
-        else:
-            union = area1[..., None]
-        if mode == 'giou':
-            enclosed_lt = torch.min(bboxes1[..., :, None, :2],
-                                    bboxes2[..., None, :, :2])
-            enclosed_rb = torch.max(bboxes1[..., :, None, 2:],
-                                    bboxes2[..., None, :, 2:])
-
-    eps = union.new_tensor([eps])
-    union = torch.max(union, eps)
-    ious = overlap / union
-    if mode in ['iou', 'iof']:
-        return ious
-    # calculate gious
-    enclose_wh = (enclosed_rb - enclosed_lt).clamp(min=0)
-    enclose_area = enclose_wh[..., 0] * enclose_wh[..., 1]
-    enclose_area = torch.max(enclose_area, eps)
-    gious = ious - (enclose_area - union) / enclose_area
-    return gious
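
For a concrete feel for the `iou` mode implemented above, here is a small worked check in plain PyTorch, independent of mmdet (values chosen so the arithmetic is easy to follow):

import torch

a = torch.tensor([[0.0, 0.0, 10.0, 10.0]])   # box in [x1, y1, x2, y2]
b = torch.tensor([[5.0, 5.0, 15.0, 15.0]])

lt = torch.max(a[:, :2], b[:, :2])            # intersection top-left: (5, 5)
rb = torch.min(a[:, 2:], b[:, 2:])            # intersection bottom-right: (10, 10)
wh = (rb - lt).clamp(min=0)                   # intersection width/height: (5, 5)
inter = wh[:, 0] * wh[:, 1]                   # 25
area_a = (a[:, 2] - a[:, 0]) * (a[:, 3] - a[:, 1])   # 100
area_b = (b[:, 2] - b[:, 0]) * (b[:, 3] - b[:, 1])   # 100
iou = inter / (area_a + area_b - inter)       # 25 / 175, about 0.143
print(iou)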
spaces/Andy1621/uniformer_image_detection/mmdet/datasets/pipelines/instaboost.py
DELETED
@@ -1,98 +0,0 @@
-import numpy as np
-
-from ..builder import PIPELINES
-
-
-@PIPELINES.register_module()
-class InstaBoost(object):
-    r"""Data augmentation method in `InstaBoost: Boosting Instance
-    Segmentation Via Probability Map Guided Copy-Pasting
-    <https://arxiv.org/abs/1908.07801>`_.
-
-    Refer to https://github.com/GothicAi/Instaboost for implementation details.
-    """
-
-    def __init__(self,
-                 action_candidate=('normal', 'horizontal', 'skip'),
-                 action_prob=(1, 0, 0),
-                 scale=(0.8, 1.2),
-                 dx=15,
-                 dy=15,
-                 theta=(-1, 1),
-                 color_prob=0.5,
-                 hflag=False,
-                 aug_ratio=0.5):
-        try:
-            import instaboostfast as instaboost
-        except ImportError:
-            raise ImportError(
-                'Please run "pip install instaboostfast" '
-                'to install instaboostfast first for instaboost augmentation.')
-        self.cfg = instaboost.InstaBoostConfig(action_candidate, action_prob,
-                                               scale, dx, dy, theta,
-                                               color_prob, hflag)
-        self.aug_ratio = aug_ratio
-
-    def _load_anns(self, results):
-        labels = results['ann_info']['labels']
-        masks = results['ann_info']['masks']
-        bboxes = results['ann_info']['bboxes']
-        n = len(labels)
-
-        anns = []
-        for i in range(n):
-            label = labels[i]
-            bbox = bboxes[i]
-            mask = masks[i]
-            x1, y1, x2, y2 = bbox
-            # assert (x2 - x1) >= 1 and (y2 - y1) >= 1
-            bbox = [x1, y1, x2 - x1, y2 - y1]
-            anns.append({
-                'category_id': label,
-                'segmentation': mask,
-                'bbox': bbox
-            })
-
-        return anns
-
-    def _parse_anns(self, results, anns, img):
-        gt_bboxes = []
-        gt_labels = []
-        gt_masks_ann = []
-        for ann in anns:
-            x1, y1, w, h = ann['bbox']
-            # TODO: more essential bug need to be fixed in instaboost
-            if w <= 0 or h <= 0:
-                continue
-            bbox = [x1, y1, x1 + w, y1 + h]
-            gt_bboxes.append(bbox)
-            gt_labels.append(ann['category_id'])
-            gt_masks_ann.append(ann['segmentation'])
-        gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
-        gt_labels = np.array(gt_labels, dtype=np.int64)
-        results['ann_info']['labels'] = gt_labels
-        results['ann_info']['bboxes'] = gt_bboxes
-        results['ann_info']['masks'] = gt_masks_ann
-        results['img'] = img
-        return results
-
-    def __call__(self, results):
-        img = results['img']
-        orig_type = img.dtype
-        anns = self._load_anns(results)
-        if np.random.choice([0, 1], p=[1 - self.aug_ratio, self.aug_ratio]):
-            try:
-                import instaboostfast as instaboost
-            except ImportError:
-                raise ImportError('Please run "pip install instaboostfast" '
-                                  'to install instaboostfast first.')
-            anns, img = instaboost.get_new_data(
-                anns, img.astype(np.uint8), self.cfg, background=None)
-
-        results = self._parse_anns(results, anns, img.astype(orig_type))
-        return results
-
-    def __repr__(self):
-        repr_str = self.__class__.__name__
-        repr_str += f'(cfg={self.cfg}, aug_ratio={self.aug_ratio})'
-        return repr_str
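
For context, a transform like this is registered with mmdet's PIPELINES registry and referenced by name from a config. A rough sketch of where it would sit in a train pipeline (the parameter values mirror the defaults above; the surrounding pipeline steps and their placement are an assumption about a typical mmdet setup, not taken from this repository):

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='InstaBoost',               # copy-paste augmentation on raw annotations
        action_candidate=('normal', 'horizontal', 'skip'),
        action_prob=(1, 0, 0),
        scale=(0.8, 1.2),
        dx=15,
        dy=15,
        theta=(-1, 1),
        color_prob=0.5,
        hflag=False,
        aug_ratio=0.5),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
]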
spaces/Andy1621/uniformer_image_segmentation/configs/gcnet/gcnet_r101-d8_512x512_80k_ade20k.py
DELETED
@@ -1,2 +0,0 @@
-_base_ = './gcnet_r50-d8_512x512_80k_ade20k.py'
-model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/image/colorspace.py
DELETED
@@ -1,306 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import cv2
-import numpy as np
-
-
-def imconvert(img, src, dst):
-    """Convert an image from the src colorspace to dst colorspace.
-
-    Args:
-        img (ndarray): The input image.
-        src (str): The source colorspace, e.g., 'rgb', 'hsv'.
-        dst (str): The destination colorspace, e.g., 'rgb', 'hsv'.
-
-    Returns:
-        ndarray: The converted image.
-    """
-    code = getattr(cv2, f'COLOR_{src.upper()}2{dst.upper()}')
-    out_img = cv2.cvtColor(img, code)
-    return out_img
-
-
-def bgr2gray(img, keepdim=False):
-    """Convert a BGR image to grayscale image.
-
-    Args:
-        img (ndarray): The input image.
-        keepdim (bool): If False (by default), then return the grayscale image
-            with 2 dims, otherwise 3 dims.
-
-    Returns:
-        ndarray: The converted grayscale image.
-    """
-    out_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
-    if keepdim:
-        out_img = out_img[..., None]
-    return out_img
-
-
-def rgb2gray(img, keepdim=False):
-    """Convert a RGB image to grayscale image.
-
-    Args:
-        img (ndarray): The input image.
-        keepdim (bool): If False (by default), then return the grayscale image
-            with 2 dims, otherwise 3 dims.
-
-    Returns:
-        ndarray: The converted grayscale image.
-    """
-    out_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
-    if keepdim:
-        out_img = out_img[..., None]
-    return out_img
-
-
-def gray2bgr(img):
-    """Convert a grayscale image to BGR image.
-
-    Args:
-        img (ndarray): The input image.
-
-    Returns:
-        ndarray: The converted BGR image.
-    """
-    img = img[..., None] if img.ndim == 2 else img
-    out_img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
-    return out_img
-
-
-def gray2rgb(img):
-    """Convert a grayscale image to RGB image.
-
-    Args:
-        img (ndarray): The input image.
-
-    Returns:
-        ndarray: The converted RGB image.
-    """
-    img = img[..., None] if img.ndim == 2 else img
-    out_img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
-    return out_img
-
-
-def _convert_input_type_range(img):
-    """Convert the type and range of the input image.
-
-    It converts the input image to np.float32 type and range of [0, 1].
-    It is mainly used for pre-processing the input image in colorspace
-    conversion functions such as rgb2ycbcr and ycbcr2rgb.
-
-    Args:
-        img (ndarray): The input image. It accepts:
-            1. np.uint8 type with range [0, 255];
-            2. np.float32 type with range [0, 1].
-
-    Returns:
-        (ndarray): The converted image with type of np.float32 and range of
-            [0, 1].
-    """
-    img_type = img.dtype
-    img = img.astype(np.float32)
-    if img_type == np.float32:
-        pass
-    elif img_type == np.uint8:
-        img /= 255.
-    else:
-        raise TypeError('The img type should be np.float32 or np.uint8, '
-                        f'but got {img_type}')
-    return img
-
-
-def _convert_output_type_range(img, dst_type):
-    """Convert the type and range of the image according to dst_type.
-
-    It converts the image to desired type and range. If `dst_type` is np.uint8,
-    images will be converted to np.uint8 type with range [0, 255]. If
-    `dst_type` is np.float32, it converts the image to np.float32 type with
-    range [0, 1].
-    It is mainly used for post-processing images in colorspace conversion
-    functions such as rgb2ycbcr and ycbcr2rgb.
-
-    Args:
-        img (ndarray): The image to be converted with np.float32 type and
-            range [0, 255].
-        dst_type (np.uint8 | np.float32): If dst_type is np.uint8, it
-            converts the image to np.uint8 type with range [0, 255]. If
-            dst_type is np.float32, it converts the image to np.float32 type
-            with range [0, 1].
-
-    Returns:
-        (ndarray): The converted image with desired type and range.
-    """
-    if dst_type not in (np.uint8, np.float32):
-        raise TypeError('The dst_type should be np.float32 or np.uint8, '
-                        f'but got {dst_type}')
-    if dst_type == np.uint8:
-        img = img.round()
-    else:
-        img /= 255.
-    return img.astype(dst_type)
-
-
-def rgb2ycbcr(img, y_only=False):
-    """Convert a RGB image to YCbCr image.
-
-    This function produces the same results as Matlab's `rgb2ycbcr` function.
-    It implements the ITU-R BT.601 conversion for standard-definition
-    television. See more details in
-    https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion.
-
-    It differs from a similar function in cv2.cvtColor: `RGB <-> YCrCb`.
-    In OpenCV, it implements a JPEG conversion. See more details in
-    https://en.wikipedia.org/wiki/YCbCr#JPEG_conversion.
-
-    Args:
-        img (ndarray): The input image. It accepts:
-            1. np.uint8 type with range [0, 255];
-            2. np.float32 type with range [0, 1].
-        y_only (bool): Whether to only return Y channel. Default: False.
-
-    Returns:
-        ndarray: The converted YCbCr image. The output image has the same type
-            and range as input image.
-    """
-    img_type = img.dtype
-    img = _convert_input_type_range(img)
-    if y_only:
-        out_img = np.dot(img, [65.481, 128.553, 24.966]) + 16.0
-    else:
-        out_img = np.matmul(
-            img, [[65.481, -37.797, 112.0], [128.553, -74.203, -93.786],
-                  [24.966, 112.0, -18.214]]) + [16, 128, 128]
-    out_img = _convert_output_type_range(out_img, img_type)
-    return out_img
-
-
-def bgr2ycbcr(img, y_only=False):
-    """Convert a BGR image to YCbCr image.
-
-    The bgr version of rgb2ycbcr.
-    It implements the ITU-R BT.601 conversion for standard-definition
-    television. See more details in
-    https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion.
-
-    It differs from a similar function in cv2.cvtColor: `BGR <-> YCrCb`.
-    In OpenCV, it implements a JPEG conversion. See more details in
-    https://en.wikipedia.org/wiki/YCbCr#JPEG_conversion.
-
-    Args:
-        img (ndarray): The input image. It accepts:
-            1. np.uint8 type with range [0, 255];
-            2. np.float32 type with range [0, 1].
-        y_only (bool): Whether to only return Y channel. Default: False.
-
-    Returns:
-        ndarray: The converted YCbCr image. The output image has the same type
-            and range as input image.
-    """
-    img_type = img.dtype
-    img = _convert_input_type_range(img)
-    if y_only:
-        out_img = np.dot(img, [24.966, 128.553, 65.481]) + 16.0
-    else:
-        out_img = np.matmul(
-            img, [[24.966, 112.0, -18.214], [128.553, -74.203, -93.786],
-                  [65.481, -37.797, 112.0]]) + [16, 128, 128]
-    out_img = _convert_output_type_range(out_img, img_type)
-    return out_img
-
-
-def ycbcr2rgb(img):
-    """Convert a YCbCr image to RGB image.
-
-    This function produces the same results as Matlab's ycbcr2rgb function.
-    It implements the ITU-R BT.601 conversion for standard-definition
-    television. See more details in
-    https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion.
-
-    It differs from a similar function in cv2.cvtColor: `YCrCb <-> RGB`.
-    In OpenCV, it implements a JPEG conversion. See more details in
-    https://en.wikipedia.org/wiki/YCbCr#JPEG_conversion.
-
-    Args:
-        img (ndarray): The input image. It accepts:
-            1. np.uint8 type with range [0, 255];
-            2. np.float32 type with range [0, 1].
-
-    Returns:
-        ndarray: The converted RGB image. The output image has the same type
-            and range as input image.
-    """
-    img_type = img.dtype
-    img = _convert_input_type_range(img) * 255
-    out_img = np.matmul(img, [[0.00456621, 0.00456621, 0.00456621],
-                              [0, -0.00153632, 0.00791071],
-                              [0.00625893, -0.00318811, 0]]) * 255.0 + [
-                                  -222.921, 135.576, -276.836
-                              ]
-    out_img = _convert_output_type_range(out_img, img_type)
-    return out_img
-
-
-def ycbcr2bgr(img):
-    """Convert a YCbCr image to BGR image.
-
-    The bgr version of ycbcr2rgb.
-    It implements the ITU-R BT.601 conversion for standard-definition
-    television. See more details in
-    https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion.
-
-    It differs from a similar function in cv2.cvtColor: `YCrCb <-> BGR`.
-    In OpenCV, it implements a JPEG conversion. See more details in
-    https://en.wikipedia.org/wiki/YCbCr#JPEG_conversion.
-
-    Args:
-        img (ndarray): The input image. It accepts:
-            1. np.uint8 type with range [0, 255];
-            2. np.float32 type with range [0, 1].
-
-    Returns:
-        ndarray: The converted BGR image. The output image has the same type
-            and range as input image.
-    """
-    img_type = img.dtype
-    img = _convert_input_type_range(img) * 255
-    out_img = np.matmul(img, [[0.00456621, 0.00456621, 0.00456621],
-                              [0.00791071, -0.00153632, 0],
-                              [0, -0.00318811, 0.00625893]]) * 255.0 + [
-                                  -276.836, 135.576, -222.921
-                              ]
-    out_img = _convert_output_type_range(out_img, img_type)
-    return out_img
-
-
-def convert_color_factory(src, dst):
-
-    code = getattr(cv2, f'COLOR_{src.upper()}2{dst.upper()}')
-
-    def convert_color(img):
-        out_img = cv2.cvtColor(img, code)
-        return out_img
-
-    convert_color.__doc__ = f"""Convert a {src.upper()} image to {dst.upper()}
-        image.
-
-    Args:
-        img (ndarray or str): The input image.
-
-    Returns:
-        ndarray: The converted {dst.upper()} image.
-    """
-
-    return convert_color
-
-
-bgr2rgb = convert_color_factory('bgr', 'rgb')
-
-rgb2bgr = convert_color_factory('rgb', 'bgr')
-
-bgr2hsv = convert_color_factory('bgr', 'hsv')
-
-hsv2bgr = convert_color_factory('hsv', 'bgr')
-
-bgr2hls = convert_color_factory('bgr', 'hls')
-
-hls2bgr = convert_color_factory('hls', 'bgr')
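
A quick sanity check of the BT.601 luma weights used in rgb2ycbcr above: for a float32 image in [0, 1], the intermediate value is Y = 65.481*R + 128.553*G + 24.966*B + 16, which lands in the video range [16, 235] before _convert_output_type_range rescales it back to the input's type:

import numpy as np

white = np.array([[[1.0, 1.0, 1.0]]], dtype=np.float32)  # pure white, RGB in [0, 1]
y = np.dot(white, [65.481, 128.553, 24.966]) + 16.0      # 65.481 + 128.553 + 24.966 + 16
print(y)  # [[235.]] -- white maps to 235; black (0, 0, 0) would map to 16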
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/dotenv/ipython.py
DELETED
@@ -1,39 +0,0 @@
-from IPython.core.magic import Magics, line_magic, magics_class  # type: ignore
-from IPython.core.magic_arguments import (argument, magic_arguments,  # type: ignore
-                                          parse_argstring)  # type: ignore
-
-from .main import find_dotenv, load_dotenv
-
-
-@magics_class
-class IPythonDotEnv(Magics):
-
-    @magic_arguments()
-    @argument(
-        '-o', '--override', action='store_true',
-        help="Indicate to override existing variables"
-    )
-    @argument(
-        '-v', '--verbose', action='store_true',
-        help="Indicate function calls to be verbose"
-    )
-    @argument('dotenv_path', nargs='?', type=str, default='.env',
-              help='Search in increasingly higher folders for the `dotenv_path`')
-    @line_magic
-    def dotenv(self, line):
-        args = parse_argstring(self.dotenv, line)
-        # Locate the .env file
-        dotenv_path = args.dotenv_path
-        try:
-            dotenv_path = find_dotenv(dotenv_path, True, True)
-        except IOError:
-            print("cannot find .env file")
-            return
-
-        # Load the .env file
-        load_dotenv(dotenv_path, verbose=args.verbose, override=args.override)
-
-
-def load_ipython_extension(ipython):
-    """Register the %dotenv magic."""
-    ipython.register_magics(IPythonDotEnv)
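
Typical use of the magic registered above, inside an IPython session or notebook (shown as comments since line magics are not plain Python; assumes python-dotenv is installed):

# In [1]: %load_ext dotenv
# In [2]: %dotenv                      # search upward for a .env file and load it
# In [3]: %dotenv -o -v path/to/.env   # override existing variables, verbose output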
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/requests/status_codes.py
DELETED
@@ -1,128 +0,0 @@
-r"""
-The ``codes`` object defines a mapping from common names for HTTP statuses
-to their numerical codes, accessible either as attributes or as dictionary
-items.
-
-Example::
-
-    >>> import requests
-    >>> requests.codes['temporary_redirect']
-    307
-    >>> requests.codes.teapot
-    418
-    >>> requests.codes['\o/']
-    200
-
-Some codes have multiple names, and both upper- and lower-case versions of
-the names are allowed. For example, ``codes.ok``, ``codes.OK``, and
-``codes.okay`` all correspond to the HTTP status code 200.
-"""
-
-from .structures import LookupDict
-
-_codes = {
-    # Informational.
-    100: ("continue",),
-    101: ("switching_protocols",),
-    102: ("processing",),
-    103: ("checkpoint",),
-    122: ("uri_too_long", "request_uri_too_long"),
-    200: ("ok", "okay", "all_ok", "all_okay", "all_good", "\\o/", "✓"),
-    201: ("created",),
-    202: ("accepted",),
-    203: ("non_authoritative_info", "non_authoritative_information"),
-    204: ("no_content",),
-    205: ("reset_content", "reset"),
-    206: ("partial_content", "partial"),
-    207: ("multi_status", "multiple_status", "multi_stati", "multiple_stati"),
-    208: ("already_reported",),
-    226: ("im_used",),
-    # Redirection.
-    300: ("multiple_choices",),
-    301: ("moved_permanently", "moved", "\\o-"),
-    302: ("found",),
-    303: ("see_other", "other"),
-    304: ("not_modified",),
-    305: ("use_proxy",),
-    306: ("switch_proxy",),
-    307: ("temporary_redirect", "temporary_moved", "temporary"),
-    308: (
-        "permanent_redirect",
-        "resume_incomplete",
-        "resume",
-    ),  # "resume" and "resume_incomplete" to be removed in 3.0
-    # Client Error.
-    400: ("bad_request", "bad"),
-    401: ("unauthorized",),
-    402: ("payment_required", "payment"),
-    403: ("forbidden",),
-    404: ("not_found", "-o-"),
-    405: ("method_not_allowed", "not_allowed"),
-    406: ("not_acceptable",),
-    407: ("proxy_authentication_required", "proxy_auth", "proxy_authentication"),
-    408: ("request_timeout", "timeout"),
-    409: ("conflict",),
-    410: ("gone",),
-    411: ("length_required",),
-    412: ("precondition_failed", "precondition"),
-    413: ("request_entity_too_large",),
-    414: ("request_uri_too_large",),
-    415: ("unsupported_media_type", "unsupported_media", "media_type"),
-    416: (
-        "requested_range_not_satisfiable",
-        "requested_range",
-        "range_not_satisfiable",
-    ),
-    417: ("expectation_failed",),
-    418: ("im_a_teapot", "teapot", "i_am_a_teapot"),
-    421: ("misdirected_request",),
-    422: ("unprocessable_entity", "unprocessable"),
-    423: ("locked",),
-    424: ("failed_dependency", "dependency"),
-    425: ("unordered_collection", "unordered"),
-    426: ("upgrade_required", "upgrade"),
-    428: ("precondition_required", "precondition"),
-    429: ("too_many_requests", "too_many"),
-    431: ("header_fields_too_large", "fields_too_large"),
-    444: ("no_response", "none"),
-    449: ("retry_with", "retry"),
-    450: ("blocked_by_windows_parental_controls", "parental_controls"),
-    451: ("unavailable_for_legal_reasons", "legal_reasons"),
-    499: ("client_closed_request",),
-    # Server Error.
-    500: ("internal_server_error", "server_error", "/o\\", "✗"),
-    501: ("not_implemented",),
-    502: ("bad_gateway",),
-    503: ("service_unavailable", "unavailable"),
-    504: ("gateway_timeout",),
-    505: ("http_version_not_supported", "http_version"),
-    506: ("variant_also_negotiates",),
-    507: ("insufficient_storage",),
-    509: ("bandwidth_limit_exceeded", "bandwidth"),
-    510: ("not_extended",),
-    511: ("network_authentication_required", "network_auth", "network_authentication"),
-}
-
-codes = LookupDict(name="status_codes")
-
-
-def _init():
-    for code, titles in _codes.items():
-        for title in titles:
-            setattr(codes, title, code)
-            if not title.startswith(("\\", "/")):
-                setattr(codes, title.upper(), code)
-
-    def doc(code):
-        names = ", ".join(f"``{n}``" for n in _codes[code])
-        return "* %d: %s" % (code, names)
-
-    global __doc__
-    __doc__ = (
-        __doc__ + "\n" + "\n".join(doc(code) for code in sorted(_codes))
-        if __doc__ is not None
-        else None
-    )
-
-
-_init()
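
A short usage sketch of the generated lookup with requests (the httpbin URL is just an example endpoint, not from the file):

import requests

r = requests.get("https://httpbin.org/status/404")
if r.status_code == requests.codes.not_found:           # attribute access
    print("missing")
assert requests.codes["bad_request"] == 400             # dictionary access
assert requests.codes.OK == requests.codes.okay == 200  # upper/lower case and alias names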
spaces/AtomdffAI/wechatgpt4atom/docker/build.alpine.sh
DELETED
@@ -1,10 +0,0 @@
-#!/bin/bash
-
-CHATGPT_ON_WECHAT_TAG=1.0.2
-
-docker build -f Dockerfile.alpine \
-    --build-arg CHATGPT_ON_WECHAT_VER=$CHATGPT_ON_WECHAT_TAG \
-    -t zhayujie/chatgpt-on-wechat .
-
-docker tag zhayujie/chatgpt-on-wechat zhayujie/chatgpt-on-wechat:$CHATGPT_ON_WECHAT_TAG-alpine
-
spaces/AvaterClasher/Food_Classifier_Refined_MONI/app.py
DELETED
@@ -1,70 +0,0 @@
-### 1. Imports and class names setup ###
-import gradio as gr
-import os
-import torch
-
-from model import create_effnetb2_model
-from timeit import default_timer as timer
-from typing import Tuple, Dict
-
-# Setup class names
-with open("class_names.txt", "r") as f:
-    class_names = [food_name.strip() for food_name in f.readlines()]
-
-### 2. Model and transforms preparation ###
-# Create model and transforms
-effnetb2, effnetb2_transforms = create_effnetb2_model(num_classes=101)
-
-# Load saved weights
-effnetb2.load_state_dict(
-    torch.load(f="food101.pth",
-               map_location=torch.device("cpu"))  # load to CPU
-)
-
-### 3. Predict function ###
-
-def predict(img) -> Tuple[Dict, float]:
-    # Start a timer
-    start_time = timer()
-
-    # Transform the input image for use with EffNetB2
-    img = effnetb2_transforms(img).unsqueeze(0)  # unsqueeze = add batch dimension on 0th index
-
-    # Put model into eval mode, make prediction
-    effnetb2.eval()
-    with torch.inference_mode():
-        # Pass transformed image through the model and turn the prediction logits into probabilities
-        pred_probs = torch.softmax(effnetb2(img), dim=1)
-
-    # Create a prediction label and prediction probability dictionary
-    pred_labels_and_probs = {class_names[i]: float(pred_probs[0][i]) for i in range(len(class_names))}
-
-    # Calculate pred time
-    end_time = timer()
-    pred_time = round(end_time - start_time, 4)
-
-    # Return pred dict and pred time
-    return pred_labels_and_probs, pred_time
-
-### 4. Gradio app ###
-
-# Create title, description and article
-title = "Food Classifier [Food 101] 🍥🍥🍥"
-description = ""
-article = ""
-
-# Create example list
-example_list = [["examples/" + example] for example in os.listdir("examples")]
-
-# Create the Gradio demo
-demo = gr.Interface(fn=predict,  # maps inputs to outputs
-                    inputs=gr.Image(type="pil"),
-                    outputs=[gr.Label(num_top_classes=5, label="Predictions"),
-                             gr.Number(label="Prediction time (s)")],
-                    examples=example_list,
-                    title=title,
-                    description=description,
-                    article=article)
-
-# Launch the demo!
-demo.launch()
spaces/BLACKHOST/timer/README.md
DELETED
@@ -1,12 +0,0 @@
----
-title: Timer
-emoji: 💩
-colorFrom: pink
-colorTo: pink
-sdk: streamlit
-sdk_version: 1.10.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Bambicita/rvc-models/README.md
DELETED
@@ -1,14 +0,0 @@
----
-title: Rvc Models
-emoji: 🎤
-colorFrom: red
-colorTo: blue
-sdk: gradio
-sdk_version: 3.27.0
-app_file: app.py
-pinned: false
-license: mit
-duplicated_from: ArkanDash/rvc-models
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Benson/text-generation/Examples/Cuerda Hroe 1.3.3 Mod Apk.md
DELETED
@@ -1,91 +0,0 @@
-<br />
-<h1>héroe de la cuerda 1.3.3 Mod Apk: Un juego de superhéroes con dinero ilimitado y diversión</h1>
-<p>Si usted está buscando un juego de superhéroes que le permite girar alrededor de una ciudad con una cuerda, luchar contra el crimen, y tienen dinero y recursos ilimitados, entonces usted debe probar <strong>Rope Hero 1.3.3 Mod Apk</strong>. Esta es una versión modificada del popular juego de acción <a href="( 4 )">Rope Hero: Vice Town</a>, que se ha descargado más de 100 millones de veces en Google Play Store. En este artículo, le diremos lo que es héroe de cuerda 1.3.3 Mod Apk, ¿por qué debe jugar, cómo jugarlo, y responder a algunas preguntas frecuentes sobre él. </p>
-<h2>¿Qué es el héroe de cuerda 1.3.3 Mod Apk? </h2>
-<h3>Una breve introducción al juego y sus características</h3>
-<p>Héroe de cuerda 1.3.3 Mod Apk es un juego de acción en tercera persona que te pone en el papel de un superhéroe azul que tiene una cuerda con superpoderes ilimitados. Puedes usar tu cuerda para saltar como una araña de un edificio a otro, escalar paredes, volar por el aire y aterrizar con poder. También puedes usar tu cuerda para agarrar enemigos, vehículos, objetos e incluso helicópteros. El juego tiene un gran mundo abierto que se puede explorar libremente, con diferentes distritos, misiones, actividades y secretos. También puedes personalizar a tu héroe con diferentes pieles, armas, vehículos y habilidades. El juego tiene física realista, gráficos impresionantes y un juego suave. </p>
-<h2>cuerda héroe 1.3.3 mod apk</h2><br /><p><b><b>Download File</b> ✯ <a href="https://bltlly.com/2v6IOU">https://bltlly.com/2v6IOU</a></b></p><br /><br />
-<h3>Cómo descargar e instalar el apk mod en su dispositivo</h3>
-<p>Para descargar e instalar Rope Hero 1.3.3 Mod Apk en su dispositivo, es necesario seguir estos sencillos pasos:</p>
-<ol>
-<li>Haga clic en este <a href="( 1 )">link</a> para descargar el archivo mod apk. Asegúrese de que tiene suficiente espacio de almacenamiento en su dispositivo. </li>
-<li>Ir a la configuración del dispositivo y permitir la instalación de aplicaciones de fuentes desconocidas. </li>
-<li>Busque el archivo descargado en su administrador de archivos y toque en él para instalarlo. </li>
-<li>Espere a que el proceso de instalación termine y lance el juego. </li>
-</ol>
-<h2>¿Por qué jugar héroe de cuerda 1.3.3 Mod Apk? </h2>
-<h3>Los beneficios de jugar con dinero ilimitado y otras características de mod</h3>
-<p>Una de las principales razones por las que debe jugar héroe de la cuerda 1.3.3 Mod Apk es que se puede disfrutar del juego con dinero ilimitado y otras características mod. Con dinero ilimitado, puede comprar cualquier arma, vehículo, piel o capacidad que desee sin preocuparse por el costo. También puede actualizar su héroe al máximo nivel y desbloquear todas las habilidades y beneficios. Con otras características de mod, puede tener salud ilimitada, munición, energía y sin anuncios. También puede habilitar el modo dios, matar un golpe y comprar gratis. Estas características te harán invencible e imparable en el juego. </p>
-<h3>Los retos y misiones que puedes disfrutar en el juego</h3>
-<p>Otra razón por la que debe jugar héroe de la cuerda 1.3.3 Mod Apk es que se puede disfrutar de varios desafíos y misiones que le mantendrá entretenido y comprometido en el juego. El juego tiene una historia principal que implica la lucha contra una organización criminal llamada el Clan Oscuro. Tendrás que enfrentarte a diferentes enemigos, jefes y misiones a medida que avanzas en la historia. El juego también tiene misiones secundarias que puedes completar para recompensas adicionales y diversión. Puedes ayudar a ciudadanos necesitados, detener robos, perseguir criminales, rescatar rehenes y más. El juego también tiene misiones diarias que te darán dinero de bonificación y objetos. El juego tiene mucho contenido y variedad que te mantendrá enganchado durante horas. </p>
-<h3>Los consejos y trucos para dominar el juego y convertirse en un superhéroe</h3>
-<p>La última razón por la que debe jugar héroe de la cuerda 1.3.3 Mod Apk es que usted puede dominar el juego y convertirse en un superhéroe con algunos consejos y trucos. Estos son algunos de ellos:</p>
-<ul>
-<li>Elige cuidadosamente tus armas. El juego tiene una amplia gama de armas que puedes usar para luchar contra tus enemigos. Puedes elegir entre armas, granadas, cohetes, láseres, espadas, martillos y más. Cada arma tiene sus propias ventajas y desventajas, así que elige la que se adapte a tu estilo y situación. También puede cambiar entre armas durante el combate para mayor flexibilidad. </li>
-<li>Actualiza tu héroe regularmente. El juego le permite actualizar su héroe con diferentes habilidades y beneficios que mejorarán su rendimiento en el juego. Puedes mejorar tu salud, daño, velocidad, energía, defensa y más. También puedes desbloquear nuevas habilidades que te darán poderes especiales como bolas de fuego, rayos, telequinesis y más. Actualizar tu héroe te hará más fuerte y más versátil en el juego. </li>
-</ul>
-<h2>¿Cómo se juega héroe de cuerda 1.3.3 Mod Apk? </h2>
-<h3>Los controles básicos y la mecánica de juego</h3>
-<p>Héroe de cuerda 1.3.3 Mod Apk es fácil de jugar con controles simples y mecánica de juego. El juego tiene un joystick virtual en el lado izquierdo de la pantalla que te permite mover a tu héroe. El juego también tiene botones en el lado derecho de la pantalla que te permiten realizar diferentes acciones como saltar, disparar, usar la cuerda o cambiar de arma. El juego tiene un mini-mapa en la esquina superior izquierda de la pantalla que te muestra tu ubicación, objetivos, enemigos y aliados. El juego también tiene un botón de menú en la esquina superior derecha de la pantalla que le permite acceder a su inventario, ajustes, misiones, mapa, tienda y más. El juego tiene una interfaz sencilla que facilita la navegación y el juego. </p>
-<h3>Las mejores armas y vehículos para usar en el juego</h3>
-<p>Héroe de cuerda 1.3.3 Mod Apk tiene un montón de armas y vehículos que se pueden utilizar en el juego. Aquí están algunos de los mejores:</p>
-<tabla>
-<tr><th>Arma</th><th>Descripción</th></tr>
-<tr><td>Pistola láser</td><td>Un arma futurista que dispara rayos de energía que pueden atravesar enemigos y objetos. </td></tr>
-<tr><td>Espada</td><td>Un arma cuerpo a cuerpo que te permite cortar a tus enemigos con estilo y precisión. </td></tr>
-<tr><th>Vehículo</th><th>Descripción</th></tr>
-<tr><td>Motocicleta</td><td>Un vehículo rápido y ágil que te permite acercarte por las calles y realizar acrobacias. </td></tr>
-<tr><td>Tanque</td><td>Un vehículo pesado y blindado que te permite destruir a tus enemigos y aplastar obstáculos. </td></tr>
-<tr><td>Helicóptero</td><td>Un vehículo volador y versátil que te permite volar por encima de la ciudad y disparar desde el aire. </td></tr>
-</tabla>
-<h3>Los diferentes modos y distritos para explorar en el juego</h3>
-<p>Héroe de cuerda 1.3.3 Mod Apk tiene diferentes modos y distritos que se pueden explorar en el juego. Estos son algunos de ellos:</p>
-<p></p>
-<ul>
-Modo historia: Este es el modo principal del juego, donde sigues la trama y completas misiones para derrotar al Clan Oscuro. Encontrará diferentes personajes, ubicaciones y eventos en este modo. </li>
-<li>Modo libre: Este es el modo en el que puedes deambular por la ciudad libremente y hacer lo que quieras. Puede encontrar misiones secundarias, actividades, secretos y desafíos en este modo. También puede interactuar con otros PNJ, vehículos y objetos en este modo. </li>
-<li>Modo de supervivencia: Este es el modo en el que tienes que sobrevivir el mayor tiempo posible contra oleadas de enemigos que te atacarán desde todas las direcciones. Puedes usar tus armas, vehículos y habilidades para defenderte de ellos. También puedes ganar dinero y objetos en este modo. </li>
-<li>Distritos: El juego tiene diferentes distritos que puedes explorar en la ciudad, cada uno con su propio tema, atmósfera y características. Algunos de los distritos son Chinatown, Downtown, Zona Industrial, Base Militar y Aeropuerto. Cada distrito tiene sus propios enemigos, misiones, secretos y puntos de referencia. </li>
-</ul>
-<h2>Conclusión</h2>
-<h3>Un resumen de los puntos principales y una llamada a la acción</h3>
-<h2>Preguntas frecuentes</h2>
-<h4>¿Es seguro descargar y jugar Rope Hero 1.3.3 Mod Apk? </h4>
-<p>Sí, Héroe de cuerda 1.3.3 Mod Apk es seguro para descargar y jugar. El archivo mod apk se escanea en busca de virus y malware antes de ser subido a nuestro sitio. El mod apk tampoco requiere ninguna raíz o jailbreak para ejecutarse en su dispositivo. Sin embargo, le recomendamos que descargue el apk mod solo desde nuestro sitio, ya que otras fuentes pueden contener archivos dañinos o falsos. </p>
-<h4> ¿Cuáles son los requisitos mínimos para jugar Rope Hero 1.3.3 Mod Apk? </h4>
-<p>Los requisitos mínimos para jugar héroe de cuerda 1.3.3 Mod Apk son los siguientes:</p>
-<ul>
-<li>Android 4.4 o superior</li>
-<li>Al menos 100 MB de espacio de almacenamiento libre</li>
-<li>Una conexión a Internet estable</li>
-</ul>
-<h4> ¿Cómo actualizar Rope Hero 1.3.3 Mod Apk a la última versión? </h4>
-<p>Para actualizar Rope Hero 1.3.3 Mod Apk a la última versión, es necesario seguir estos pasos:</p>
-<ol>
-<li>Eliminar la versión anterior de la apk mod de su dispositivo. </li>
-<li>Descargar la última versión de la apk mod de nuestro sitio. </li>
-<li>Instalar la nueva versión de la apk mod en su dispositivo. </li>
-<li>Iniciar el juego y disfrutar de las nuevas características. </li>
-</ol>
-<h4>Cómo ponerse en contacto con los desarrolladores de Rope Hero 1.3.3 Mod Apk para obtener información o apoyo? </h4>
-<p>Para contactar a los desarrolladores de Rope Hero 1.3.3 Mod Apk para obtener información o apoyo, puede utilizar uno de estos métodos:</p>
-<ul>
-<li>Correo electrónico: [email protected]</li>
-<li>Facebook: <a href=">https://www.facebook.com/Rope-Hero-103984361733634/</a></li>
-<li>Twitter: <a href=">https://twitter.com/RopeHeroGame</a> </li>
-<li>YouTube: <a href=">https://www.youtube.com/channel/UCwZtQWpeuohjDtDjx80uGJg</a> </li>
-</ul>
-<h4>¿Dónde puedo encontrar más información sobre Rope Hero 1.3.3 Mod Apk? </h4>
-<p>Para encontrar más información sobre Rope Hero 1.3.3 Mod Apk, puede visitar estos sitios:</p>
-<ul>
-<li><a href=">https://apkpure.com/rope-hero-vice-town/com.mgc.RopeHero.ViceTown</a>: Este es un sitio donde puedes descargar la versión original del juego, así como otras versiones y mods. </li>
-<li><a href=">https://www.reddit.com/r/RopeHero/</a>: Este es un subreddit donde puedes unirte a la comunidad de fans de Rope Hero, compartir tus experiencias, hacer preguntas y obtener consejos y trucos de otros jugadores. </li>
-</ul>
-<p>Espero que haya disfrutado de la lectura de este artículo y aprendido algo nuevo sobre Rope Hero 1.3.3 Mod Apk. Si lo hiciste, por favor compártelo con tus amigos y familiares que podrían estar interesados en este juego. Además, no se olvide de descargar y jugar Rope Hero 1.3.3 Mod Apk y divertirse con sus aventuras de superhéroes. </p> 64aa2da5cf<br />
-<br />
-<br />
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/logging.py
DELETED
@@ -1,36 +0,0 @@
-import sys
-import logging
-import distutils.log
-from . import monkey
-
-
-def _not_warning(record):
-    return record.levelno < logging.WARNING
-
-
-def configure():
-    """
-    Configure logging to emit warning and above to stderr
-    and everything else to stdout. This behavior is provided
-    for compatibility with distutils.log but may change in
-    the future.
-    """
-    err_handler = logging.StreamHandler()
-    err_handler.setLevel(logging.WARNING)
-    out_handler = logging.StreamHandler(sys.stdout)
-    out_handler.addFilter(_not_warning)
-    handlers = err_handler, out_handler
-    logging.basicConfig(
-        format="{message}", style='{', handlers=handlers, level=logging.DEBUG)
-    if hasattr(distutils.log, 'Log'):
-        monkey.patch_func(set_threshold, distutils.log, 'set_threshold')
-        # For some reason `distutils.log` module is getting cached in `distutils.dist`
-        # and then loaded again when patched,
-        # implying: id(distutils.log) != id(distutils.dist.log).
-        # Make sure the same module object is used everywhere:
-        distutils.dist.log = distutils.log
-
-
-def set_threshold(level):
-    logging.root.setLevel(level*10)
-    return set_threshold.unpatched(level)

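For reference, a minimal standalone sketch of the split-stream pattern this module implements (illustrative code, not part of the repository): WARNING and above go to stderr, everything else to stdout.

import logging
import sys

def _not_warning(record):
    return record.levelno < logging.WARNING

err_handler = logging.StreamHandler()           # StreamHandler defaults to stderr
err_handler.setLevel(logging.WARNING)           # stderr only sees WARNING and above
out_handler = logging.StreamHandler(sys.stdout)
out_handler.addFilter(_not_warning)             # keep warnings out of stdout
logging.basicConfig(format="{message}", style="{",
                    handlers=(err_handler, out_handler), level=logging.DEBUG)
logging.info("goes to stdout")
logging.warning("goes to stderr")
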
spaces/BillBojangeles2000/WikiGPT/app.py
DELETED
@@ -1,83 +0,0 @@
-import pinecone
-from pprint import pprint
-import streamlit as st
-import torch
-from transformers import AutoTokenizer, AutoModel, AutoModelForSeq2SeqLM
-model_name = "vblagoje/bart_lfqa"
-# connect to pinecone environment
-pinecone.init(
-    api_key="e5d4972e-0045-43d5-a55e-efdeafe442dd",
-    environment="us-central1-gcp"  # find next to API key in console
-)
-
-index_name = "abstractive-question-answering"
-
-# check if the abstractive-question-answering index exists
-if index_name not in pinecone.list_indexes():
-    # create the index if it does not exist
-    pinecone.create_index(
-        index_name,
-        dimension=768,
-        metric="cosine"
-    )
-
-# connect to the abstractive-question-answering index we created
-index = pinecone.Index(index_name)
-
-from transformers import BartTokenizer, BartForConditionalGeneration
-
-tokenizer = AutoTokenizer.from_pretrained(model_name)
-model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
-model = model.to('cpu')
-
-import torch
-from sentence_transformers import SentenceTransformer
-
-# set device to GPU if available
-device = 'cuda' if torch.cuda.is_available() else 'cpu'
-# load the retriever model from the huggingface model hub
-retriever = SentenceTransformer("flax-sentence-embeddings/all_datasets_v3_mpnet-base", device=device)
-
-def query_pinecone(query, top_k):
-    # generate embeddings for the query
-    xq = retriever.encode([query]).tolist()
-    # search the pinecone index for context passages with the answer
-    xc = index.query(xq, top_k=top_k, include_metadata=True)
-    return xc
-
-def format_query(query, context):
-    # extract passage_text from the Pinecone search result and add the <P> tag
-    context = [f"<P> {m['metadata']['text']}" for m in context]
-    # concatenate all context passages
-    context = " ".join(context)
-    # concatenate the query and context passages
-    query = f"question: {query} context: {context}"
-    return query
-
-def generate_answer(query):
-    query_and_docs = query
-
-    model_input = tokenizer(query_and_docs, truncation=True, padding=True, return_tensors="pt")
-
-    generated_answers_encoded = model.generate(input_ids=model_input["input_ids"].to(device),
-                                               attention_mask=model_input["attention_mask"].to(device),
-                                               min_length=64,
-                                               max_length=256,
-                                               do_sample=False,
-                                               early_stopping=True,
-                                               num_beams=8,
-                                               temperature=1.0,
-                                               top_k=None,
-                                               top_p=None,
-                                               eos_token_id=tokenizer.eos_token_id,
-                                               no_repeat_ngram_size=3,
-                                               num_return_sequences=1)
-    res = tokenizer.batch_decode(generated_answers_encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)
-    st.write(str(res))
-
-query = st.text_area('Enter Question:')
-b = st.button('Submit!')
-if b:
-    st.write("Processing, please wait!")
-    context = query_pinecone(query, top_k=5)
-    query = format_query(query, context["matches"])
-    generate_answer(query)

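End to end, the app embeds the question, retrieves the top-k passages from Pinecone, and prepends them to the prompt BART generates from. A condensed sketch of that prompt construction with a toy match (hypothetical data, same format as the code above):

def format_query(query, context):
    # same construction as above: <P>-tagged passages joined after the question
    passages = " ".join(f"<P> {m['metadata']['text']}" for m in context)
    return f"question: {query} context: {passages}"

matches = [{"metadata": {"text": "The Nile is about 6,650 km long."}}]
print(format_query("How long is the Nile?", matches))
# question: How long is the Nile? context: <P> The Nile is about 6,650 km long.
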
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/docs/README.md
DELETED
@@ -1,16 +0,0 @@
-# Read the docs:
-
-The latest documentation built from this directory is available at [detectron2.readthedocs.io](https://detectron2.readthedocs.io/).
-Documents in this directory are not meant to be read on github.
-
-# Build the docs:
-
-1. Install detectron2 according to [INSTALL.md](INSTALL.md).
-2. Install additional libraries required to build docs:
-  - docutils>=0.14
-  - Sphinx>=1.7
-  - recommonmark==0.4.0
-  - sphinx_rtd_theme
-  - mock
-
-3. Run `make html` from this directory.

spaces/CVPR/LIVE/pybind11/setup.py
DELETED
@@ -1,130 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Setup script for PyPI; use CMakeFile.txt to build extension modules
-
-from setuptools import setup
-from distutils.command.install_headers import install_headers
-from distutils.command.build_py import build_py
-from pybind11 import __version__
-import os
-
-package_data = [
-    'include/pybind11/detail/class.h',
-    'include/pybind11/detail/common.h',
-    'include/pybind11/detail/descr.h',
-    'include/pybind11/detail/init.h',
-    'include/pybind11/detail/internals.h',
-    'include/pybind11/detail/typeid.h',
-    'include/pybind11/attr.h',
-    'include/pybind11/buffer_info.h',
-    'include/pybind11/cast.h',
-    'include/pybind11/chrono.h',
-    'include/pybind11/common.h',
-    'include/pybind11/complex.h',
-    'include/pybind11/eigen.h',
-    'include/pybind11/embed.h',
-    'include/pybind11/eval.h',
-    'include/pybind11/functional.h',
-    'include/pybind11/iostream.h',
-    'include/pybind11/numpy.h',
-    'include/pybind11/operators.h',
-    'include/pybind11/options.h',
-    'include/pybind11/pybind11.h',
-    'include/pybind11/pytypes.h',
-    'include/pybind11/stl.h',
-    'include/pybind11/stl_bind.h',
-]
-
-# Prevent installation of pybind11 headers by setting
-# PYBIND11_USE_CMAKE.
-if os.environ.get('PYBIND11_USE_CMAKE'):
-    headers = []
-else:
-    headers = package_data
-
-
-class InstallHeaders(install_headers):
-    """Use custom header installer because the default one flattens subdirectories"""
-    def run(self):
-        if not self.distribution.headers:
-            return
-
-        for header in self.distribution.headers:
-            subdir = os.path.dirname(os.path.relpath(header, 'include/pybind11'))
-            install_dir = os.path.join(self.install_dir, subdir)
-            self.mkpath(install_dir)
-
-            (out, _) = self.copy_file(header, install_dir)
-            self.outfiles.append(out)
-
-
-# Install the headers inside the package as well
-class BuildPy(build_py):
-    def build_package_data(self):
-        build_py.build_package_data(self)
-        for header in package_data:
-            target = os.path.join(self.build_lib, 'pybind11', header)
-            self.mkpath(os.path.dirname(target))
-            self.copy_file(header, target, preserve_mode=False)
-
-    def get_outputs(self, include_bytecode=1):
-        outputs = build_py.get_outputs(self, include_bytecode=include_bytecode)
-        for header in package_data:
-            target = os.path.join(self.build_lib, 'pybind11', header)
-            outputs.append(target)
-        return outputs
-
-
-setup(
-    name='pybind11',
-    version=__version__,
-    description='Seamless operability between C++11 and Python',
-    author='Wenzel Jakob',
-    author_email='[email protected]',
-    url='https://github.com/pybind/pybind11',
-    download_url='https://github.com/pybind/pybind11/tarball/v' + __version__,
-    packages=['pybind11'],
-    license='BSD',
-    headers=headers,
-    zip_safe=False,
-    cmdclass=dict(install_headers=InstallHeaders, build_py=BuildPy),
-    classifiers=[
-        'Development Status :: 5 - Production/Stable',
-        'Intended Audience :: Developers',
-        'Topic :: Software Development :: Libraries :: Python Modules',
-        'Topic :: Utilities',
-        'Programming Language :: C++',
-        'Programming Language :: Python :: 2.7',
-        'Programming Language :: Python :: 3',
-        'Programming Language :: Python :: 3.2',
-        'Programming Language :: Python :: 3.3',
-        'Programming Language :: Python :: 3.4',
-        'Programming Language :: Python :: 3.5',
-        'Programming Language :: Python :: 3.6',
-        'License :: OSI Approved :: BSD License'
-    ],
-    keywords='C++11, Python bindings',
-    long_description="""pybind11 is a lightweight header-only library that
-exposes C++ types in Python and vice versa, mainly to create Python bindings of
-existing C++ code. Its goals and syntax are similar to the excellent
-Boost.Python by David Abrahams: to minimize boilerplate code in traditional
-extension modules by inferring type information using compile-time
-introspection.
-
-The main issue with Boost.Python-and the reason for creating such a similar
-project-is Boost. Boost is an enormously large and complex suite of utility
-libraries that works with almost every C++ compiler in existence. This
-compatibility has its cost: arcane template tricks and workarounds are
-necessary to support the oldest and buggiest of compiler specimens. Now that
-C++11-compatible compilers are widely available, this heavy machinery has
-become an excessively large and unnecessary dependency.
-
-Think of this library as a tiny self-contained version of Boost.Python with
-everything stripped away that isn't relevant for binding generation. Without
-comments, the core header files only require ~4K lines of code and depend on
-Python (2.7 or 3.x, or PyPy2.7 >= 5.7) and the C++ standard library. This
-compact implementation was possible thanks to some of the new C++11 language
-features (specifically: tuples, lambda functions and variadic templates). Since
-its creation, this library has grown beyond Boost.Python in many ways, leading
-to dramatically simpler binding code in many common situations.""")

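The custom InstallHeaders command exists because the stock header installer flattens subdirectories; the relpath computation above is what preserves them. A quick illustrative check (the header path is taken from package_data):

import os

header = 'include/pybind11/detail/common.h'
subdir = os.path.dirname(os.path.relpath(header, 'include/pybind11'))
print(subdir)  # 'detail' -- so the header lands under <install_dir>/detail/ instead of the flat root
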
spaces/CVPR/LIVE/thrust/cmake/ThrustInstallRules.cmake
DELETED
@@ -1,25 +0,0 @@
-# Thrust is a header library; no need to build anything before installing:
-set(CMAKE_SKIP_INSTALL_ALL_DEPENDENCY TRUE)
-
-install(DIRECTORY "${Thrust_SOURCE_DIR}/thrust"
-  TYPE INCLUDE
-  FILES_MATCHING
-    PATTERN "*.h"
-    PATTERN "*.inl"
-    PATTERN "*.cmake"
-    PATTERN "*.md"
-)
-
-# Depending on how Thrust is configured, CUB's CMake scripts may or may not be
-# included, so maintain a set of CUB install rules in both projects. By default
-# CUB headers are installed alongside Thrust -- this may be disabled by turning
-# off THRUST_INSTALL_CUB_HEADERS.
-option(THRUST_INSTALL_CUB_HEADERS "Include cub headers when installing." ON)
-if (THRUST_INSTALL_CUB_HEADERS)
-  install(DIRECTORY "${Thrust_SOURCE_DIR}/dependencies/cub/cub"
-    TYPE INCLUDE
-    FILES_MATCHING
-      PATTERN "*.cuh"
-      PATTERN "*.cmake"
-  )
-endif()

spaces/CVPR/LIVE/thrust/thrust/type_traits/is_operator_less_or_greater_function_object.h
DELETED
@@ -1,136 +0,0 @@
-
-/*
- *  Copyright 2008-2018 NVIDIA Corporation
- *
- *  Licensed under the Apache License, Version 2.0 (the "License");
- *  you may not use this file except in compliance with the License.
- *  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-/*! \file is_operator_less_or_greater_function_object.h
- *  \brief Type traits for determining if a \c BinaryFunction is equivalent to
- *  either \c operator< or \c operator>.
- */
-
-#pragma once
-
-#include <thrust/detail/config.h>
-#include <thrust/functional.h>
-#include <thrust/detail/type_traits.h>
-#include <thrust/detail/type_traits/pointer_traits.h>
-
-namespace thrust
-{
-
-namespace detail
-{
-
-template <typename FunctionObject>
-struct is_operator_less_function_object_impl;
-
-template <typename FunctionObject>
-struct is_operator_greater_function_object_impl;
-
-} // namespace detail
-
-/// Unary metafunction returns \c true_type if \c FunctionObject is equivalent
-/// to \c operator<, and \c false_type otherwise.
-template <typename FunctionObject>
-#if THRUST_CPP_DIALECT >= 2011
-using is_operator_less_function_object =
-#else
-struct is_operator_less_function_object :
-#endif
-  detail::is_operator_less_function_object_impl<FunctionObject>
-#if THRUST_CPP_DIALECT < 2011
-{}
-#endif
-;
-
-#if THRUST_CPP_DIALECT >= 2014
-/// <code>constexpr bool</code> that is \c true if \c FunctionObject is
-/// equivalent to \c operator<, and \c false otherwise.
-template <typename FunctionObject>
-constexpr bool is_operator_less_function_object_v
-  = is_operator_less_function_object<FunctionObject>::value;
-#endif
-
-/// Unary metafunction returns \c true_type if \c FunctionObject is equivalent
-/// to \c operator>, and \c false_type otherwise.
-template <typename FunctionObject>
-#if THRUST_CPP_DIALECT >= 2011
-using is_operator_greater_function_object =
-#else
-struct is_operator_greater_function_object :
-#endif
-  detail::is_operator_greater_function_object_impl<FunctionObject>
-#if THRUST_CPP_DIALECT < 2011
-{}
-#endif
-;
-
-#if THRUST_CPP_DIALECT >= 2014
-/// <code>constexpr bool</code> that is \c true if \c FunctionObject is
-/// equivalent to \c operator>, and \c false otherwise.
-template <typename FunctionObject>
-constexpr bool is_operator_greater_function_object_v
-  = is_operator_greater_function_object<FunctionObject>::value;
-#endif
-
-/// Unary metafunction returns \c true_type if \c FunctionObject is equivalent
-/// to either \c operator< or \c operator>, and \c false_type otherwise.
-template <typename FunctionObject>
-#if THRUST_CPP_DIALECT >= 2011
-using is_operator_less_or_greater_function_object =
-#else
-struct is_operator_less_or_greater_function_object :
-#endif
-  integral_constant<
-    bool
-  , detail::is_operator_less_function_object_impl<FunctionObject>::value
-    || detail::is_operator_greater_function_object_impl<FunctionObject>::value
-  >
-#if THRUST_CPP_DIALECT < 2011
-{}
-#endif
-;
-
-#if THRUST_CPP_DIALECT >= 2014
-/// <code>constexpr bool</code> that is \c true if \c FunctionObject is
-/// equivalent to either \c operator< or \c operator>, and \c false otherwise.
-template <typename FunctionObject>
-constexpr bool is_operator_less_or_greater_function_object_v
-  = is_operator_less_or_greater_function_object<FunctionObject>::value;
-#endif
-
-///////////////////////////////////////////////////////////////////////////////
-
-namespace detail
-{
-
-template <typename FunctionObject>
-struct is_operator_less_function_object_impl : false_type {};
-template <typename T>
-struct is_operator_less_function_object_impl<thrust::less<T> > : true_type {};
-template <typename T>
-struct is_operator_less_function_object_impl<std::less<T> > : true_type {};
-
-template <typename FunctionObject>
-struct is_operator_greater_function_object_impl : false_type {};
-template <typename T>
-struct is_operator_greater_function_object_impl<thrust::greater<T> > : true_type {};
-template <typename T>
-struct is_operator_greater_function_object_impl<std::greater<T> > : true_type {};
-
-} // namespace detail
-
-} // end namespace thrust
-

spaces/CVPR/WALT/mmdet/core/post_processing/__init__.py
DELETED
@@ -1,8 +0,0 @@
-from .bbox_nms import fast_nms, multiclass_nms
-from .merge_augs import (merge_aug_bboxes, merge_aug_masks,
-                         merge_aug_proposals, merge_aug_scores)
-
-__all__ = [
-    'multiclass_nms', 'merge_aug_proposals', 'merge_aug_bboxes',
-    'merge_aug_scores', 'merge_aug_masks', 'fast_nms'
-]

spaces/CVPR/WALT/mmdet/models/necks/__init__.py
DELETED
@@ -1,16 +0,0 @@
-from .bfp import BFP
-from .channel_mapper import ChannelMapper
-from .fpg import FPG
-from .fpn import FPN
-from .fpn_carafe import FPN_CARAFE
-from .hrfpn import HRFPN
-from .nas_fpn import NASFPN
-from .nasfcos_fpn import NASFCOS_FPN
-from .pafpn import PAFPN
-from .rfp import RFP
-from .yolo_neck import YOLOV3Neck
-
-__all__ = [
-    'FPN', 'BFP', 'ChannelMapper', 'HRFPN', 'NASFPN', 'FPN_CARAFE', 'PAFPN',
-    'NASFCOS_FPN', 'RFP', 'YOLOV3Neck', 'FPG'
-]

spaces/CVPR/lama-example/bin/paper_runfiles/blur_tests.sh
DELETED
@@ -1,37 +0,0 @@
-##!/usr/bin/env bash
-#
-## !!! file set to make test_large_30k from the vanilla test_large: configs/test_large_30k.lst
-#
-## paths to data are valid for mml7
-#PLACES_ROOT="/data/inpainting/Places365"
-#OUT_DIR="/data/inpainting/paper_data/Places365_val_test"
-#
-#source "$(dirname $0)/env.sh"
-#
-#for datadir in test_large_30k # val_large
-#do
-#    for conf in random_thin_256 random_medium_256 random_thick_256 random_thin_512 random_medium_512 random_thick_512
-#    do
-#        "$BINDIR/gen_mask_dataset.py" "$CONFIGDIR/data_gen/${conf}.yaml" \
-#            "$PLACES_ROOT/$datadir" "$OUT_DIR/$datadir/$conf" --n-jobs 8
-#
-#        "$BINDIR/calc_dataset_stats.py" --samples-n 20 "$OUT_DIR/$datadir/$conf" "$OUT_DIR/$datadir/${conf}_stats"
-#    done
-#
-#    for conf in segm_256 segm_512
-#    do
-#        "$BINDIR/gen_mask_dataset.py" "$CONFIGDIR/data_gen/${conf}.yaml" \
-#            "$PLACES_ROOT/$datadir" "$OUT_DIR/$datadir/$conf" --n-jobs 2
-#
-#        "$BINDIR/calc_dataset_stats.py" --samples-n 20 "$OUT_DIR/$datadir/$conf" "$OUT_DIR/$datadir/${conf}_stats"
-#    done
-#done
-#
-#IN_DIR="/data/inpainting/paper_data/Places365_val_test/test_large_30k/random_medium_512"
-#PRED_DIR="/data/inpainting/predictions/final/images/r.suvorov_2021-03-05_17-08-35_train_ablv2_work_resume_epoch37/random_medium_512"
-#BLUR_OUT_DIR="/data/inpainting/predictions/final/blur/images"
-#
-#for b in 0.1
-#
-#"$BINDIR/blur_predicts.py" "$BASEDIR/../../configs/eval2.yaml" "$CUR_IN_DIR" "$CUR_OUT_DIR" "$CUR_EVAL_DIR"
-#

spaces/CVPR/lama-example/saicinpainting/training/data/__init__.py
DELETED
File without changes
spaces/ChandraMohanNayal/AutoGPT/scripts/check_requirements.py
DELETED
@@ -1,32 +0,0 @@
-import sys
-
-import pkg_resources
-
-
-def main():
-    requirements_file = sys.argv[1]
-    with open(requirements_file, "r") as f:
-        required_packages = [
-            line.strip().split("#")[0].strip() for line in f.readlines()
-        ]
-
-    installed_packages = [package.key for package in pkg_resources.working_set]
-
-    missing_packages = []
-    for package in required_packages:
-        if not package:  # Skip empty lines
-            continue
-        package_name = package.strip().split("==")[0]
-        if package_name.lower() not in installed_packages:
-            missing_packages.append(package_name)
-
-    if missing_packages:
-        print("Missing packages:")
-        print(", ".join(missing_packages))
-        sys.exit(1)
-    else:
-        print("All packages are installed.")
-
-
-if __name__ == "__main__":
-    main()

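The script's core check is a case-folded membership test against the pkg_resources working set; a minimal sketch of the same idea (the package name is illustrative):

import pkg_resources

# .key is the canonical lowercase distribution name, matching the lookup above
installed = {pkg.key for pkg in pkg_resources.working_set}
print("requests" in installed)  # True only if requests is installed in this environment
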
spaces/CikeyQI/meme-api/meme_generator/memes/anya_suki/__init__.py
DELETED
@@ -1,44 +0,0 @@
-from pathlib import Path
-from typing import List
-
-from pil_utils import BuildImage
-
-from meme_generator import add_meme
-from meme_generator.exception import TextOverLength
-from meme_generator.utils import make_jpg_or_gif
-
-img_dir = Path(__file__).parent / "images"
-
-
-def anya_suki(images: List[BuildImage], texts: List[str], args):
-    text = texts[0] if texts else "阿尼亚喜欢这个"
-    frame = BuildImage.open(img_dir / "0.png")
-    try:
-        frame.draw_text(
-            (5, frame.height - 60, frame.width - 5, frame.height - 10),
-            text,
-            max_fontsize=40,
-            fill="white",
-            stroke_fill="black",
-            stroke_ratio=0.06,
-        )
-    except ValueError:
-        raise TextOverLength(text)
-
-    def make(img: BuildImage) -> BuildImage:
-        img = img.convert("RGBA").resize((305, 235), keep_ratio=True)
-        return frame.copy().paste(img, (106, 72), below=True)
-
-    return make_jpg_or_gif(images[0], make)
-
-
-add_meme(
-    "anya_suki",
-    anya_suki,
-    min_images=1,
-    max_images=1,
-    min_texts=0,
-    max_texts=1,
-    default_texts=["阿尼亚喜欢这个"],
-    keywords=["阿尼亚喜欢"],
-)

spaces/CofAI/chat.b4/g4f/Provider/Providers/Ails.py
DELETED
@@ -1,87 +0,0 @@
-import os
-import time
-import json
-import uuid
-import hashlib
-import requests
-
-from ...typing import sha256, Dict, get_type_hints
-from datetime import datetime
-
-url: str = 'https://ai.ls'
-model: str = 'gpt-3.5-turbo'
-supports_stream = True
-needs_auth = False
-working = True
-
-
-class Utils:
-    def hash(json_data: Dict[str, str]) -> sha256:
-
-        base_string: str = '%s:%s:%s:%s' % (
-            json_data['t'],
-            json_data['m'],
-            'WI,2rU#_r:r~aF4aJ36[.Z(/8Rv93Rf',
-            len(json_data['m'])
-        )
-
-        return hashlib.sha256(base_string.encode()).hexdigest()
-
-    def format_timestamp(timestamp: int) -> str:
-
-        e = timestamp
-        n = e % 10
-        r = n + 1 if n % 2 == 0 else n
-        return str(e - n + r)
-
-
-def _create_completion(model: str, messages: list, temperature: float = 0.6, stream: bool = False, **kwargs):
-
-    headers = {
-        'authority': 'api.caipacity.com',
-        'accept': '*/*',
-        'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
-        'authorization': 'Bearer free',
-        'client-id': str(uuid.uuid4()),
-        'client-v': '0.1.249',
-        'content-type': 'application/json',
-        'origin': 'https://ai.ls',
-        'referer': 'https://ai.ls/',
-        'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
-        'sec-ch-ua-mobile': '?0',
-        'sec-ch-ua-platform': '"Windows"',
-        'sec-fetch-dest': 'empty',
-        'sec-fetch-mode': 'cors',
-        'sec-fetch-site': 'cross-site',
-        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
-    }
-
-    timestamp = Utils.format_timestamp(int(time.time() * 1000))
-
-    sig = {
-        'd': datetime.now().strftime('%Y-%m-%d'),
-        't': timestamp,
-        's': Utils.hash({
-            't': timestamp,
-            'm': messages[-1]['content']})}
-
-    json_data = json.dumps(separators=(',', ':'), obj={
-        'model': 'gpt-3.5-turbo',
-        'temperature': 0.6,
-        'stream': True,
-        'messages': messages} | sig)
-
-    response = requests.post('https://api.caipacity.com/v1/chat/completions',
-                             headers=headers, data=json_data, stream=True)
-
-    for token in response.iter_lines():
-        if b'content' in token:
-            completion_chunk = json.loads(token.decode().replace('data: ', ''))
-            token = completion_chunk['choices'][0]['delta'].get('content')
-            if token is not None:
-                yield token
-
-
-params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
-    '(%s)' % ', '.join(
-        [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])

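The request signature above SHA-256 hashes the string "timestamp:message:salt:message_length", after format_timestamp forces the last digit of the millisecond timestamp to be odd. A small worked sketch with illustrative values:

import hashlib

# format_timestamp(1690000000124) -> '1690000000125': the even last digit 4 is bumped to 5
t, m = '1690000000125', 'hello'
salt = 'WI,2rU#_r:r~aF4aJ36[.Z(/8Rv93Rf'           # fixed salt from the provider above
base_string = f'{t}:{m}:{salt}:{len(m)}'           # timestamp:message:salt:message_length
print(hashlib.sha256(base_string.encode()).hexdigest())  # hex digest sent as the 's' field
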
spaces/Cvandi/remake/setup.py
DELETED
@@ -1,107 +0,0 @@
-#!/usr/bin/env python
-
-from setuptools import find_packages, setup
-
-import os
-import subprocess
-import time
-
-version_file = 'realesrgan/version.py'
-
-
-def readme():
-    with open('README.md', encoding='utf-8') as f:
-        content = f.read()
-    return content
-
-
-def get_git_hash():
-
-    def _minimal_ext_cmd(cmd):
-        # construct minimal environment
-        env = {}
-        for k in ['SYSTEMROOT', 'PATH', 'HOME']:
-            v = os.environ.get(k)
-            if v is not None:
-                env[k] = v
-        # LANGUAGE is used on win32
-        env['LANGUAGE'] = 'C'
-        env['LANG'] = 'C'
-        env['LC_ALL'] = 'C'
-        out = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env).communicate()[0]
-        return out
-
-    try:
-        out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
-        sha = out.strip().decode('ascii')
-    except OSError:
-        sha = 'unknown'
-
-    return sha
-
-
-def get_hash():
-    if os.path.exists('.git'):
-        sha = get_git_hash()[:7]
-    else:
-        sha = 'unknown'
-
-    return sha
-
-
-def write_version_py():
-    content = """# GENERATED VERSION FILE
-# TIME: {}
-__version__ = '{}'
-__gitsha__ = '{}'
-version_info = ({})
-"""
-    sha = get_hash()
-    with open('VERSION', 'r') as f:
-        SHORT_VERSION = f.read().strip()
-    VERSION_INFO = ', '.join([x if x.isdigit() else f'"{x}"' for x in SHORT_VERSION.split('.')])
-
-    version_file_str = content.format(time.asctime(), SHORT_VERSION, sha, VERSION_INFO)
-    with open(version_file, 'w') as f:
-        f.write(version_file_str)
-
-
-def get_version():
-    with open(version_file, 'r') as f:
-        exec(compile(f.read(), version_file, 'exec'))
-    return locals()['__version__']
-
-
-def get_requirements(filename='requirements.txt'):
-    here = os.path.dirname(os.path.realpath(__file__))
-    with open(os.path.join(here, filename), 'r') as f:
-        requires = [line.replace('\n', '') for line in f.readlines()]
-    return requires
-
-
-if __name__ == '__main__':
-    write_version_py()
-    setup(
-        name='realesrgan',
-        version=get_version(),
-        description='Real-ESRGAN aims at developing Practical Algorithms for General Image Restoration',
-        long_description=readme(),
-        long_description_content_type='text/markdown',
-        author='Xintao Wang',
-        author_email='[email protected]',
-        keywords='computer vision, pytorch, image restoration, super-resolution, esrgan, real-esrgan',
-        url='https://github.com/xinntao/Real-ESRGAN',
-        include_package_data=True,
-        packages=find_packages(exclude=('options', 'datasets', 'experiments', 'results', 'tb_logger', 'wandb')),
-        classifiers=[
-            'Development Status :: 4 - Beta',
-            'License :: OSI Approved :: Apache Software License',
-            'Operating System :: OS Independent',
-            'Programming Language :: Python :: 3',
-            'Programming Language :: Python :: 3.7',
-            'Programming Language :: Python :: 3.8',
-        ],
-        license='BSD-3-Clause License',
-        setup_requires=['cython', 'numpy'],
-        install_requires=get_requirements(),
-        zip_safe=False)

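write_version_py turns the dotted VERSION string into a version_info tuple literal by quoting any non-numeric component; a quick sketch of that transformation (the version strings are illustrative):

def to_version_info(short_version):
    # quote components that aren't pure digits, exactly as the listcomp above does
    return ', '.join(x if x.isdigit() else f'"{x}"' for x in short_version.split('.'))

print(to_version_info('0.2.5.0'))    # 0, 2, 5, 0
print(to_version_info('0.3.0.rc1'))  # 0, 3, 0, "rc1"
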
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/aiofiles/ospath.py
DELETED
@@ -1,15 +0,0 @@
-"""Async executor versions of file functions from the os.path module."""
-
-from .os import wrap
-from os import path
-
-exists = wrap(path.exists)
-isfile = wrap(path.isfile)
-isdir = wrap(path.isdir)
-islink = wrap(path.islink)
-getsize = wrap(path.getsize)
-getmtime = wrap(path.getmtime)
-getatime = wrap(path.getatime)
-getctime = wrap(path.getctime)
-samefile = wrap(path.samefile)
-sameopenfile = wrap(path.sameopenfile)

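Each name in this module is the synchronous os.path function wrapped to run in an executor, so it can be awaited without blocking the event loop; a minimal usage sketch, assuming the aiofiles package is installed:

import asyncio
import aiofiles.ospath

async def main():
    # runs os.path.exists in a thread pool under the hood
    print(await aiofiles.ospath.exists("/tmp"))

asyncio.run(main())
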
spaces/Danielzero/GPT3.5/modules/shared.py
DELETED
@@ -1,55 +0,0 @@
-from modules.presets import COMPLETION_URL, BALANCE_API_URL, USAGE_API_URL, API_HOST
-import os
-import queue
-
-class State:
-    interrupted = False
-    multi_api_key = False
-    completion_url = COMPLETION_URL
-    balance_api_url = BALANCE_API_URL
-    usage_api_url = USAGE_API_URL
-
-    def interrupt(self):
-        self.interrupted = True
-
-    def recover(self):
-        self.interrupted = False
-
-    def set_api_host(self, api_host):
-        self.completion_url = f"https://{api_host}/v1/chat/completions"
-        self.balance_api_url = f"https://{api_host}/dashboard/billing/credit_grants"
-        self.usage_api_url = f"https://{api_host}/dashboard/billing/usage"
-        os.environ["OPENAI_API_BASE"] = f"https://{api_host}/v1"
-
-    def reset_api_host(self):
-        self.completion_url = COMPLETION_URL
-        self.balance_api_url = BALANCE_API_URL
-        self.usage_api_url = USAGE_API_URL
-        os.environ["OPENAI_API_BASE"] = f"https://{API_HOST}/v1"
-        return API_HOST
-
-    def reset_all(self):
-        self.interrupted = False
-        self.completion_url = COMPLETION_URL
-
-    def set_api_key_queue(self, api_key_list):
-        self.multi_api_key = True
-        self.api_key_queue = queue.Queue()
-        for api_key in api_key_list:
-            self.api_key_queue.put(api_key)
-
-    def switching_api_key(self, func):
-        if not hasattr(self, "api_key_queue"):
-            return func
-
-        def wrapped(*args, **kwargs):
-            api_key = self.api_key_queue.get()
-            args[0].api_key = api_key
-            ret = func(*args, **kwargs)
-            self.api_key_queue.put(api_key)
-            return ret
-
-        return wrapped
-
-
-state = State()

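switching_api_key implements round-robin key rotation: each call checks a key out of the queue, stamps it on the first positional argument, and puts it back afterwards. A standalone sketch of the same pattern with hypothetical names:

import queue

key_queue = queue.Queue()
for key in ["key-a", "key-b"]:
    key_queue.put(key)

def switching_api_key(func):
    def wrapped(client, *args, **kwargs):
        api_key = key_queue.get()        # check a key out of the pool
        client.api_key = api_key         # stamp it on the caller object
        try:
            return func(client, *args, **kwargs)
        finally:
            key_queue.put(api_key)       # return it for the next call

    return wrapped

class FakeClient:
    api_key = None

@switching_api_key
def ask(client, prompt):
    return f"[{client.api_key}] {prompt}"

c = FakeClient()
print(ask(c, "hi"))     # [key-a] hi
print(ask(c, "again"))  # [key-b] again
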