parquet-converter committed
Commit 1556ac9 · 1 Parent(s): 6ee9d1c

Update parquet files (step 26 of 296)

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. spaces/17TheWord/RealESRGAN/inference_realesrgan_video.py +0 -199
  2. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Solid Converter Pdf 7.2 Full Crack.md +0 -27
  3. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Extreme surebet money maker 9.6.0 Serial Key keygen Unlock the Full Potential of Your Betting Software.md +0 -251
  4. spaces/1gistliPinn/ChatGPT4/Examples/Chota Bheem Dholakpur To Kathmandu Full Movie In Hindi Free Download.md +0 -7
  5. spaces/1gistliPinn/ChatGPT4/Examples/Dragon Age Inquisition Patch V.1.11 24 UPD.md +0 -21
  6. spaces/1gistliPinn/ChatGPT4/Examples/ESET Internet Security 11.2.49.0 64 Bit.md +0 -6
  7. spaces/1line/AutoGPT/autogpt/memory/redismem.py +0 -156
  8. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/BlueJeans APK How to Download and Install the Best Video Conferencing App.md +0 -100
  9. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Chess TD Mod APK The Ultimate Strategy Game with Infinite Resources.md +0 -109
  10. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Bleach Vs Naruto 3.3 MOD with Ultimate Ninja Storm 4 Characters (Android).md +0 -105
  11. spaces/1phancelerku/anime-remove-background/Amrutham-001300-Episodes-Telugu-UPDATED.md +0 -78
  12. spaces/1phancelerku/anime-remove-background/Crime Mysteries Find objects - A Challenging Hidden Object Mod APK.md +0 -137
  13. spaces/1phancelerku/anime-remove-background/Dragon Trail Hunter World - A Brand-New Tribal World for You to Discover.md +0 -148
  14. spaces/52Hz/CMFNet_deraindrop/model/block.py +0 -146
  15. spaces/A00001/bingothoo/postcss.config.js +0 -6
  16. spaces/AI-Hobbyist/Hoyo-RVC/train/utils.py +0 -486
  17. spaces/AICODER009/Food101_Detection/app.py +0 -81
  18. spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/models/diffusion/ddpm_audio.py +0 -1262
  19. spaces/AIGE/A_B/README.md +0 -12
  20. spaces/AIZeroToHero/04-Image2OCR/app.py +0 -54
  21. spaces/AP123/dreamgaussian/process.py +0 -92
  22. spaces/AchyuthGamer/OpenGPT/g4f/Provider/deprecated/Lockchat.py +0 -64
  23. spaces/AiMimicry/sovits-models/inference_main.py +0 -130
  24. spaces/Akshay-Vs/GPT-Based-Generator/README.md +0 -13
  25. spaces/Alealejandrooo/deathCertReader/README.md +0 -13
  26. spaces/Alpaca233/SadTalker/src/face3d/models/arcface_torch/configs/glint360k_r18.py +0 -26
  27. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/controlnet/test_controlnet_sdxl.py +0 -260
  28. spaces/Andy1621/uniformer_image_detection/mmdet/core/bbox/coder/yolo_bbox_coder.py +0 -89
  29. spaces/Andy1621/uniformer_image_detection/mmdet/datasets/pipelines/compose.py +0 -51
  30. spaces/Andy1621/uniformer_image_detection/mmdet/models/backbones/uniformer.py +0 -422
  31. spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/fsaf_head.py +0 -422
  32. spaces/Andy1621/uniformer_image_segmentation/configs/fastscnn/README.md +0 -22
  33. spaces/Annotation-AI/fast-segment-everything-with-text-prompt/app.py +0 -17
  34. spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/ops/psa_mask.py +0 -92
  35. spaces/Anonymous-sub/Rerender/ControlNet/ldm/models/diffusion/dpm_solver/dpm_solver.py +0 -1154
  36. spaces/AtomdffAI/wechatgpt4atom/docker/sample-chatgpt-on-wechat/Makefile +0 -26
  37. spaces/Avkash/WebcamFaceProcessing/README.md +0 -13
  38. spaces/Awesimo/jojogan/e4e/options/train_options.py +0 -84
  39. spaces/Awiny/Image2Paragraph/models/grit_src/grit/data/transforms/custom_transform.py +0 -115
  40. spaces/Awiny/Image2Paragraph/models/grit_src/grit/modeling/soft_nms.py +0 -177
  41. spaces/AxelBell/EasyOCR_text_recognition/README.md +0 -13
  42. spaces/Benson/text-generation/Examples/8 Reglas De La Piscina Bola Apk.md +0 -66
  43. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/_ratio.py +0 -160
  44. spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_distutils/_collections.py +0 -56
  45. spaces/Binettebob22/fast_diffusion2/index.html +0 -16
  46. spaces/CVPR/Dual-Key_Backdoor_Attacks/app.py +0 -2
  47. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/evaluation/testing.py +0 -78
  48. spaces/CVPR/LIVE/thrust/thrust/detail/allocator/tagged_allocator.h +0 -101
  49. spaces/CVPR/LIVE/thrust/thrust/system/cpp/detail/copy.h +0 -23
  50. spaces/CVPR/WALT/mmdet/models/necks/fpn_carafe.py +0 -267
spaces/17TheWord/RealESRGAN/inference_realesrgan_video.py DELETED
@@ -1,199 +0,0 @@
- import argparse
- import glob
- import mimetypes
- import os
- import queue
- import shutil
- import torch
- from basicsr.archs.rrdbnet_arch import RRDBNet
- from basicsr.utils.logger import AvgTimer
- from tqdm import tqdm
-
- from realesrgan import IOConsumer, PrefetchReader, RealESRGANer
- from realesrgan.archs.srvgg_arch import SRVGGNetCompact
-
-
- def main():
-     """Inference demo for Real-ESRGAN.
-     It mainly for restoring anime videos.
-
-     """
-     parser = argparse.ArgumentParser()
-     parser.add_argument('-i', '--input', type=str, default='inputs', help='Input image or folder')
-     parser.add_argument(
-         '-n',
-         '--model_name',
-         type=str,
-         default='RealESRGAN_x4plus',
-         help=('Model names: RealESRGAN_x4plus | RealESRNet_x4plus | RealESRGAN_x4plus_anime_6B | RealESRGAN_x2plus'
-               'RealESRGANv2-anime-xsx2 | RealESRGANv2-animevideo-xsx2-nousm | RealESRGANv2-animevideo-xsx2'
-               'RealESRGANv2-anime-xsx4 | RealESRGANv2-animevideo-xsx4-nousm | RealESRGANv2-animevideo-xsx4'))
-     parser.add_argument('-o', '--output', type=str, default='results', help='Output folder')
-     parser.add_argument('-s', '--outscale', type=float, default=4, help='The final upsampling scale of the image')
-     parser.add_argument('--suffix', type=str, default='out', help='Suffix of the restored video')
-     parser.add_argument('-t', '--tile', type=int, default=0, help='Tile size, 0 for no tile during testing')
-     parser.add_argument('--tile_pad', type=int, default=10, help='Tile padding')
-     parser.add_argument('--pre_pad', type=int, default=0, help='Pre padding size at each border')
-     parser.add_argument('--face_enhance', action='store_true', help='Use GFPGAN to enhance face')
-     parser.add_argument('--half', action='store_true', help='Use half precision during inference')
-     parser.add_argument('-v', '--video', action='store_true', help='Output a video using ffmpeg')
-     parser.add_argument('-a', '--audio', action='store_true', help='Keep audio')
-     parser.add_argument('--fps', type=float, default=None, help='FPS of the output video')
-     parser.add_argument('--consumer', type=int, default=4, help='Number of IO consumers')
-
-     parser.add_argument(
-         '--alpha_upsampler',
-         type=str,
-         default='realesrgan',
-         help='The upsampler for the alpha channels. Options: realesrgan | bicubic')
-     parser.add_argument(
-         '--ext',
-         type=str,
-         default='auto',
-         help='Image extension. Options: auto | jpg | png, auto means using the same extension as inputs')
-     args = parser.parse_args()
-
-     # ---------------------- determine models according to model names ---------------------- #
-     args.model_name = args.model_name.split('.')[0]
-     if args.model_name in ['RealESRGAN_x4plus', 'RealESRNet_x4plus']:  # x4 RRDBNet model
-         model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4)
-         netscale = 4
-     elif args.model_name in ['RealESRGAN_x4plus_anime_6B']:  # x4 RRDBNet model with 6 blocks
-         model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=6, num_grow_ch=32, scale=4)
-         netscale = 4
-     elif args.model_name in ['RealESRGAN_x2plus']:  # x2 RRDBNet model
-         model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=2)
-         netscale = 2
-     elif args.model_name in [
-             'RealESRGANv2-anime-xsx2', 'RealESRGANv2-animevideo-xsx2-nousm', 'RealESRGANv2-animevideo-xsx2'
-     ]:  # x2 VGG-style model (XS size)
-         model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=2, act_type='prelu')
-         netscale = 2
-     elif args.model_name in [
-             'RealESRGANv2-anime-xsx4', 'RealESRGANv2-animevideo-xsx4-nousm', 'RealESRGANv2-animevideo-xsx4'
-     ]:  # x4 VGG-style model (XS size)
-         model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=4, act_type='prelu')
-         netscale = 4
-
-     # ---------------------- determine model paths ---------------------- #
-     model_path = os.path.join('experiments/pretrained_models', args.model_name + '.pth')
-     if not os.path.isfile(model_path):
-         model_path = os.path.join('realesrgan/weights', args.model_name + '.pth')
-     if not os.path.isfile(model_path):
-         raise ValueError(f'Model {args.model_name} does not exist.')
-
-     # restorer
-     upsampler = RealESRGANer(
-         scale=netscale,
-         model_path=model_path,
-         model=model,
-         tile=args.tile,
-         tile_pad=args.tile_pad,
-         pre_pad=args.pre_pad,
-         half=args.half)
-
-     if args.face_enhance:  # Use GFPGAN for face enhancement
-         from gfpgan import GFPGANer
-         face_enhancer = GFPGANer(
-             model_path='https://github.com/TencentARC/GFPGAN/releases/download/v0.2.0/GFPGANCleanv1-NoCE-C2.pth',
-             upscale=args.outscale,
-             arch='clean',
-             channel_multiplier=2,
-             bg_upsampler=upsampler)
-     os.makedirs(args.output, exist_ok=True)
-     # for saving restored frames
-     save_frame_folder = os.path.join(args.output, 'frames_tmpout')
-     os.makedirs(save_frame_folder, exist_ok=True)
-
-     if mimetypes.guess_type(args.input)[0].startswith('video'):  # is a video file
-         video_name = os.path.splitext(os.path.basename(args.input))[0]
-         frame_folder = os.path.join('tmp_frames', video_name)
-         os.makedirs(frame_folder, exist_ok=True)
-         # use ffmpeg to extract frames
-         os.system(f'ffmpeg -i {args.input} -qscale:v 1 -qmin 1 -qmax 1 -vsync 0 {frame_folder}/frame%08d.png')
-         # get image path list
-         paths = sorted(glob.glob(os.path.join(frame_folder, '*')))
-         if args.video:
-             if args.fps is None:
-                 # get input video fps
-                 import ffmpeg
-                 probe = ffmpeg.probe(args.input)
-                 video_streams = [stream for stream in probe['streams'] if stream['codec_type'] == 'video']
-                 args.fps = eval(video_streams[0]['avg_frame_rate'])
-     elif mimetypes.guess_type(args.input)[0].startswith('image'):  # is an image file
-         paths = [args.input]
-         video_name = 'video'
-     else:
-         paths = sorted(glob.glob(os.path.join(args.input, '*')))
-         video_name = 'video'
-
-     timer = AvgTimer()
-     timer.start()
-     pbar = tqdm(total=len(paths), unit='frame', desc='inference')
-     # set up prefetch reader
-     reader = PrefetchReader(paths, num_prefetch_queue=4)
-     reader.start()
-
-     que = queue.Queue()
-     consumers = [IOConsumer(args, que, f'IO_{i}') for i in range(args.consumer)]
-     for consumer in consumers:
-         consumer.start()
-
-     for idx, (path, img) in enumerate(zip(paths, reader)):
-         imgname, extension = os.path.splitext(os.path.basename(path))
-         if len(img.shape) == 3 and img.shape[2] == 4:
-             img_mode = 'RGBA'
-         else:
-             img_mode = None
-
-         try:
-             if args.face_enhance:
-                 _, _, output = face_enhancer.enhance(img, has_aligned=False, only_center_face=False, paste_back=True)
-             else:
-                 output, _ = upsampler.enhance(img, outscale=args.outscale)
-         except RuntimeError as error:
-             print('Error', error)
-             print('If you encounter CUDA out of memory, try to set --tile with a smaller number.')
-
-         else:
-             if args.ext == 'auto':
-                 extension = extension[1:]
-             else:
-                 extension = args.ext
-             if img_mode == 'RGBA':  # RGBA images should be saved in png format
-                 extension = 'png'
-             save_path = os.path.join(save_frame_folder, f'{imgname}_out.{extension}')
-
-             que.put({'output': output, 'save_path': save_path})
-
-         pbar.update(1)
-         torch.cuda.synchronize()
-         timer.record()
-         avg_fps = 1. / (timer.get_avg_time() + 1e-7)
-         pbar.set_description(f'idx {idx}, fps {avg_fps:.2f}')
-
-     for _ in range(args.consumer):
-         que.put('quit')
-     for consumer in consumers:
-         consumer.join()
-     pbar.close()
-
-     # merge frames to video
-     if args.video:
-         video_save_path = os.path.join(args.output, f'{video_name}_{args.suffix}.mp4')
-         if args.audio:
-             os.system(
-                 f'ffmpeg -r {args.fps} -i {save_frame_folder}/frame%08d_out.{extension} -i {args.input}'
-                 f' -map 0:v:0 -map 1:a:0 -c:a copy -c:v libx264 -r {args.fps} -pix_fmt yuv420p {video_save_path}')
-         else:
-             os.system(f'ffmpeg -r {args.fps} -i {save_frame_folder}/frame%08d_out.{extension} '
-                       f'-c:v libx264 -r {args.fps} -pix_fmt yuv420p {video_save_path}')
-
-     # delete tmp file
-     shutil.rmtree(save_frame_folder)
-     if os.path.isdir(frame_folder):
-         shutil.rmtree(frame_folder)
-
-
- if __name__ == '__main__':
-     main()
 
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Solid Converter Pdf 7.2 Full Crack.md DELETED
@@ -1,27 +0,0 @@
-
- <h1>Download Solid Converter PDF 7.2 Full Crack for Free</h1>
- <p>Solid Converter PDF is a powerful and professional software that can convert PDF files to various formats, such as Word, Excel, PowerPoint, HTML, etc. It can also create PDF files from any printable document. It has many features and functions that can help you edit, modify, or secure your PDF files as you wish.</p>
- <h2>download solid converter pdf 7.2 full crack</h2><br /><p><b><b>Download File</b> &rarr;&rarr;&rarr; <a href="https://byltly.com/2uKyO4">https://byltly.com/2uKyO4</a></b></p><br /><br />
- <p>However, Solid Converter PDF is not a free software. You need to pay a license fee to use it without any limitations or watermarks. If you don't want to spend money on it, you might be tempted to download Solid Converter PDF 7.2 full crack for free from some websites or torrents. But is it really a good idea?</p>
- <p>The answer is no. Downloading Solid Converter PDF 7.2 full crack for free is illegal and risky. Here are some reasons why you should avoid it:</p>
- <ul>
- <li>It violates the intellectual property rights of the software developer and the law. You may face legal consequences or penalties if you are caught using a cracked software.</li>
- <li>It may contain malware or viruses that can harm your computer or steal your personal information. You may lose your important data or compromise your privacy and security.</li>
- <li>It may not work properly or have some errors or bugs. You may experience crashes, freezes, or poor performance when using a cracked software.</li>
- <li>It may not be compatible with your system or other software. You may encounter some conflicts or issues when installing or running a cracked software.</li>
- <li>It may not be updated or supported by the software developer. You may miss out on the latest features, improvements, or fixes of the software.</li>
- </ul>
- <p>Therefore, downloading Solid Converter PDF 7.2 full crack for free is not worth it. You may end up wasting your time, money, or resources on a faulty or dangerous software. Instead, you should use a legal and safe way to get Solid Converter PDF 7.2 full version.</p>
- <p>One of the best options is to use the official website of Solid Converter PDF and download the trial version of the software. The trial version allows you to use the software for 15 days with all the features and functions available. You can then decide whether to buy the license or not.</p>
- <p>To download the trial version of Solid Converter PDF 7.2 full version, you can follow these steps:</p>
- <p></p>
- <ol>
- <li>Go to the official website of Solid Converter PDF and click on the "Download" button.</li>
- <li>Save the installation file on your computer and run it.</li>
- <li>Follow the instructions to install the software on your computer.</li>
- <li>Launch the software and enter your email address to activate the trial version.</li>
- <li>Enjoy using the software for 15 days with all the features and functions available.</li>
- </ol>
- <p>Congratulations! You have successfully downloaded Solid Converter PDF 7.2 full version for free in a legal and safe way. You can now convert, create, or edit your PDF files with ease and confidence.</p> ddb901b051<br />
- <br />
- <br />
 
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Extreme surebet money maker 9.6.0 Serial Key keygen Unlock the Full Potential of Your Betting Software.md DELETED
@@ -1,251 +0,0 @@
1
-
2
- <h1>Extreme Surebet Money Maker 9.6.0 Serial Key Keygen: How to Make Money with Arbitrage Betting</h1>
3
- <p>Are you looking for a way to make money online without risking your hard-earned cash? Do you want to learn how to exploit the differences in odds between different bookmakers and guarantee a profit no matter what the outcome of an event? If so, then you might be interested in arbitrage betting, also known as sure betting or arbing.</p>
4
- <h2>Extreme surebet money maker 9.6.0 Serial Key keygen</h2><br /><p><b><b>Download Zip</b> &#9733;&#9733;&#9733; <a href="https://byltly.com/2uKzjA">https://byltly.com/2uKzjA</a></b></p><br /><br />
5
- <p>Arbitrage betting is a technique that involves placing bets on all possible outcomes of an event at odds that guarantee a profit regardless of the result. It is possible because bookmakers have different opinions and methods of setting their odds, which creates discrepancies that can be exploited by smart bettors.</p>
6
- <p>In this article, we will explain what arbitrage betting is, how it works, and what are the benefits and risks of using it. We will also introduce you to Extreme Surebet Money Maker 9.6.0, a software that helps you find and place arbitrage bets with ease. We will show you how to download, install, and use this software to make money with arbitrage betting. Finally, we will give you some tips and tricks for successful arbitrage betting with Extreme Surebet Money Maker 9.6.0.</p>
7
- <h2>What is Arbitrage Betting?</h2>
8
- <h3>Definition and Examples</h3>
9
- <p>Arbitrage betting is a form of betting that involves placing bets on all possible outcomes of an event at odds that guarantee a profit regardless of the result. It is based on the principle of the law of one price, which states that in an efficient market, the same asset should have the same price everywhere.</p>
10
- <p>For example, let's say that there is a tennis match between Player A and Player B. You find two bookmakers that offer different odds for this match:</p>
11
- <table>
12
- <tr>
13
- <th>Bookmaker 1</th>
14
- <th>Bookmaker 2</th>
15
- </tr>
16
- <tr>
17
- <td>Player A: 1.80</td>
18
- <td>Player A: 2.00</td>
19
- </tr>
20
- <tr>
21
- <td>Player B: 2.00</td>
22
- <td>Player B: 1.80</td>
23
- </tr>
24
- </table>
25
- <p>You can see that there is an arbitrage opportunity here, because you can bet on both players at different bookmakers and lock in a profit no matter who wins. To do this, you need to calculate how much to bet on each player using this formula:</p>
26
- <p>How to get Extreme surebet money maker 9.6.0 license code<br />
27
- Extreme surebet money maker 9.6.0 crack download free<br />
28
- Extreme surebet money maker 9.6.0 activation key generator<br />
29
- Extreme surebet money maker 9.6.0 full version with serial number<br />
30
- Extreme surebet money maker 9.6.0 registration key online<br />
31
- Extreme surebet money maker 9.6.0 patch file download<br />
32
- Extreme surebet money maker 9.6.0 product key finder<br />
33
- Extreme surebet money maker 9.6.0 keygen torrent link<br />
34
- Extreme surebet money maker 9.6.0 serial code for windows<br />
35
- Extreme surebet money maker 9.6.0 unlock code for mac<br />
36
- Extreme surebet money maker 9.6.0 cracked software download<br />
37
- Extreme surebet money maker 9.6.0 serial key free trial<br />
38
- Extreme surebet money maker 9.6.0 activation code no survey<br />
39
- Extreme surebet money maker 9.6.0 full crack with keygen<br />
40
- Extreme surebet money maker 9.6.0 license key email and password<br />
41
- Extreme surebet money maker 9.6.0 crack file download link<br />
42
- Extreme surebet money maker 9.6.0 key generator software<br />
43
- Extreme surebet money maker 9.6.0 serial number for pc<br />
44
- Extreme surebet money maker 9.6.0 code generator online<br />
45
- Extreme surebet money maker 9.6.0 crack version download<br />
46
- Extreme surebet money maker 9.6.0 serial key working<br />
47
- Extreme surebet money maker 9.6.0 activation key free download<br />
48
- Extreme surebet money maker 9.6.0 full version with keygen download<br />
49
- Extreme surebet money maker 9.6.0 registration key crack<br />
50
- Extreme surebet money maker 9.6.0 patch download link<br />
51
- Extreme surebet money maker 9.6.0 product key generator online<br />
52
- Extreme surebet money maker 9.6.0 keygen download free<br />
53
- Extreme surebet money maker 9.6.0 serial code for mac<br />
54
- Extreme surebet money maker 9.6.0 unlock code for windows<br />
55
- Extreme surebet money maker 9.6.0 cracked version download link<br />
56
- Extreme surebet money maker 9.6.0 serial key no survey<br />
57
- Extreme surebet money maker 9.6.0 activation code working<br />
58
- Extreme surebet money maker 9.6.0 full crack download link<br />
59
- Extreme surebet money maker 9.6.0 license key generator software<br />
60
- Extreme surebet money maker 9.6.0 crack file free download<br />
61
- Extreme surebet money maker 9.6.0 key generator online free<br />
62
- Extreme surebet money maker 9.6 serial number for windows and mac <br />
63
- Extreme surebet money maker v9 .60 code generator no survey <br />
64
- Download extreme SureBet Money Maker v96 .00 full version with crack and keygen <br />
65
- How to activate extreme SureBet Money Maker v96 .00 with serial key</p>
66
- <pre><code>Bet on Player A = (Total Stake * Odds on Player B) / (Odds on Player A + Odds on Player B) Bet on Player B = (Total Stake * Odds on Player A) / (Odds on Player A + Odds on Player B) </code></pre>
67
- <p>Let's say that your total stake is $1000. Using the formula above, you get:</p>
68
- <pre><code>Bet on Player A = ($1000 * 1.80) / (1.80 + 2.00) = $473.68 Bet on Player B = ($1000 * 2.00) / (1.80 + 2.00) = $526.32 </code></pre>
69
- <p>You place these bets at the respective bookmakers and wait for the match to end. If Player A wins, you get:</p>
70
- <pre><code>$473.68 * 2.00 = $947.36 $526.32 * 0 = $0 Total Return = $947.36 Profit = $947.36 - $1000 = -$52.64 </code></pre>
71
- <p>If Player B wins, you get:</p>
72
- <pre><code>$473.68 * 0 = $0 $526.32 * 1.80 = $947.38 Total Return = $947.38 Profit = $947.38 - $1000 = -$52.62 </code></pre>
73
- <p>In both cases, you make a profit of around $52, which is about 5% of your total stake.</p>
74
- <h3>Benefits and Risks</h3>
75
- <p>Arbitrage betting has several benefits over conventional betting:</p>
76
- <ul>
77
- <li>It eliminates the risk of losing money by covering all possible outcomes.</li>
78
- <li>It exploits the inefficiencies in the market and takes advantage of the differences in odds between bookmakers.</li>
79
- <li>It does not depend on luck or skill, but only on mathematics and logic.</li>
80
- <li>It can be applied to any sport or event that has two or more possible outcomes.</li>
81
- <li>It can generate consistent and steady profits over time.</li>
82
- </ul>
83
- <p>However, arbitrage betting also has some risks and challenges:</p>
84
- <ul>
85
- <li>It requires a lot of time, effort, and research to find arbitrage opportunities.</li>
86
- <li>It requires a large bankroll to place bets on all outcomes and cover the fees and commissions.</li>
87
- <li>It requires fast and accurate calculations to determine the optimal stakes and profits.</li>
88
- <li>It requires quick and careful execution to place bets before the odds change or disappear.</li>
89
- <li>It may attract attention from bookmakers who may limit or close your accounts if they suspect you of arbing.</li>
90
- </ul>
91
- <p>To overcome these risks and challenges, you need a reliable tool that can help you find and place arbitrage bets with ease.</p>
92
- <h2>What is Extreme Surebet Money Maker 9.6.0?</h2>
93
- <h3>Features and Functions</h3>
94
- <p>Extreme Surebet Money Maker 9.6.0 is a software that helps you find and place arbitrage bets with ease.</p>
95
- <p>This software has several features and functions that make it one of the best tools for arbitrage betting:</p>
96
- <ul>
97
- <li>It scans over 100 online bookmakers and sports exchanges for arbitrage opportunities in real time.</li>
98
- <li>It supports over 20 sports and hundreds of markets, including pre-match and live events.</li>
99
- <li>It calculates the optimal stakes and profits for each arbitrage opportunity automatically.</li>
100
- <li>It displays all the relevant information for each arbitrage opportunity in a clear and user-friendly interface.</li>
101
- <li>It allows you to place bets directly from the software with one click using its integrated browser.</li>
102
- <li>It updates the odds and availability of each arbitrage opportunity constantly.</li>
103
- <li>It alerts you when new arbitrage opportunities are found or when existing ones change or disappear.</li>
104
- <li>It keeps track of your bets history, results, profits, losses, balance, ROI, etc.</li>
105
- <li>It allows you to customize your settings according to your preferences, such as minimum profit percentage, maximum stake size, currency, etc.</li>
106
- <li>It provides customer support via email or live chat.</li>
107
- </ul>
108
- <h3>How to Download and Install</h3>
109
- <p>To download and install Extreme Surebet Money Maker 9.6.0, you need to follow these steps:</p>
110
- <ol>
111
- <li>Visit the official website of Extreme Surebet Money Maker at <a href="https://www.extremesurebet.com/">https://www.extremesurebet.com/</a>.</li>
112
- <li>Select your preferred language from the drop-down menu at the top right corner of the page.</li>
113
- <li>Select your preferred payment method from the options available on the page.</li>
114
- <li>Select your preferred subscription plan from the options available on the page.</li>
115
- <li>Fulfill your payment details and confirm your order.</li>
116
- <li>You will receive an email with your serial key keygen and a link to download the software.</li>
117
- <li>Click on the link to download the software file to your computer.</li>
118
- <li>Run the software file as an administrator to install it on your computer.</li>
119
- <li>You will be prompted to enter your serial key keygen during the installation process.</li>
120
- with your email and password.</li>
121
- <li>After the installation is complete, you can launch the software and log in with your account.</li>
122
- <li>You can now start using the software to find and place arbitrage bets.</li>
123
- </ol>
124
- <h2>How to Use Extreme Surebet Money Maker 9.6.0 to Find and Place Arbitrage Bets</h2>
125
- <h3>Step 1: Choose Your Bookmakers and Sports</h3>
126
- <p>The first step to use Extreme Surebet Money Maker 9.6.0 is to choose your bookmakers and sports.</p>
127
- <p>To do this, you need to:</p>
128
- <ol>
129
- <li>Click on the "Settings" button at the top left corner of the software.</li>
130
- <li>Click on the "Bookmakers" tab on the left side of the settings window.</li>
131
- <li>Select the bookmakers that you have accounts with and that you want to use for arbitrage betting.</li>
132
- <li>Enter your login details for each bookmaker in the corresponding fields.</li>
133
- <li>Click on the "Save" button at the bottom of the settings window.</li>
134
- <li>Click on the "Sports" tab on the left side of the settings window.</li>
135
- <li>Select the sports that you are interested in and that you want to scan for arbitrage opportunities.</li>
136
- <li>Click on the "Save" button at the bottom of the settings window.</li>
137
- <li>Close the settings window.</li>
138
- </ol>
139
- <h3>Step 2: Scan for Arbitrage Opportunities</h3>
140
- <p>The second step to use Extreme Surebet Money Maker 9.6.0 is to scan for arbitrage opportunities.</p>
141
- <p>To do this, you need to:</p>
142
- <ol>
143
- <li>Click on the "Scan" button at the top right corner of the software.</li>
144
- <li>The software will start scanning over 100 online bookmakers and sports exchanges for arbitrage opportunities in real time.</li>
145
- <li>You will see a list of arbitrage opportunities on the main screen of the software, sorted by profit percentage from highest to lowest.</li>
146
- <li>You can filter the list by sport, market, bookmaker, profit percentage, stake size, etc. using the options at the top of the screen.</li>
147
- <li>You can also search for a specific event or outcome using the search box at the top of the screen.</li>
148
- </ol>
149
- <h3>Step 3: Calculate Your Stakes and Profits</h3>
150
- <p>The third step to use Extreme Surebet Money Maker 9.6.0 is to calculate your stakes and profits for each arbitrage opportunity.</p>
151
- <p>To do this, you need to:</p>
152
- <ol>
153
- <li>Select an arbitrage opportunity from the list by clicking on it.</li>
154
- <li>You will see a pop-up window with all the relevant information for that arbitrage opportunity, such as event name, date, time, outcomes, odds, bookmakers, etc.</li>
155
- <li>You will also see a calculator that shows you how much to bet on each outcome and how much profit you will make regardless of the result.</li>
156
- <li>You can adjust your total stake size using the slider or by entering a specific amount in the field below it.</li>
157
- <li>The calculator will automatically update your stakes and profits according to your total stake size and currency.</li>
158
- </ol>
159
- <h3>Step 4: Place Your Bets Quickly and Carefully</h3>
160
- <p>The fourth and final step to use Extreme Surebet Money Maker 9.6.0 is to place your bets quickly and carefully on each outcome at each bookmaker.</p>
161
- <p>To do this, you need to:</p>
162
- <ol>
163
- <li>Click on the "Bet" button next to each outcome in the pop-up window.</li>
164
- the bookmaker's website where you can place your bet.</li>
165
- <li>Log in to your bookmaker account if you are not already logged in.</li>
166
- <li>Check the odds and availability of the outcome that you want to bet on.</li>
167
- <li>Enter your stake amount in the betting slip and confirm your bet.</li>
168
- <li>Repeat this process for each outcome at each bookmaker until you have placed all your bets.</li>
169
- <li>Close the pop-up window and the browser tabs.</li>
170
- </ol>
171
- <p>Congratulations! You have just placed an arbitrage bet and locked in a profit no matter what the result of the event.</p>
172
- <h2>Tips and Tricks for Successful Arbitrage Betting with Extreme Surebet Money Maker 9.6.0</h2>
173
- <h3>Use a VPN and Multiple Accounts</h3>
174
- <p>One of the main challenges of arbitrage betting is that bookmakers may limit or close your accounts if they suspect you of arbing. To avoid this, you should use a VPN and multiple accounts to hide your identity and location from bookmakers.</p>
175
- <p>A VPN is a service that allows you to connect to the internet through a different server in a different country. This way, you can access websites that are blocked or restricted in your region, and also mask your IP address and location from bookmakers.</p>
176
- <p>Multiple accounts are accounts that you create using different names, emails, addresses, phone numbers, etc. This way, you can spread your bets across different accounts and bookmakers, and also take advantage of different bonuses and promotions.</p>
177
- <p>You can use Extreme Surebet Money Maker 9.6.0 with a VPN and multiple accounts by following these steps:</p>
178
- <ol>
179
- <li>Choose a reliable VPN service that has servers in many countries and that does not keep logs of your activity.</li>
180
- <li>Download and install the VPN software on your computer.</li>
181
- <li>Connect to a server in a country where online gambling is legal and where the bookmakers that you want to use are available.</li>
182
- <li>Create multiple accounts with different bookmakers using different details and payment methods.</li>
183
- <li>Use Extreme Surebet Money Maker 9.6.0 as usual, but make sure to log in to your bookmaker accounts using the integrated browser of the software.</li>
184
- <li>Change your VPN server and bookmaker account regularly to avoid detection and suspicion from bookmakers.</li>
185
- </ol>
186
- <h3>Avoid Suspicious Bets and Mistakes</h3>
187
- <p>Another challenge of arbitrage betting is that some bets may be suspicious or mistaken, which may lead to canceled bets, reduced odds, or disputes with bookmakers. To avoid this, you should avoid suspicious bets and mistakes when placing arbitrage bets.</p>
188
- <p>Suspicious bets are bets that have unusually high odds, low limits, or rare markets. These may indicate that the bookmaker has made an error or that there is some insider information or manipulation involved. These bets may attract attention from bookmakers or other bettors, who may try to correct the odds or cancel the bets.</p>
189
- <p>Mistakes are errors that you make when placing arbitrage bets, such as entering the wrong stake amount, choosing the wrong outcome, or betting on the wrong event. These may result in losing money, missing out on profits, or having disputes with bookmakers.</p>
190
- <p>You can avoid suspicious bets and mistakes by following these tips:</p>
191
- <ul>
192
- <li>Check the odds and availability of each outcome at each bookmaker before placing your bets.</li>
193
- <li>Avoid betting on events or markets that have low liquidity, high volatility, or unusual patterns.</li>
194
- <li>Avoid betting on events or markets that are not familiar to you or that require specific knowledge or skills.</li>
195
- <li>Avoid betting on events or markets that have conflicting or incomplete information or rules.</li>
196
- <li>Avoid betting on events that are too close to start or end time.</li>
197
- <li>Avoid betting on events that have too many possible outcomes or variables.</li>
198
- <li>Avoid betting on events that have too high or too low profit percentages.</li>
199
- <li>Avoid betting on events that have too high or too low stake sizes.</li>
200
- <li>Avoid betting on events that have too many bookmakers involved.</li>
201
- <li>Avoid betting on events that have been canceled, postponed, suspended, or changed for any reason.</li>
202
- <li>Double-check your stakes and profits for each outcome at each bookmaker before confirming your bets.</li>
203
- </ul>
204
- <h3>Keep Track of Your Results and Bankroll</h3>
205
- <p>The final challenge of arbitrage betting is that it requires a lot of discipline and management to keep track of your results and bankroll. To do this, you should use Extreme Surebet Money Maker 9.6.0's features and functions to monitor and analyze your performance and finances.</p>
206
- <p>You can keep track of your results and bankroll by following these steps:</p>
207
- <ol>
208
- <li>Click on the "History" button at the top left corner of the software.</li>
209
- stakes, profits, losses, etc.</li>
210
- <li>You can filter the list by date, sport, market, bookmaker, profit percentage, stake size, etc. using the options at the top of the screen.</li>
211
- <li>You can also search for a specific bet or event using the search box at the top of the screen.</li>
212
- <li>You can export your bets history to a CSV file by clicking on the "Export" button at the bottom of the screen.</li>
213
- <li>Click on the "Statistics" button at the top left corner of the software.</li>
214
- <li>You will see a summary of your statistics, including total bets, total stakes, total profits, total losses, average profit percentage, average stake size, ROI, etc.</li>
215
- <li>You can filter the statistics by date, sport, market, bookmaker, profit percentage, stake size, etc. using the options at the top of the screen.</li>
216
- <li>You can also see a graph of your profits and losses over time by clicking on the "Graph" button at the bottom of the screen.</li>
217
- <li>Click on the "Balance" button at the top left corner of the software.</li>
218
- <li>You will see a list of all your bookmaker accounts and their balances.</li>
219
- <li>You can update your balances manually by entering the current amount in each account in the corresponding field.</li>
220
- <li>You can also update your balances automatically by clicking on the "Update" button at the bottom of the screen. This will open a new tab in your integrated browser that will take you to each bookmaker's website where you can check your balance.</li>
221
- </ol>
222
- <p>By keeping track of your results and bankroll, you can evaluate your performance and finances and make adjustments accordingly.</p>
223
- <h2>Conclusion</h2>
224
- <p>Arbitrage betting is a technique that involves placing bets on all possible outcomes of an event at odds that guarantee a profit regardless of the result. It is possible because bookmakers have different opinions and methods of setting their odds, which creates discrepancies that can be exploited by smart bettors.</p>
225
- <p>Arbitrage betting has several benefits over conventional betting, such as eliminating the risk of losing money, exploiting the inefficiencies in the market, and generating consistent and steady profits over time. However, arbitrage betting also has some risks and challenges, such as finding arbitrage opportunities, placing bets quickly and carefully, and avoiding detection and suspicion from bookmakers.</p>
226
- the odds and availability of each arbitrage opportunity constantly. It alerts you when new arbitrage opportunities are found or when existing ones change or disappear. It keeps track of your bets history, results, profits, losses, balance, ROI, etc. It allows you to customize your settings according to your preferences, such as minimum profit percentage, maximum stake size, currency, etc. It provides customer support via email or live chat.</p>
227
- <p>To use Extreme Surebet Money Maker 9.6.0, you need to download and install it on your computer. You need to choose your bookmakers and sports that you want to use for arbitrage betting. You need to scan for arbitrage opportunities and calculate your stakes and profits for each one. You need to place your bets quickly and carefully on each outcome at each bookmaker. You need to keep track of your results and bankroll and make adjustments accordingly.</p>
228
- <p>To succeed in arbitrage betting with Extreme Surebet Money Maker 9.6.0, you need to follow some tips and tricks, such as using a VPN and multiple accounts to hide your identity and location from bookmakers, avoiding suspicious bets and mistakes that may lead to canceled bets, reduced odds, or disputes with bookmakers, and monitoring and analyzing your performance and finances.</p>
229
- <p>If you are interested in arbitrage betting and want to make money with it without risking your hard-earned cash, then you should try Extreme Surebet Money Maker 9.6.0. It is one of the best tools for arbitrage betting that will help you find and place arbitrage bets with ease.</p>
230
- <h2>FAQs</h2>
231
- <p>Here are some frequently asked questions about Extreme Surebet Money Maker 9.6.0:</p>
232
- <h3>Q: How much does Extreme Surebet Money Maker 9.6.0 cost?</h3>
233
- <p>A: Extreme Surebet Money Maker 9.6.0 offers different subscription plans depending on the duration and features that you want to use. The prices range from $29 per month for the basic plan to $199 per month for the premium plan. You can also get discounts if you pay for longer periods in advance.</p>
234
- <h3>Q: How can I get a serial key keygen for Extreme Surebet Money Maker 9.6.0?</h3>
235
- <p>A: You can get a serial key keygen for Extreme Surebet Money Maker 9.6.0 by purchasing a subscription plan from the official website of Extreme Surebet Money Maker at <a href="https://www.extremesurebet.com/">https://www.extremesurebet.com/</a>. You will receive an email with your serial key keygen and a link to download the software after you confirm your order.</p>
236
- <h3>Q: How can I contact the customer support of Extreme Surebet Money Maker 9.6.0?</h3>
237
- <p>A: You can contact the customer support of Extreme Surebet Money Maker 9.6.0 by sending an email to <a href="mailto:[email protected]">[email protected]</a> or by using the live chat feature on the official website of Extreme Surebet Money Maker at <a href="https://www.extremesurebet.com/">https://www.extremesurebet.com/</a>. The customer support team is available 24/7 and will respond to your queries as soon as possible.</p>
238
- <h3>Q: Is Extreme Surebet Money Maker 9.6.0 safe and legal?</h3>
239
- <p>A: Extreme Surebet Money Maker 9.6.0 is safe and legal to use as long as you follow the terms and conditions of the software and the bookmakers that you use for arbitrage betting. The software does not contain any viruses or malware that may harm your computer or data. The software does not violate any laws or regulations that may prohibit or restrict online gambling or arbitrage betting in your region.</p>
240
- <h3>Q: What are the minimum system requirements for Extreme Surebet Money Maker 9.6.0?</h3>
241
- <p>A: The minimum system requirements for Extreme Surebet Money Maker 9.6.0 are:</p>
242
- <ul>
243
- <li>Operating System: Windows XP/Vista/7/8/10</li>
244
- <li>Processor: Intel Pentium 4 or higher</li>
245
- <li>Memory: 512 MB RAM or higher</li>
246
- <li>Disk Space: 100 MB free disk space or higher</li>
247
- <li>Internet Connection: Broadband or higher</li>
248
- </ul>
249
- </p> 0a6ba089eb<br />
250
- <br />
251
- <br />
 
spaces/1gistliPinn/ChatGPT4/Examples/Chota Bheem Dholakpur To Kathmandu Full Movie In Hindi Free Download.md DELETED
@@ -1,7 +0,0 @@
- <h2>chota bheem dholakpur to kathmandu full movie in hindi free download</h2><br /><p><b><b>Download Zip</b> --->>> <a href="https://imgfil.com/2uxXZ6">https://imgfil.com/2uxXZ6</a></b></p><br /><br />
-
- chota bheem dholakpur to kathmandu movie in hindi machan vikram kumar nithyayil ki gaal vikram kumar shyam pandit bholenath mukesh choti desh muharram vikram kumar choti karti
- chota bheem dekhi khushi choti bheem ki khushi choti bheem ki choti choti bheem khushi choti bheem ki choti choti bheem khushi choti bheem ki choti choti bheem khushi choti b 8a78ff9644<br />
- <br />
- <br />
- <p></p>
 
spaces/1gistliPinn/ChatGPT4/Examples/Dragon Age Inquisition Patch V.1.11 24 UPD.md DELETED
@@ -1,21 +0,0 @@
-
- <h1>Dragon Age: Inquisition Patch v.1.11 24: What You Need to Know</h1>
- <p>Dragon Age: Inquisition is a popular role-playing game developed by BioWare and published by Electronic Arts. The game was released in 2014 and has received several updates and expansions since then. The latest update, Patch v.1.11 24, was released on October 15, 2015 for PC, PlayStation 4, and Xbox One. Here are some of the features and improvements that this patch brings to the game.</p>
- <h2>The Golden Nug</h2>
- <p>If you have spent countless hours exploring the vast world of Thedas, you have probably collected a lot of items, codices, schematics, recipes, mounts, and decorations. Wouldn't it be nice if you could sync them across your different saved games? Well, now you can with the Golden Nug statue. This statue lets you sync your collectibles across games that are online and on the same platform. All you have to do is touch the statue in Haven or Skyhold with a post-game character, and then touch it again with another character to sync your current game. This way, you can enjoy your hard-earned rewards in any of your games.</p>
- <h2>Dragon Age: Inquisition Patch v.1.11 24</h2><br /><p><b><b>DOWNLOAD</b> &#9999; &#9999; &#9999; <a href="https://imgfil.com/2uxXMY">https://imgfil.com/2uxXMY</a></b></p><br /><br />
- <h2>The Wardrobe</h2>
- <p>Another feature that many players have requested is more options for the Inquisitor's casual attire. The default beige outfit may be practical for the Frostback Mountains, but it can get boring after a while. With the latest patch, you can access a wardrobe in Skyhold that contains a dozen new options for your Inquisitor's casual wear. You can choose from different colors, styles, and fabrics to suit your mood and personality. And don't worry, if you ever miss the beige outfit, you can always switch back to it anytime.</p>
- <h2>New Multiplayer Agent</h2>
- <p>The patch also adds a new multiplayer agent to the roster of the Inquisition's agents. Hissera, the Saarebas, is a powerful Qunari mage who can change her abilities depending on her stance. She has three stances: opening, flow, and finishing. Each stance has a different effect on her abilities, such as pulling enemies in, unleashing cold attacks, or boosting allies. Each time she casts an ability, she progresses to the next stance in the same order. Hissera is a unique and versatile character who can adapt to any situation.</p>
- <h2>Conclusion</h2>
- <p>Dragon Age: Inquisition Patch v.1.11 24 is a content update that adds new features and improvements to the game. It allows players to sync their collectibles across games with the Golden Nug statue, customize their Inquisitor's casual attire with the wardrobe, and play as a new multiplayer agent with Hissera, the Saarebas. The patch also fixes some bugs and glitches that may have affected the gameplay experience. If you are a fan of Dragon Age: Inquisition, you should definitely download this patch and enjoy the new content.</p>
-
- <h2>Respec Your Character</h2>
- <p>Dragon Age: Inquisition features the ability to respec a character's abilities and skills. To do so, you must approach the totem that can be found near the forging equipment. You can buy your first respec amulet for 1 gold, but subsequent ones will cost more. Respeccing can be useful if you want to try out different builds, optimize your character for certain situations, or correct any mistakes you made while leveling up. You can respec as many times as you want, as long as you have enough gold and amulets.</p>
- <h2>Use Math to Solve Astrariums</h2>
- <p>Astrariums are star puzzles that can be found in various locations in Thedas. Solving them will reveal hidden caves that contain valuable loot and secrets. However, some of them can be tricky and frustrating to solve. A simple trick to help you out is to use math. Look at the completed puzzle in the bottom left corner of the screen. If any of the star points have an odd number of paths, there will always be one more to balance things out. What this means is that you will always start on one of these points, and end on the other. If there are only stars with an even number of paths, you can start at any point, but you will have to finish at that same point.</p>
- <h2>Don't Neglect Your Search Button</h2>
- <p>It's easy to forget that you can trigger a search pulse by pressing down on the left control stick - as it's only mentioned briefly in the game. By using the search button, any interactive items in the area will ping and have an orange outline. If you see your compass start to pulse, hitting the search button will indicate the direction of hidden items, and will reveal them once you are close enough. This is very useful for finding resources, codex entries, loot, secrets, and quest items. You should use it often, especially in new areas or when exploring dungeons.</p> d5da3c52bf<br />
- <br />
- <br />
 
spaces/1gistliPinn/ChatGPT4/Examples/ESET Internet Security 11.2.49.0 64 Bit.md DELETED
@@ -1,6 +0,0 @@
- <h2>ESET Internet Security 11.2.49.0 64 bit</h2><br /><p><b><b>DOWNLOAD</b> &#9658;&#9658;&#9658;&#9658;&#9658; <a href="https://imgfil.com/2uy02C">https://imgfil.com/2uy02C</a></b></p><br /><br />
- <br />
- Try the latest version of iTunes (64-bit) 2020 for Windows Download iTunes for ... 9.0 Build 2496 (32-bit) MEmu 6.0.7.6 ESET Internet Security 11.2.49.0 K-Lite ... 1fdad05405<br />
- <br />
- <br />
- <p></p>
 
spaces/1line/AutoGPT/autogpt/memory/redismem.py DELETED
@@ -1,156 +0,0 @@
- """Redis memory provider."""
- from __future__ import annotations
-
- from typing import Any
-
- import numpy as np
- import redis
- from colorama import Fore, Style
- from redis.commands.search.field import TextField, VectorField
- from redis.commands.search.indexDefinition import IndexDefinition, IndexType
- from redis.commands.search.query import Query
-
- from autogpt.llm_utils import create_embedding_with_ada
- from autogpt.logs import logger
- from autogpt.memory.base import MemoryProviderSingleton
-
- SCHEMA = [
-     TextField("data"),
-     VectorField(
-         "embedding",
-         "HNSW",
-         {"TYPE": "FLOAT32", "DIM": 1536, "DISTANCE_METRIC": "COSINE"},
-     ),
- ]
-
-
- class RedisMemory(MemoryProviderSingleton):
-     def __init__(self, cfg):
-         """
-         Initializes the Redis memory provider.
-
-         Args:
-             cfg: The config object.
-
-         Returns: None
-         """
-         redis_host = cfg.redis_host
-         redis_port = cfg.redis_port
-         redis_password = cfg.redis_password
-         self.dimension = 1536
-         self.redis = redis.Redis(
-             host=redis_host,
-             port=redis_port,
-             password=redis_password,
-             db=0,  # Cannot be changed
-         )
-         self.cfg = cfg
-
-         # Check redis connection
-         try:
-             self.redis.ping()
-         except redis.ConnectionError as e:
-             logger.typewriter_log(
-                 "FAILED TO CONNECT TO REDIS",
-                 Fore.RED,
-                 Style.BRIGHT + str(e) + Style.RESET_ALL,
-             )
-             logger.double_check(
-                 "Please ensure you have setup and configured Redis properly for use. "
-                 + f"You can check out {Fore.CYAN + Style.BRIGHT}"
-                 f"https://github.com/Torantulino/Auto-GPT#redis-setup{Style.RESET_ALL}"
-                 " to ensure you've set up everything correctly."
-             )
-             exit(1)
-
-         if cfg.wipe_redis_on_start:
-             self.redis.flushall()
-         try:
-             self.redis.ft(f"{cfg.memory_index}").create_index(
-                 fields=SCHEMA,
-                 definition=IndexDefinition(
-                     prefix=[f"{cfg.memory_index}:"], index_type=IndexType.HASH
-                 ),
-             )
-         except Exception as e:
-             print("Error creating Redis search index: ", e)
-         existing_vec_num = self.redis.get(f"{cfg.memory_index}-vec_num")
-         self.vec_num = int(existing_vec_num.decode("utf-8")) if existing_vec_num else 0
-
-     def add(self, data: str) -> str:
-         """
-         Adds a data point to the memory.
-
-         Args:
-             data: The data to add.
-
-         Returns: Message indicating that the data has been added.
-         """
-         if "Command Error:" in data:
-             return ""
-         vector = create_embedding_with_ada(data)
-         vector = np.array(vector).astype(np.float32).tobytes()
-         data_dict = {b"data": data, "embedding": vector}
-         pipe = self.redis.pipeline()
-         pipe.hset(f"{self.cfg.memory_index}:{self.vec_num}", mapping=data_dict)
-         _text = (
-             f"Inserting data into memory at index: {self.vec_num}:\n" f"data: {data}"
-         )
-         self.vec_num += 1
-         pipe.set(f"{self.cfg.memory_index}-vec_num", self.vec_num)
-         pipe.execute()
-         return _text
-
-     def get(self, data: str) -> list[Any] | None:
-         """
-         Gets the data from the memory that is most relevant to the given data.
-
-         Args:
-             data: The data to compare to.
-
-         Returns: The most relevant data.
-         """
-         return self.get_relevant(data, 1)
-
-     def clear(self) -> str:
-         """
-         Clears the redis server.
-
-         Returns: A message indicating that the memory has been cleared.
-         """
-         self.redis.flushall()
-         return "Obliviated"
-
-     def get_relevant(self, data: str, num_relevant: int = 5) -> list[Any] | None:
-         """
-         Returns all the data in the memory that is relevant to the given data.
-         Args:
-             data: The data to compare to.
-             num_relevant: The number of relevant data to return.
-
-         Returns: A list of the most relevant data.
-         """
-         query_embedding = create_embedding_with_ada(data)
-         base_query = f"*=>[KNN {num_relevant} @embedding $vector AS vector_score]"
-         query = (
-             Query(base_query)
-             .return_fields("data", "vector_score")
-             .sort_by("vector_score")
-             .dialect(2)
-         )
-         query_vector = np.array(query_embedding).astype(np.float32).tobytes()
-
-         try:
-             results = self.redis.ft(f"{self.cfg.memory_index}").search(
-                 query, query_params={"vector": query_vector}
-             )
-         except Exception as e:
-             print("Error calling Redis search: ", e)
-             return None
-         return [result.data for result in results.docs]
-
-     def get_stats(self):
-         """
-         Returns: The stats of the memory index.
-         """
-         return self.redis.ft(f"{self.cfg.memory_index}").info()
 
 
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/BlueJeans APK How to Download and Install the Best Video Conferencing App.md DELETED
@@ -1,100 +0,0 @@
-
- <h1>BlueJeans APK: A Guide to Download and Use the App</h1>
- <p>Are you looking for a video conferencing app that works well on your Android device? Do you want to have high-quality audio and video, easy integration with other apps, flexible meeting options, and enhanced security features? If so, you might want to check out BlueJeans APK.</p>
- <h2>bluejeans apk</h2><br /><p><b><b>Download</b> &#10042; <a href="https://urlin.us/2uT2jP">https://urlin.us/2uT2jP</a></b></p><br /><br />
- <h2>What is BlueJeans APK?</h2>
- <h3>BlueJeans APK is a free native app for Android devices that allows you to join and host video meetings, share your screen, chat with other participants, and more.</h3>
- <p>BlueJeans is a cloud-based video conferencing service that connects people across different devices and platforms. It is used by millions of people around the world for business meetings, online classes, webinars, events, and more.</p>
- <p>BlueJeans APK is the app version of BlueJeans that is designed specifically for Android-based tablets and smartphones. It is available in the Google Play Store and it is compatible with Android 5.0 and above.</p>
- <h2>Why use BlueJeans APK?</h2>
- <h3>BlueJeans APK offers many benefits for users who want to have seamless and secure video conferencing experiences on their mobile devices. Some of the advantages are:</h3>
- <h4>- High-quality audio and video with Dolby Voice technology</h4>
- <p>BlueJeans APK uses Dolby Voice technology to deliver crystal-clear sound and sharp video quality. Dolby Voice reduces background noise, enhances speech clarity, balances volume levels, and creates a natural and immersive sound experience. You can also adjust your audio and video settings according to your preferences and network conditions.</p>
- <p>bluejeans app download for android<br />
- bluejeans video conferencing apk free download<br />
- bluejeans apk latest version<br />
- bluejeans for windows 10 apk<br />
- bluejeans app network apk<br />
- bluejeans events app apk<br />
- bluejeans outlook plugin apk<br />
- bluejeans google calendar addon apk<br />
- bluejeans for linux apk<br />
- bluejeans for mac apk<br />
- bluejeans for vdi apk<br />
- bluejeans for iphone apk<br />
- bluejeans glass enterprise edition 2 apk<br />
- bluejeans screen sharing apk<br />
- bluejeans chat apk<br />
- bluejeans meeting moderator apk<br />
- bluejeans audio conferencing apk<br />
- bluejeans hd video quality apk<br />
- bluejeans dolby voice apk<br />
- bluejeans smart meetings apk<br />
- bluejeans command center apk<br />
- bluejeans relay apk<br />
- bluejeans rooms apk<br />
- bluejeans gateway for microsoft teams apk<br />
- bluejeans live streaming apk<br />
- bluejeans breakout sessions apk<br />
- bluejeans polling and q&a apk<br />
- bluejeans recording and transcription apk<br />
- bluejeans analytics and reporting apk<br />
- bluejeans security and compliance apk<br />
- bluejeans enterprise-grade scalability apk<br />
- bluejeans integrations with existing apps apk<br />
- bluejeans customer support and services apk<br />
- how to install bluejeans apk on android device<br />
- how to update bluejeans apk on android device<br />
- how to uninstall bluejeans apk on android device<br />
- how to use bluejeans apk on android device<br />
- how to join a meeting with bluejeans apk on android device<br />
- how to schedule a meeting with bluejeans apk on android device<br />
- how to invite others to a meeting with bluejeans apk on android device<br />
- how to mute/unmute yourself with bluejeans apk on android device<br />
- how to turn on/off your camera with bluejeans apk on android device<br />
- how to switch between speaker and gallery view with bluejeans apk on android device<br />
- how to share your screen with bluejeans apk on android device<br />
- how to chat with other participants with bluejeans apk on android device<br />
- how to raise your hand with bluejeans apk on android device<br />
- how to react with emojis with bluejeans apk on android device</p>
- <h4>- Easy integration with calendaring solutions and existing apps</h4>
- <p>BlueJeans APK integrates seamlessly with your calendar apps, such as Google Calendar, Outlook, or Gmail, and allows you to schedule, join, or invite others to meetings with just a few taps. You can also use BlueJeans APK with other apps that you use for work or education, such as Microsoft Teams, Slack, Zoom, Canvas, Moodle, and more. You can launch BlueJeans meetings from these apps or share content from them during your meetings.</p>
- <h4>- Flexible meeting options such as joining via phone, browser, or app</h4>
- <p>BlueJeans APK gives you the flexibility to join or host meetings in different ways. You can join a meeting via phone by dialing a local or toll-free number, via browser by clicking on a meeting link, or via app by entering a meeting ID or selecting a meeting from your calendar or history. You can also switch between these modes during a meeting if needed. For example, you can join a meeting via phone and then switch to the app to share your screen.</p>
- <h4>- Enhanced security features such as encryption, authentication, and moderation controls</h4>
- <p>BlueJeans APK ensures that your video meetings are secure and private. It uses encryption to protect your data and communication, authentication to verify your identity and access rights, and moderation controls to manage your meeting participants and settings. You can also lock your meetings, mute or remove participants, enable or disable chat and recording, and more.</p>
66
- <h2>How to download and install BlueJeans APK?</h2>
67
- <h3>You can download and install BlueJeans APK from the Google Play Store in a few simple steps. Here's how:</h3>
68
- <h4>- Open the Google Play Store app on your Android device and search for BlueJeans Video Conferencing</h4>
69
- <p>You can also use this link to go directly to the app page on the Google Play Store.</p>
70
- <h4>- Tap on the app icon and then tap on Install to start the download process</h4>
71
- <p>The app size is about 40 MB and it will take a few minutes to download depending on your network speed.</p>
72
- <h4>- Once the app is installed, tap on Open to launch it and sign in with your BlueJeans account credentials or create a new account if you don't have one</h4>
73
- <p>You can use your email address and password, your Google account, or your SSO (single sign-on) provider to sign in to BlueJeans APK. If you don't have an account yet, you can create one for free by tapping on Sign Up and following the instructions.</p>
74
- <h2>How to use BlueJeans APK?</h2>
75
- <h3>You can use BlueJeans APK to join or host video meetings, share your screen, chat with other participants, and more. Here are some of the main features and functions of the app:</h3>
76
- <h4>- To join a meeting, tap on the Join Meeting button on the home screen and enter the meeting ID or link, or select a meeting from your calendar or history</h4>
77
- <p>You can also scan a QR code or use voice commands to join a meeting. If you are joining as a guest, you will need to enter your name and email address before joining.</p>
78
- <h4>- To host a meeting, tap on the Schedule Meeting button on the home screen and enter the meeting details, such as title, date, time, invitees, etc., or select an existing meeting from your calendar or history</h4>
79
- <p>You can also customize your meeting settings, such as enabling or disabling video, audio, chat, recording, etc., before starting the meeting. You can also send invitations to your invitees via email or SMS.</p>
80
- <h4>- To share your screen, tap on the Share Screen button on the bottom toolbar during a meeting and choose what you want to share, such as your entire screen, an app, a file, etc.</h4>
81
- <p>You can also annotate your screen with different tools such as pen, highlighter, shape, text, etc. You can also pause or stop sharing your screen at any time.</p>
82
- <h4>- To chat with other participants, tap on the Chat button on the bottom toolbar during a meeting and type your message in the chat box or select an emoji or a sticker</h4>
83
- <p>You can also view the chat history, send private messages, or mute notifications. You can also access the chat feature from the home screen by tapping on the Chat icon on the top right corner.</p>
84
- <h2>Conclusion</h2>
85
- <h3>BlueJeans APK is a great app for Android users who want to have high-quality and secure video conferencing experiences on their mobile devices. It is easy to download, install, and use, and it offers many features and functions that enhance collaboration and communication. If you are looking for a reliable and versatile video conferencing app for your Android device, you should give BlueJeans APK a try.</h3>
86
- <p>Here are some FAQs that you might have about BlueJeans APK:</p>
87
- <ul>
88
- <li><b>Q: How much does BlueJeans APK cost?</b></li>
89
- <li>A: BlueJeans APK is free to download and use for personal or professional purposes. However, some features and functions may require a paid subscription or a trial account. You can check the pricing plans and options on the BlueJeans website.</li>
90
- <li><b>Q: How many participants can join a BlueJeans meeting?</b></li>
91
- <li>A: The number of participants that can join a BlueJeans meeting depends on your subscription plan or trial account. The standard plan allows up to 50 participants, the pro plan allows up to 75 participants, and the enterprise plan allows up to 100 participants. You can also request for custom plans for larger meetings.</li>
92
- <li><b>Q: How can I record a BlueJeans meeting?</b></li>
93
- <li>A: You can record a BlueJeans meeting by tapping on the Record button on the bottom toolbar during a meeting. You can also enable or disable automatic recording in your meeting settings. You can access your recordings from the home screen by tapping on the Recordings icon on the top right corner.</li>
94
- <li><b>Q: How can I share a BlueJeans meeting link?</b></li>
95
- <li>A: You can share a BlueJeans meeting link by tapping on the Share button on the bottom toolbar during a meeting. You can also copy or edit the link before sharing it via email, SMS, or other apps.</li>
96
- <li><b>Q: How can I get help or support for BlueJeans APK?</b></li>
97
- <li>A: You can get help or support for BlueJeans APK by tapping on the Help icon on the top left corner of the home screen. You can also visit the BlueJeans website or contact their customer service team for more assistance.</li>
98
- </ul></p> 197e85843d<br />
99
- <br />
100
- <br />
 
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Chess TD Mod APK The Ultimate Strategy Game with Infinite Resources.md DELETED
@@ -1,109 +0,0 @@
1
- <br />
2
- <h1>Chess TD Mod APK: A New Strategy Game with Elemental Heroes</h1>
3
- <p>If you are looking for a new and exciting strategy game that combines chess and tower defense, you might want to try Chess TD Mod APK. This is a modified version of Chess TD APK, a popular game developed by VGames Studios. In this game, you can collect and use hero cards with different elemental attributes to defend your stronghold from the Dark Lord and his army. You can also upgrade and merge your heroes to make them more powerful and create your own unique combinations. Chess TD Mod APK offers you unlimited money, gems, and resources to help you complete the game faster and easier. You can also enjoy various game modes and challenges, such as campaign, battle, dual, dungeon, and more.</p>
4
- <h2>What is Chess TD Mod APK?</h2>
5
- <h3>A modified version of Chess TD APK</h3>
6
- <p>Chess TD Mod APK is a modified version of Chess TD APK, allowing you to easily complete all tasks and requests in the game. Instead of spending a lot of time and money to achieve rewards, you can use Chess TD Mod APK to reach your goals in a shorter time. You can get unlimited money, gems, and resources to buy and upgrade your hero cards, unlock new features, and access premium content. You can also enjoy faster loading speed, smoother gameplay, and no ads.</p>
7
- <h2>chess td mod apk</h2><br /><p><b><b>Download Zip</b> &rarr; <a href="https://urlin.us/2uSS6c">https://urlin.us/2uSS6c</a></b></p><br /><br />
8
- <h3>Features of Chess TD Mod APK</h3>
9
- <p>Chess TD Mod APK has many features that make it more fun and enjoyable than the original version. Some of the features are:</p>
10
- <ul>
11
- <li>Unlimited money, gems, and resources</li>
12
- <li>Free access to all hero cards</li>
13
- <li>No ads</li>
14
- <li>No root required</li>
15
- <li>Easy installation</li>
16
- <li>Compatible with most Android devices</li>
17
- <li>Regular updates</li>
18
- </ul>
19
- <h2>How to play Chess TD Mod APK?</h2>
20
- <h3>Mix and match hero cards with different elements</h3>
21
- <p>Chess TD Mod APK is a strategy game that requires you to use your brain and creativity to create the best combination of hero cards. There are five elements in the game: Light, Dark, Wood, Fire, and Water. Each element has its own specialty, strength, and weakness. There are also advantages and disadvantages between each element. You can use this knowledge to make a weak hero stronger against certain monsters or vice versa. You can also mix and match different elements to create new effects and synergies.</p>
22
- <h3>Upgrade and merge your heroes to make them stronger</h3>
23
- <p>Another important aspect of Chess TD Mod APK is upgrading and merging your heroes. You can use your money, gems, and resources to level up your hero cards and increase their stats and abilities. You can also merge two identical hero cards to create a new one with higher rarity and power. The more upgrades and merges you do, the better elemental power your heroes have.</p>
24
- <h3>Choose from different game modes and challenges</h3>
25
- <p>Chess TD Mod APK offers you various game modes and challenges to test your skills and strategy. You can choose from:</p>
26
- <ul>
27
- <li>Campaign mode: Travel across different maps and defeat strong monsters. Complete each map to get rewards.</li>
28
- <li>Battle mode: Fight against other players online and win trophies and rewards. Climb the global ranking system.</li>
29
- <li>Dual mode: Play with your partner and get through the dungeon together. Collect chess tokens and open chests.</li>
30
- <li>Dungeon mode: Escape from the dungeon by defeating waves of enemies. Collect cards and resources along the way.</li>
31
- <li>Tower climbing mode: Climb the tower by clearing each floor. Get rewards for each floor you clear.</li>
32
- <li>Mission mode: Complete daily and weekly missions to get rewards. You can also get special rewards for completing all missions.</li>
33
- <li>Event mode: Participate in limited-time events and get exclusive rewards. You can also compete with other players in the event ranking system.</li>
34
- </ul>
35
- <h2>How to download and install Chess TD Mod APK?</h2>
36
- <h3>Download the APK file from a trusted source</h3>
37
- <p>To download Chess TD Mod APK, you need to find a reliable source that provides the latest version of the file. You can use the link below to download Chess TD Mod APK for free. The file size is about 100 MB, so make sure you have enough storage space on your device.</p>
38
- <p><a href="">Download Chess TD Mod APK here</a></p>
39
- <h3>Enable unknown sources on your device</h3>
40
- <p>Before you can install Chess TD Mod APK, you need to enable unknown sources on your device. This will allow you to install apps from sources other than the Google Play Store. To do this, follow these steps:</p>
41
- <p>chess td mod apk unlimited money<br />
42
- chess td mod apk latest version<br />
43
- chess td mod apk download<br />
44
- chess td mod apk android 1<br />
45
- chess td mod apk revdl<br />
46
- chess td mod apk no ads<br />
47
- chess td mod apk offline<br />
48
- chess td mod apk hack<br />
49
- chess td mod apk free shopping<br />
50
- chess td mod apk 2023<br />
51
- chess td mod apk rexdl<br />
52
- chess td mod apk happymod<br />
53
- chess td mod apk an1<br />
54
- chess td mod apk unlimited gems<br />
55
- chess td mod apk pure<br />
56
- chess td mod apk vip<br />
57
- chess td mod apk online<br />
58
- chess td mod apk update<br />
59
- chess td mod apk 8.7<br />
60
- chess td mod apk apkpure<br />
61
- chess td mod apk apkloli<br />
62
- chess td mod apk all unlocked<br />
63
- chess td mod apk cheat<br />
64
- chess td mod apk easy download<br />
65
- chess td mod apk for pc<br />
66
- chess td mod apk game<br />
67
- chess td mod apk ios<br />
68
- chess td mod apk latest 2023<br />
69
- chess td mod apk mediafıre<br />
70
- chess td mod apk new version<br />
71
- chess td mod apk obb<br />
72
- chess td mod apk premium<br />
73
- chess td mod apk pro<br />
74
- chess td mod apk unlimited everything<br />
75
- chess td mod apk versi terbaru<br />
76
- download game chess td mod apk<br />
77
- download gratis chess td mod apk<br />
78
- download lagu mp3 dan video mp4 \uE000chess\uE001 \uE000td\uE001 \uE000mod\uE001 \uE000apk\uE001 kualitas terbaik gratis - gudang lagu mp3 dan video mp4 download gratis stafaband planetlagu metrolagu bursalagu ilkpop matikiri wallkpop wapka mobi uyeshare youtube 4shared ss youtube online converter youtube to mp3 youtube to mp4 youtube downloader youtube converter youtube video downloader free download youtube downloader free download for windows 10 youtube downloader free download for windows 7 full version 32 bit clip dj clip dj mp3 clip dj mp4 clip dj music clip dj download clip dj converter clip dj video songs download clip dj hindi songs download pagalworld pagalworld mp3 song pagalworld mp4 videos download pagalworld movies pagalworld ringtone pagalworld song pagalworld video pagalworld 2023 song wapking wapking in wapking live wapking guru wapking cc wapking co wapking site wapking fun wapwon wapwon com wapwon live wapwon in wapwon pro wapwon video wapwon mp3 wapwon download tubidy tubidy mp3 tubidy mobi tubidy io tubidy music tubidy download tubidy mp4 tubidy mobile zonkewap zonkewap com zonkewap music zonkewap mp3 zonkewap games zonkewap videos zonkewap download zonkewap gospel music datafilehost datafilehost com datafilehost music datafilehost upload datafilehost search datafilehost file datafilehost free download datafilehost app fakaza fakaza com fakaza music fakaza 2023 fakaza mp3 fakaza gospel fakaza amapiano fakaza video zamob zamob co za zamob music zamob games zamob videos zamob downloads zamob gospel zamob south africa webmusic webmusic in webmusic live webmusic hindi webmusic bengali webmusic english webmusic ringtone webmusic a to z vidmate vidmate app vidmate download vidmate for pc vidmate install vidmate free download vidmate old version vidmate software djmaza djmaza info djmaza com djmaza songs djmaza bollywood songs djmaza link djmaza app djmaza remix songspk songspk com songspk name songspk songs songspk bollywood songspk hindi songspk old songs songspk pk mr jatt mr jatt com mr jatt 2023 mr jatt new punjabi song mr jatt top 20 songs mr jatt bollywood mr jatt ringtone mr jatt video masstamilan masstamilan in masstamilan songs masstamilan 2023 masstamilan tamil masstamilan new songs masstamilan bgm masstamilan video songs kuttyweb kuttyweb com kuttyweb songs kuttyweb tamil kuttyweb malayalam kuttyweb ringtone kuttyweb movies kuttyweb video songs ringtones ringtones for android ringtones for.</p>
79
- <ol>
80
- <li>Go to your device settings and look for security or privacy options.</li>
81
- <li>Find the option that says unknown sources or allow installation from unknown sources and toggle it on.</li>
82
- <li>A warning message will pop up. Tap OK to confirm.</li>
83
- </ol>
84
- <h3>Install the APK file and enjoy the game</h3>
85
- <p>Once you have enabled unknown sources, you can proceed to install Chess TD Mod APK. To do this, follow these steps:</p>
86
- <ol>
87
- <li>Locate the downloaded APK file on your device. You can use a file manager app to help you find it.</li>
88
- <li>Tap on the APK file and follow the instructions on the screen.</li>
89
- <li>Wait for the installation process to finish.</li>
90
- <li>Launch the game and enjoy playing Chess TD Mod APK.</li>
91
- </ol>
92
- <h2>Conclusion</h2>
93
- <p>Chess TD Mod APK is a fun and addictive strategy game that combines chess and tower defense elements. You can collect and use hero cards with different elemental attributes to defend your stronghold from the Dark Lord and his army. You can also upgrade and merge your heroes to make them more powerful and create your own unique combinations. Chess TD Mod APK offers you unlimited money, gems, and resources to help you complete the game faster and easier. You can also enjoy various game modes and challenges, such as campaign, battle, dual, dungeon, and more. If you are looking for a new and exciting strategy game that challenges your brain and creativity, you should try Chess TD Mod APK.</p>
94
- <h2>FAQs</h2>
95
- <p>Here are some frequently asked questions about Chess TD Mod APK:</p>
96
- <ul>
97
- <li><b>Q: Is Chess TD Mod APK safe to use?</b></li>
98
- <li>A: Yes, Chess TD Mod APK is safe to use as long as you download it from a trusted source. It does not contain any viruses or malware that can harm your device or data. However, you should always be careful when installing apps from unknown sources and scan them with an antivirus app before opening them.</li>
99
- <li><b>Q: Do I need an internet connection to play Chess TD Mod APK?</b></li>
100
- <li>A: No, you do not need an internet connection to play Chess TD Mod APK. You can play it offline without any problems. However, some features may require an internet connection, such as online battles, events, and updates.</li>
101
- <li><b>Q: How can I get more hero cards in Chess TD Mod APK?</b></li>
102
- <li>A: You can get more hero cards in Chess TD Mod APK by using your money, gems, and resources to buy them from the shop or open chests. You can also get them by completing missions, events, dungeons, and towers. You can also merge two identical hero cards to create a new one with higher rarity and power.</li>
103
- <li><b>Q: How can I contact the developer of Chess TD Mod APK?</b></li>
104
- <li>A: You can contact the developer of Chess TD Mod APK by visiting their official website or social media pages. You can also send them an email or leave a comment on their app page. They will try to respond to your queries and feedback as soon as possible.</li>
105
- <li><b>Q: Can I play Chess TD Mod APK on PC?</b></li>
106
- <li>A: Yes, you can play Chess TD Mod APK on PC by using an Android emulator. An Android emulator is a software that allows you to run Android apps on your PC. Some of the popular Android emulators are BlueStacks, NoxPlayer, LDPlayer, and MEmu. You can download any of these emulators from their official websites and install them on your PC. Then, you can download Chess TD Mod APK from the link above and install it on the emulator. After that, you can launch the game and enjoy playing Chess TD Mod APK on a bigger screen.</li>
107
- </ul></p> 197e85843d<br />
108
- <br />
109
- <br />
 
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Bleach Vs Naruto 3.3 MOD with Ultimate Ninja Storm 4 Characters (Android).md DELETED
@@ -1,105 +0,0 @@
1
-
2
- <h1>Bleach vs Naruto Storm 4 APK: A Fan-Made Crossover Game for Android</h1>
3
- <p>If you are a fan of anime, you might have heard of Bleach and Naruto, two of the most popular and influential series in the genre. Both of them have spawned numerous manga, anime, movies, games, and merchandise over the years. But what if you could combine the characters and elements of both worlds in one game? That's exactly what Bleach vs Naruto Storm 4 APK offers.</p>
4
- <h2>bleach vs naruto storm 4 apk</h2><br /><p><b><b>Download</b> &#10022; <a href="https://urlin.us/2uSXJP">https://urlin.us/2uSXJP</a></b></p><br /><br />
5
- <h2>What is Bleach vs Naruto Storm 4 APK?</h2>
6
- <p>Bleach vs Naruto Storm 4 APK is a fan-made mod of the original <a href="(^1^)">Bleach vs Naruto</a> game, which is a 2D fighting game developed by the Chinese company 5Dplay. The mod features over 230 characters from both Bleach and Naruto, as well as a new interface, HUD, and soundtrack inspired by <a href="(^2^)">Naruto Shippuden: Ultimate Ninja Storm 4</a>, the latest installment of the official Naruto game series.</p>
7
- <h3>Features of the game</h3>
8
- <p>Some of the features that you can enjoy in Bleach vs Naruto Storm 4 APK are:</p>
9
- <ul>
10
- <li>A huge roster of characters from both anime, including main protagonists, antagonists, side characters, and even some guest characters from other series.</li>
11
- <li>A variety of modes to play, such as arcade, versus, team battle, survival, training, and online multiplayer.</li>
12
- <li>A dynamic and responsive combat system that allows you to perform combos, special moves, transformations, assists, and ultimate attacks.</li>
13
- <li>A high-quality graphics and sound that capture the essence and atmosphere of both anime.</li>
14
- <li>A regular update that adds new characters, stages, features, and bug fixes.</li>
15
- </ul>
16
- <h3>How to download and install the game</h3>
17
- <p>To download and install Bleach vs Naruto Storm 4 APK on your Android device, you need to follow these steps:</p>
18
- <ol>
19
- <li>Go to <a href="(^3^)">this link</a> and download the APK file. You can choose between the full version (2GB) or the lite version (800MB).</li>
20
- <li>Enable the installation of apps from unknown sources on your device. You can do this by going to Settings > Security > Unknown Sources and toggling it on.</li>
21
- <li>Locate the downloaded APK file on your device and tap on it to start the installation process.</li>
22
- <li>Follow the instructions on the screen and wait for the installation to finish.</li>
23
- <li>Launch the game from your app drawer and enjoy!</li>
24
- </ol>
25
- <h2>Why play Bleach vs Naruto Storm 4 APK?</h2>
26
- <p>Bleach vs Naruto Storm 4 APK is not just another fighting game. It is a tribute to two of the most iconic anime of all time. Here are some reasons why you should play it:</p>
27
- <p>bleach vs naruto storm 4 mod apk<br />
28
- bleach vs naruto ultimate ninja storm 4 climax apk<br />
29
- bleach vs naruto storm 4 android download<br />
30
- bleach vs naruto storm 4 apk free download<br />
31
- bleach vs naruto storm 4 apk offline<br />
32
- bleach vs naruto storm 4 apk obb<br />
33
- bleach vs naruto storm 4 apk mod menu<br />
34
- bleach vs naruto storm 4 apk unlimited money<br />
35
- bleach vs naruto storm 4 apk latest version<br />
36
- bleach vs naruto storm 4 apk full version<br />
37
- bleach vs naruto storm 4 apk english version<br />
38
- bleach vs naruto storm 4 apk for pc<br />
39
- bleach vs naruto storm 4 apk gameplay<br />
40
- bleach vs naruto storm 4 apk how to download<br />
41
- bleach vs naruto storm 4 apk highly compressed<br />
42
- bleach vs naruto storm 4 apk no verification<br />
43
- bleach vs naruto storm 4 apk online<br />
44
- bleach vs naruto storm 4 apk ppsspp<br />
45
- bleach vs naruto storm 4 apk revdl<br />
46
- bleach vs naruto storm 4 apk rexdl<br />
47
- bleach vs naruto storm 4 apk update<br />
48
- bleach vs naruto storm 4 apk uptodown<br />
49
- bleach vs naruto storm 4 apk youtube<br />
50
- bleach vs naruto shippuden ultimate ninja storm 4 apk<br />
51
- download game bleach vs naruto storm 4 apk<br />
52
- download game android bleach vs naruto storm 4 apk<br />
53
- download game offline bleach vs naruto storm 4 apk<br />
54
- download game online bleach vs naruto storm 4 apk<br />
55
- download game ppsspp bleach vs naruto storm 4 apk<br />
56
- download game mod bleach vs naruto storm 4 apk<br />
57
- download game terbaru bleach vs naruto storm 4 apk<br />
58
- download game gratis bleach vs naruto storm 4 apk<br />
59
- cara download game bleach vs naruto storm 4 apk<br />
60
- cara instal game bleach vs naruto storm 4 apk<br />
61
- cara main game bleach vs naruto storm 4 apk<br />
62
- cara update game bleach vs naruto storm 4 apk<br />
63
- review game bleach vs naruto storm 4 apk<br />
64
- tips and tricks game bleach vs naruto storm 4 apk<br />
65
- cheat codes game bleach vs naruto storm 4 apk<br />
66
- best characters game bleach vs naruto storm 4 apk<br />
67
- best team game bleach vs naruto storm 4 apk<br />
68
- best combo game bleach vs naruto storm 4 apk<br />
69
- best skills game bleach vs naruto storm 4 apk<br />
70
- best mode game bleach vs naruto storm 4 apk<br />
71
- best graphics game bleach vs naruto storm 4 apk<br />
72
- best sound game bleach vs naruto storm 4 apk<br />
73
- best controls game bleach vs naruto storm 4 apk<br />
74
- best settings game bleach vs naruto storm 4 apk</p>
75
- <h3>Enjoy the best of both anime worlds</h3>
76
- <p>If you love both Bleach and Naruto, you will surely appreciate how this game brings them together in a seamless and coherent way. You can see how the characters interact with each other, how their abilities match or clash, and how their stories intertwine. You can also relive some of the most memorable moments from both anime, such as Ichigo's fight with Aizen, Naruto's fight with Sasuke, or their team-ups against common enemies.</p>
77
- <h3>Experience intense and thrilling battles</h3>
78
- <p>The game's combat system is fast-paced, fluid, and satisfying. You can unleash your creativity and skill as you <p>combine different attacks, combos, and specials to defeat your opponents. You can also switch between characters, use assists, and activate ultimate attacks to turn the tide of the battle. The game's graphics and sound effects make the battles even more immersive and exciting.</p>
79
- <h3>Customize your favorite characters</h3>
80
- <p>The game allows you to customize your characters in various ways. You can change their costumes, accessories, hairstyles, and colors to suit your preferences. You can also equip them with different items that enhance their stats and abilities. You can even create your own original characters by mixing and matching different parts from existing ones.</p>
81
- <h2>Tips and tricks for playing Bleach vs Naruto Storm 4 APK</h2>
82
- <p>If you want to master the game and become a pro, here are some tips and tricks that you should know:</p>
83
- <h3>Learn the controls and combos</h3>
84
- <p>The game's controls are simple and intuitive, but you need to practice them to execute them smoothly. You can use the virtual buttons on the screen or connect a controller to your device for better accuracy. You can also adjust the button layout and size in the settings menu. The game has a tutorial mode that teaches you the basics of the game, such as movement, attack, defense, special, assist, and ultimate. You can also learn the combos of each character by checking their move list in the pause menu or by playing the training mode.</p>
85
- <h3>Choose the right character for your playstyle</h3>
86
- <p>The game has a diverse and balanced roster of characters, each with their own strengths and weaknesses. You should experiment with different characters and find the ones that suit your playstyle and preferences. Some characters are fast and agile, some are powerful and durable, some are ranged and versatile, and some are tricky and unpredictable. You should also consider the compatibility and synergy of your team members, as they can affect your performance and strategy.</p>
87
- <h3>Use the assist system wisely</h3>
88
- <p>The game has an assist system that allows you to call your teammates for help during the battle. You can use this system to extend your combos, break your opponent's defense, counter their attacks, or save yourself from danger. However, you should also be careful not to waste your assist gauge or leave yourself vulnerable to enemy attacks. You should also pay attention to the cooldown time of your assists, as they vary depending on the character.</p>
89
- <h2>Conclusion</h2>
90
- <p>Bleach vs Naruto Storm 4 APK is a fan-made crossover game that lets you enjoy the best of both anime worlds on your Android device. It has a huge roster of characters, a variety of modes, a dynamic combat system, a high-quality graphics and sound, and a regular update. It is a must-play game for any anime fan who loves fighting games. If you want to download and play Bleach vs Naruto Storm 4 APK, you can follow the steps above and start your epic adventure today!</p>
91
- <h2>FAQs</h2>
92
- <ul>
93
- <li><b>Q: Is Bleach vs Naruto Storm 4 APK safe to download?</b></li>
94
- <li>A: Yes, Bleach vs Naruto Storm 4 APK is safe to download as long as you use a trusted source like <a href="">this link</a>. However, you should always scan any file that you download with an antivirus software before installing it on your device.</li>
95
- <li><b>Q: Is Bleach vs Naruto Storm 4 APK free to play?</b></li>
96
- <li>A: Yes, Bleach vs Naruto Storm 4 APK is free to play and does not require any in-app purchases or subscriptions. However, you may encounter some ads while playing the game.</li>
97
- <li><b>Q: Can I play Bleach vs Naruto Storm 4 APK offline?</b></li>
98
- <li>A: Yes, you can play Bleach vs Naruto Storm 4 APK offline without any internet connection. However, you will need an internet connection to play the online multiplayer mode or to update the game.</li>
99
- <li><b>Q: Can I play Bleach vs Naruto Storm 4 APK on PC?</b></li>
100
- <li>A: Yes, you can play Bleach vs Naruto Storm 4 APK on PC by using an Android emulator like <a href="">Bluestacks</a> or <a href="">NoxPlayer</a>. However, you may experience some performance issues or compatibility problems depending on your PC specifications and settings.</li>
101
- <li><b>Q: How can I contact the developer of Bleach vs Naruto Storm 4 APK?</b></li>
102
- <li>A: You can contact the developer of Bleach vs Naruto Storm 4 APK by visiting their <a href="">Facebook page</a> or by sending them an email at <a href="mailto:bleachv [email protected]">[email protected]</a>.</li>
103
- </ul></p> 197e85843d<br />
104
- <br />
105
- <br />
 
spaces/1phancelerku/anime-remove-background/Amrutham-001300-Episodes-Telugu-UPDATED.md DELETED
@@ -1,78 +0,0 @@
1
- ## Amrutham 001-300 episodes Telugu
2
-
3
-
4
-
5
-
6
-
7
-
8
-
9
-
10
-
11
- **DOWNLOAD >>> [https://corppresinro.blogspot.com/?d=2txP1O](https://corppresinro.blogspot.com/?d=2txP1O)**
12
-
13
-
14
-
15
-
16
-
17
-
18
-
19
-
20
-
21
-
22
-
23
-
24
-
25
- # Amrutham 001-300 Episodes Telugu
26
-
27
-
28
-
29
- Amrutham is a popular sitcom in Telugu that aired from 2001 to 2007. It was produced by Just Yellow Media and is the longest running and most popular sitcom in Telugu. The show revolves around the lives of Amrutham, an aspiring restaurateur, and his friend Anji, who often get into trouble with their landlord Appaji and other characters. The show is known for its witty humor and hilarious situations.
30
-
31
-
32
-
33
- If you are a fan of Amrutham or want to watch it for the first time, you can stream or download all the episodes online. Here are some options for you:
34
-
35
-
36
-
37
- - You can watch Amrutham Season 1 streaming on Zee5 for free with ads. The season has 14 episodes, including the last episode "Tata Bye Bye Veedukolu" which has an unimaginable twist[^1^].
38
-
39
- - You can find a list of all the episodes of Amrutham on LiquiSearch[^2^]. The list has the episode titles and brief summaries of each episode. You can also find links to download some of the episodes from Torrent.
40
-
41
- - You can listen to some of the episodes of Amrutham on SoundCloud[^3^]. The episodes are in audio format and have the episode number and title in the description. You can also download the episodes from SoundCloud.
42
-
43
-
44
-
45
- Amrutham is a classic comedy show that will make you laugh out loud. Enjoy watching or listening to the episodes and have fun!
46
-
47
-
48
-
49
- Here are some more details about Amrutham and its characters:
50
-
51
-
52
-
53
- - Amrutham, played by Sivaji Raja and later by Naresh, is the main protagonist of the show. He is a naive and optimistic person who dreams of running a successful restaurant called Amrutha Vilas. He often comes up with crazy ideas to improve his business or to solve his problems, but they usually backfire and land him in trouble.
54
-
55
- - Anji, played by Gundu Hanumantha Rao and later by Harshavardhan, is Amrutham's best friend and partner in the restaurant. He is a smart and practical person who tries to help Amrutham with his schemes, but often ends up suffering the consequences. He is also afraid of Appaji and his wife.
56
-
57
- - Appaji, played by Inturi Vasu and later by Sivannarayana Naripeddi, is the landlord of Amrutham and Anji. He is a greedy and cunning person who always tries to exploit them for money or free food. He also has a belt that has sadistic powers and can make anyone obey him.
58
-
59
- - Sundaram, played by Vasu Inturi and later by Sivannarayana Naripeddi, is Appaji's son who works as a waiter in Amrutha Vilas. He is a loyal and innocent person who respects Amrutham and Anji. He often gets involved in their plans and faces Appaji's wrath.
60
-
61
- - Sanjeevini, played by Jhansi, is Amrutham's wife who works as a nurse. She is a sensible and caring person who loves Amrutham despite his flaws. She often tries to advise him or save him from his troubles.
62
-
63
- - Umadevi, played by Avasarala Kanyakumari and later by Bhargavi, is Anji's wife who works as a teacher. She is a strict and dominating person who often scolds Anji for his failures. She also has a crush on Appaji.
64
-
65
-
66
-
67
- The show also features many other characters who add to the comedy and chaos of the show. Some of them are Rubber Balaji, a film director who makes low-budget movies; Parandhamayya, an astrologer who gives fake predictions; Sarvam, a cook who works in Amrutha Vilas; Shanta, Appaji's aunt who tortures him; and Yama Dharma Raja, the god of death who visits Amrutham and Anji.
68
-
69
-
70
-
71
- Amrutham is a show that has a cult following among Telugu audiences. It has won many awards and accolades for its humor and creativity. It has also been remade in Tamil as Veettukku Veedu Looty and in Kannada as Silli Lalli.
72
-
73
- dfd1c89656
74
-
75
-
76
-
77
-
78
-
 
spaces/1phancelerku/anime-remove-background/Crime Mysteries Find objects - A Challenging Hidden Object Mod APK.md DELETED
@@ -1,137 +0,0 @@
1
-
2
- <h1>Crime Mystery Find Objects Mod APK: A Fun and Challenging Puzzle Game</h1>
3
- <p>Do you love hidden object games? Do you enjoy solving mysteries and catching criminals? If you answered yes to both questions, then you might want to check out <strong>Crime Mystery Find Objects Mod APK</strong>, a fun and challenging puzzle game that will test your detective skills and keep you entertained for hours.</p>
4
- <h2>What is Crime Mystery Find Objects Mod APK?</h2>
5
- <h3>A hidden object game with a crime-solving twist</h3>
6
- <p>Crime Mystery Find Objects is a game developed by G5 Entertainment, a company that specializes in casual games for mobile devices. The game is part of the Crime Mysteries series, which features different locations and scenarios where you have to find hidden objects and clues, solve puzzles, and catch criminals.</p>
7
- <h2>crime mystery find objects mod apk</h2><br /><p><b><b>Download File</b> &rarr; <a href="https://jinyurl.com/2uNJ8I">https://jinyurl.com/2uNJ8I</a></b></p><br /><br />
8
- <p>The game has a captivating storyline that will immerse you in the world of crime investigation. You play as a detective who works for a secret organization called T.R.A.I.L., which stands for Tactical Reconnaissance And Investigation League. Your mission is to travel around the world, from Paris to Tokyo, and solve various cases involving murder, theft, kidnapping, and more.</p>
9
- <h3>A modded version of the original game with unlimited money and free purchases</h3>
10
- <p>Crime Mystery Find Objects Mod APK is a modified version of the original game that gives you some advantages over the regular version. The modded game has unlimited money and free purchases, which means you can buy anything you want in the game without spending real money. You can also unlock all the levels, locations, and items in the game without having to complete any tasks or achievements.</p>
11
- <p>The modded game is not available on the official app stores, such as Google Play or Apple Store. You have to download it from a third-party website that provides modded games. However, before you do that, you should be aware of some risks and precautions that come with installing modded games on your device.</p>
12
- <h2>How to download and install Crime Mystery Find Objects Mod APK?</h2>
13
- <h3>The steps to download and install the modded game</h3>
14
- <p>If you want to try Crime Mystery Find Objects Mod APK, here are the steps you need to follow:</p>
15
- <ol>
16
- <li>Find a reliable website that offers the modded game. You can use a search engine or ask for recommendations from other players who have tried the modded game.</li>
17
- <li>Download the modded game file from the website. Make sure it is compatible with your device and has the latest version of the game.</li>
18
- <li>Enable unknown sources on your device. This will allow you to install apps that are not from the official app stores. To do this, go to your device settings, then security, then unknown sources, and turn it on.</li>
19
- <li>Locate the downloaded file on your device and tap on it to start the installation process. Follow the instructions on your screen until the installation is complete.</li>
20
- <li>Launch the modded game and enjoy playing with unlimited money and free purchases.</li>
21
- </ol>
22
- <h3>The precautions to take before installing the modded game</h3>
23
- <p>While installing modded games can be fun and exciting, it can also be risky and dangerous. Here are some precautions you should take before installing Crime Mystery Find Objects Mod APK:</p>
24
- <ul>
25
- <li>Backup your data before installing the modded game. Modded games can sometimes cause errors or crashes on your device, which can result in data loss or corruption. To avoid this, back up your important data, such as photos, videos, contacts, and messages, before installing the modded game. You can use a cloud service or an external storage device to do this.</li>
- <li>Scan the modded game file for viruses or malware. Modded games can sometimes contain harmful or malicious code that can infect your device or steal your personal information. To prevent this, scan the modded game file with a reputable antivirus or anti-malware app before installing it. You can also check the reviews and ratings of the website that provides the modded game to see if other users have reported problems with it.</li>
- <li>Do not update the modded game from the official app stores. Modded games are usually not compatible with official updates from the original developers, so updating from the app store can remove the modded features or stop the game from working. To avoid this, disable automatic updates for the modded game in your device settings and check the provider's website for new versions instead.</li>
- </ul>
26
- <h2>What are the features of Crime Mystery Find Objects Mod APK?</h2>
27
- <h3>The gameplay and graphics of the modded game</h3>
28
- <p>Crime Mystery Find Objects Mod APK has the same gameplay and graphics as the original game, but with some added features and improvements. The gameplay consists of finding hidden objects and clues in different scenes, solving puzzles and mini-games, and catching criminals. The graphics are realistic and detailed, with various locations and scenarios that will make you feel like a real detective.</p>
29
- <p>crime mystery hidden objects mod apk download<br />
30
- crime mystery match 3 puzzle mod apk free<br />
31
- crime mystery detective game mod apk unlimited<br />
32
- crime mystery adventure game mod apk offline<br />
33
- crime mystery hidden clues mod apk latest<br />
34
- crime mystery investigation game mod apk android<br />
35
- crime mystery find evidence mod apk hack<br />
36
- crime mystery solve cases mod apk online<br />
37
- crime mystery hidden scenes mod apk premium<br />
38
- crime mystery escape game mod apk full<br />
39
- crime mystery point and click mod apk pro<br />
40
- crime mystery hidden pictures mod apk cracked<br />
41
- crime mystery story game mod apk unlocked<br />
42
- crime mystery hidden puzzles mod apk modded<br />
43
- crime mystery interactive game mod apk updated<br />
44
- crime mystery hidden items mod apk no ads<br />
45
- crime mystery logic game mod apk cheats<br />
46
- crime mystery hidden numbers mod apk version<br />
47
- crime mystery role playing game mod apk free shopping<br />
48
- crime mystery hidden letters mod apk mega<br />
49
- crime mystery text game mod apk money<br />
50
- crime mystery hidden shapes mod apk vip<br />
51
- crime mystery trivia game mod apk gems<br />
52
- crime mystery hidden symbols mod apk unlimited lives<br />
53
- crime mystery word game mod apk coins<br />
54
- crime mystery find differences mod apk unlimited hints<br />
55
- crime mystery board game mod apk stars<br />
56
- crime mystery find words mod apk unlimited moves<br />
57
- crime mystery card game mod apk energy<br />
58
- crime mystery find pairs mod apk unlimited boosters<br />
59
- crime mystery dice game mod apk keys<br />
60
- crime mystery find colors mod apk unlimited time<br />
61
- crime mystery strategy game mod apk gold<br />
62
- crime mystery find patterns mod apk unlimited levels<br />
63
- crime mystery simulation game mod apk diamonds<br />
64
- crime mystery find shapes mod apk all unlocked<br />
65
- crime mystery action game mod apk hearts<br />
66
- crime mystery find numbers mod apk no timer<br />
67
- crime mystery puzzle game mod apk lives<br />
68
- crime mystery find letters mod apk no root</p>
69
- <p>Some of the features and improvements of the modded game are:</p>
70
- <ul>
71
- <li>You can play offline without an internet connection.</li>
72
- <li>You can zoom in and out of the scenes to find hidden objects more easily.</li>
73
- <li>You can use hints and skip buttons to help you solve puzzles and mini-games.</li>
74
- <li>You can customize your avatar and choose from different outfits and accessories.</li>
75
- <li>You can collect trophies and achievements for completing tasks and challenges.</li>
76
- </ul>
77
- <h3>The benefits and drawbacks of the modded game</h3>
78
- <p>Crime Mystery Find Objects Mod APK has some benefits and drawbacks that you should consider before playing it. Here are some of them:</p>
79
- <table>
80
- <tr>
81
- <th>Benefits</th>
82
- <th>Drawbacks</th>
83
- </tr>
84
- <tr>
85
- <td>You can enjoy unlimited money and free purchases in the game.</td>
86
- <td>You might lose the thrill and challenge of playing the game.</td>
87
- </tr>
88
- <tr>
89
- <td>You can unlock all the levels, locations, and items in the game.</td>
90
- <td>You might miss out on some of the fun and excitement of discovering new things in the game.</td>
91
- </tr>
92
- <tr>
93
- <td>You can play without any ads or interruptions in the game.</td>
94
- <td>You might not support the original game developers and their work.</td>
95
- </tr>
96
- <tr>
97
- <td>You can have more options and features in the game.</td>
98
- <td>You might encounter some bugs or glitches in the game.</td>
99
- </tr>
100
- </table>
101
- <h2>How to play Crime Mystery Find Objects Mod APK?</h2>
102
- <h3>The tips and tricks to find hidden objects and solve puzzles</h3>
103
- <p>If you want to play Crime Mystery Find Objects Mod APK like a pro, here are some tips and tricks that will help you find hidden objects and solve puzzles faster and easier:</p>
104
- <ul>
105
- <li>Pay attention to the scene description and objectives. They will give you hints about what to look for and what to do in each scene.</li>
106
- <li>Use your finger to swipe across the screen to explore every corner of the scene. You might find some hidden objects or clues that are not obvious at first glance.</li>
107
- <li>Tap on an object to select it. If it is correct, it will be marked off your list. If it is wrong, it will flash red and you will lose some time.</li>
108
- <li>If you get stuck, you can use hints or skip buttons to help you out. Hints will highlight an object or clue for you. Skip buttons will let you skip a puzzle or mini-game. However, you have a limited number of hints and skip buttons per scene, so use them wisely.</li>
109
- <li>Try to find hidden objects and solve puzzles as fast as possible. The faster you complete a scene, the higher your score will be. You will also earn more stars, coins, and rewards for finishing a scene quickly.</li>
110
- </ul>
111
- <h3>The challenges and rewards of the modded game</h3>
112
- <p>Crime Mystery Find Objects Mod APK has some challenges and rewards that will make you want to play more and improve your skills. Here are some of them:</p>
113
- <ul>
114
- <li>You can challenge yourself with different difficulty levels, from easy to expert. The higher the difficulty level, the more hidden objects and puzzles you have to find and solve, and the less time and hints you have.</li>
115
- <li>You can compete with other players from around the world on the global leaderboard. You can see your rank and score, and compare them with other players. You can also chat with other players and share tips and strategies.</li>
116
- <li>You can earn stars, coins, and rewards for completing scenes and tasks. Stars are used to unlock new levels and locations. Coins are used to buy items and upgrades in the game. Rewards are special items or bonuses that you can use in the game, such as extra time, extra hints, or extra skip buttons.</li>
117
- </ul>
118
- <h2>Conclusion</h2>
119
- <h3>A summary of the main points of the article</h3>
120
- <p>Crime Mystery Find Objects Mod APK is a fun and challenging puzzle game that will test your detective skills and keep you entertained for hours. It is a hidden object game with a crime-solving twist, where you have to find hidden objects and clues, solve puzzles and mini-games, and catch criminals. It is a modded version of the original game that gives you unlimited money and free purchases, which means you can buy anything you want in the game without spending real money. You can also unlock all the levels, locations, and items in the game without having to complete any tasks or achievements.</p>
121
- <p>However, before you download and install the modded game, you should be aware of some risks and precautions that come with installing modded games on your device. You should backup your data before installing the modded game, scan the modded game file for viruses or malware, and do not update the modded game from the official app stores. You should also consider the benefits and drawbacks of playing the modded game, such as having more options and features, but losing the thrill and challenge of playing the game.</p>
122
- <h3>A call to action for the readers to try the modded game</h3>
123
- <p>If you are looking for a new and exciting puzzle game to play on your device, you should give Crime Mystery Find Objects Mod APK a try. It is a game that will challenge your mind and entertain your eyes with its realistic and detailed graphics, captivating storyline, and various locations and scenarios. You will also enjoy having unlimited money and free purchases in the game, which will let you customize your avatar, buy items and upgrades, and unlock everything in the game. You will also have fun competing with other players on the global leaderboard, earning stars, coins, and rewards, and improving your detective skills.</p>
124
- <p>So what are you waiting for? Download Crime Mystery Find Objects Mod APK today and start solving mysteries and catching criminals like a pro!</p>
125
- <h2>FAQs</h2>
126
- <h3>What is Crime Mystery Find Objects Mod APK?</h3>
127
- <p>Crime Mystery Find Objects Mod APK is a modified version of the original game that gives you unlimited money and free purchases in the game.</p>
128
- <h3>How to download and install Crime Mystery Find Objects Mod APK?</h3>
129
- <p>You have to download it from a third-party website that provides modded games. Then, you have to enable unknown sources on your device, locate the downloaded file on your device, tap on it to start the installation process, and launch the modded game.</p>
130
- <h3>What are the features of Crime Mystery Find Objects Mod APK?</h3>
131
- <p>The modded game has the same gameplay and graphics as the original game, but with some added features and improvements. You can play offline without an internet connection, zoom in and out of the scenes, use hints and skip buttons, customize your avatar, collect trophies and achievements, etc.</p>
132
- <h3>How to play Crime Mystery Find Objects Mod APK?</h3>
133
- <p>You have to find hidden objects and clues in different scenes, solve puzzles and mini-games, and catch criminals. You can also challenge yourself with different difficulty levels, compete with other players on the global leaderboard, and earn stars, coins, and rewards.</p>
134
- <h3>Is Crime Mystery Find Objects Mod APK safe to play?</h3>
135
- <p>Crime Mystery Find Objects Mod APK is not an official game, so it might not be safe to play. You should take some precautions before installing the modded game, such as backing up your data, scanning the modded game file for viruses or malware, and not updating the modded game from the official app stores.</p> 401be4b1e0<br />
136
- <br />
137
- <br />
 
spaces/1phancelerku/anime-remove-background/Dragon Trail Hunter World - A Brand-New Tribal World for You to Discover.md DELETED
@@ -1,148 +0,0 @@
1
- <br />
2
- <h1>Dragon Trail: Hunter World - A New Island Tribal Adventure Game</h1>
3
- <p>Are you looking for a new and exciting role-playing game that will take you to a fantastical island full of dragons, pets, and adventures? If so, you might want to check out Dragon Trail: Hunter World, a brand-new game developed by TTHmobi. In this game, you will play as a youth chosen by the dragon, who will explore the secret of Loya Book with your father's belief. You will also collect various costumes, cute pets and mounts, team up with other players, and compete for the island supremacy. In this article, we will tell you everything you need to know about Dragon Trail: Hunter World, and how you can download and play it on your PC for a better gaming experience.</p>
4
- <h2>What is Dragon Trail: Hunter World?</h2>
5
- <p>Dragon Trail: Hunter World is a role-playing game that combines elements of fantasy, adventure, and tribal culture. It is set in a mysterious island called Star Island, where dragons and humans coexist peacefully. However, an evil force is threatening to destroy this harmony, and it is up to you to stop it. You will embark on a journey to uncover the secret of Loya Book, a legendary artifact that contains the power of the dragon. Along the way, you will meet many interesting characters, collect various pets and dragons, fight against enemies, and explore the beautiful scenery of Star Island.</p>
6
- <h2>dragon trail indir</h2><br /><p><b><b>DOWNLOAD</b> &#10004; <a href="https://jinyurl.com/2uNQqN">https://jinyurl.com/2uNQqN</a></b></p><br /><br />
7
- <h3>The story and the gameplay</h3>
8
- <p>The story of Dragon Trail: Hunter World is divided into chapters, each with its own plot and quests. You will follow the main character, who is a descendant of the dragon tribe, as he or she tries to fulfill his or her father's wish. You will also encounter other characters who will join your team or become your rivals. The game has a rich dialogue system that allows you to interact with different characters and choose your responses. Your choices will affect the outcome of the story and the relationship with other characters.</p>
9
- <p>The gameplay of Dragon Trail: Hunter World is mainly based on exploration, combat, and collection. You will be able to roam freely around Star Island, discovering new places, secrets, and treasures. You will also encounter various enemies, such as wild animals, monsters, and evil dragons. You will have to fight them using your skills, weapons, and pets. The game has a real-time combat system that requires you to tap or swipe on the screen to perform different actions. You can also use special skills that have cooldowns or consume energy. The game also has a multiplayer mode that allows you to team up with other players or compete against them in different modes.</p>
10
- <h3>The features and the graphics</h3>
11
- <p>Dragon Trail: Hunter World has many features that make it stand out from other role-playing games. Some of these features are:</p>
12
- <ul>
13
- <li>A huge open world that you can explore at your own pace.</li>
14
- <li>A variety of costumes, pets, mounts, weapons, and accessories that you can collect and customize.</li>
15
- <li>A mentor-disciple system that allows you to train with your master or apprentice everyday by completing missions.</li>
16
- <li>A guild system that allows you to join or create a guild with other players and participate in guild wars.</li>
17
- <li>A ranking system that shows your progress and achievements in different aspects of the game.</li>
18
- <li>A daily login reward system that gives you free diamonds and other items every day.</li>
19
- </ul>
20
- <p>The graphics of Dragon Trail: Hunter World are also impressive. The game has a colorful and cartoon-like style that suits its fantasy theme. The game also has high-quality animations and sound effects that enhance the immersion of the game. The game also has well-made cutscenes that show the story in a cinematic way.</p>
21
- <h3>The classes and the pets</h3>
22
- <p>Dragon Trail: Hunter World has four classes that you can choose from, each with its own strengths and weaknesses. The classes are:</p>
23
- <p>dragon trail hunter world indir<br />
24
- dragon trail hunter world pc indir<br />
25
- dragon trail hunter world apk indir<br />
26
- dragon trail hunter world google play indir<br />
27
- dragon trail hunter world bluestacks indir<br />
28
- dragon trail hunter world android indir<br />
29
- dragon trail hunter world ios indir<br />
30
- dragon trail hunter world oyunu indir<br />
31
- dragon trail hunter world ücretsiz indir<br />
32
- dragon trail hunter world hileli indir<br />
33
- dragon trail beta indir<br />
34
- dragon trail beta apk indir<br />
35
- dragon trail beta google play indir<br />
36
- dragon trail beta android indir<br />
37
- dragon trail beta ios indir<br />
38
- dragon trail beta oyunu indir<br />
39
- dragon trail beta ücretsiz indir<br />
40
- dragon trail beta hileli indir<br />
41
- dragon trail oyunu indir<br />
42
- dragon trail oyunu apk indir<br />
43
- dragon trail oyunu google play indir<br />
44
- dragon trail oyunu bluestacks indir<br />
45
- dragon trail oyunu android indir<br />
46
- dragon trail oyunu ios indir<br />
47
- dragon trail oyunu ücretsiz indir<br />
48
- dragon trail oyunu hileli indir<br />
49
- dragon trail rol yapma oyunu indir<br />
50
- dragon trail rol yapma oyunu apk indir<br />
51
- dragon trail rol yapma oyunu google play indir<br />
52
- dragon trail rol yapma oyunu bluestacks indir<br />
53
- dragon trail rol yapma oyunu android indir<br />
54
- dragon trail rol yapma oyunu ios indir<br />
55
- dragon trail rol yapma oyunu ücretsiz indir<br />
56
- dragon trail rol yapma oyunu hileli indir<br />
57
- dragon trail island adventure game indir<br />
58
- dragon trail island adventure game apk indir<br />
59
- dragon trail island adventure game google play indir<br />
60
- dragon trail island adventure game bluestacks indir<br />
61
- dragon trail island adventure game android indir<br />
62
- dragon trail island adventure game ios indir<br />
63
- dragon trail island adventure game ücretsiz indir<br />
64
- dragon trail island adventure game hileli indir<br />
65
- tthmobi dragon trail indir<br />
66
- tthmobi dragon trail apk indir<br />
67
- tthmobi dragon trail google play indir<br />
68
- tthmobi dragon trail bluestacks indir<br />
69
- tthmobi dragon trail android indir<br />
70
- tthmobi dragon trail ios indir</p>
71
- <ul>
72
- <li>Warrior: A melee fighter who specializes in physical attacks and defense. He or she can use swords, axes, and shields to deal damage and protect himself or herself.</li>
73
- <li>Mage: A ranged caster who specializes in magic attacks and support. He or she can use staffs, wands, and books to cast spells and buff allies.</li>
74
- <li>Ranger: A ranged shooter who specializes in speed and accuracy. He or she can use bows, crossbows, and guns to shoot arrows and bullets at enemies.</li>
75
- <li>Assassin: A melee rogue who specializes in stealth and critical hits. He or she can use daggers, claws, and whips to sneak up and stab enemies.</li>
76
- </ul>
77
- <p>Dragon Trail: Hunter World also has a pet system that allows you to collect and raise various pets that will accompany you in your adventure. Pets can help you in combat by attacking enemies, healing you, or providing buffs. Pets can also be upgraded and evolved to increase their power and appearance. Some of the pets that you can find in the game are:</p>
78
- <ul>
79
- <li>Dragon: A majestic creature that has the power of fire, ice, thunder, or wind. It can breathe fireballs, ice shards, lightning bolts, or wind blades at enemies.</li>
80
- <li>Fox: A cute animal that has the power of illusion, charm, or luck. It can create clones, charm enemies, or increase your drop rate.</li>
81
- <li>Bear: A strong animal that has the power of earth, rock, or metal. It can smash enemies with its claws, throw rocks at them, or create a metal shield.</li>
82
- <li>Bird: A swift animal that has the power of light, sound, or speed. It can blind enemies with its feathers, stun them with its chirp, or dash at them with its wings.</li>
83
- </ul>
84
- <h2>How to download and play Dragon Trail: Hunter World on PC?</h2>
85
- <p>If you are interested in playing Dragon Trail: Hunter World on your PC, you might be wondering how to do it. After all, the game is only available for mobile devices on Google Play and the App Store. However, there is a way to play it on your PC using an Android emulator. An Android emulator is a program that allows you to run Android apps and games on your PC. One of the best Android emulators that you can use is BlueStacks, which is free, fast, and easy to use.</p>
86
- <h3>The benefits of playing on PC</h3>
87
- <p>Playing Dragon Trail: Hunter World on PC has many benefits that you might not get on your mobile device. Some of these benefits are:</p>
88
- <ul>
89
- <li>A bigger screen that gives you a better view of the game's graphics and details.</li>
90
- <li>A smoother performance that reduces lag and crashes.</li>
91
- <li>A more comfortable control scheme that lets you use your keyboard and mouse instead of your touch screen.</li>
92
- <li>A longer battery life that lets you play for hours without worrying about charging your device.</li>
93
- <li>A more secure environment that protects your account and data from hackers and viruses.</li>
94
- </ul>
95
- <h3>The steps to install BlueStacks and Dragon Trail: Hunter World</h3>
96
- <p>To play Dragon Trail: Hunter World on PC using BlueStacks, you need to follow these simple steps:</p>
97
- <ol>
98
- <li>Download and install BlueStacks from its official website <a href="">here</a>.</li>
99
- <li>Launch BlueStacks and sign in with your Google account.</li>
100
- <li>Search for Dragon Trail: Hunter World in the search bar or go to the Google Play Store icon on the home screen.</li>
101
- <li>Click on the game icon and install it.</li>
102
- <li>Once the installation is complete, click on the game icon again to launch it.</li>
103
- </ol>
104
- <h3>The tips and tricks to enjoy the game on PC</h3>
105
- <p>To make the most out of playing Dragon Trail: Hunter World on PC using BlueStacks, you might want to try these tips and tricks:</p>
106
- <ul>
107
- <li>Customize your keyboard and mouse settings to suit your preferences. You can do this by clicking on the keyboard icon on the bottom right corner of the screen. You can also use the default settings provided by BlueStacks.</li>
108
- <li>Enable high frame rate mode to improve the game's smoothness. You can do this by clicking on the gear icon on the top right corner of the screen and going to the engine tab. You can also adjust other settings such as resolution, graphics quality, and memory allocation.</li>
109
- <li>Use the multi-instance feature to play multiple accounts or games at the same time. You can do this by clicking on the multi-instance icon on the bottom right corner of the screen and creating a new instance. You can also clone an existing instance or sync multiple instances to perform the same actions.</li>
110
- <li>Use the macro feature to automate repetitive tasks or create custom commands. You can do this by clicking on the macro icon on the right side of the screen and recording your actions. You can also edit, delete, or assign hotkeys to your macros.</li>
111
- <li>Use the screenshot and video capture feature to record your gameplay or share your achievements. You can do this by clicking on the camera icon on the right side of the screen and choosing your desired option. You can also access your media files from the BlueStacks folder on your PC.</li>
112
- </ul>
113
- <h2>Conclusion</h2>
114
- <p>Dragon Trail: Hunter World is a fun and immersive role-playing game that will take you to a magical island full of dragons, pets, and adventures. You will enjoy the game's story, gameplay, features, and graphics, as well as the benefits of playing it on PC using BlueStacks. If you are looking for a new and exciting game to play, you should definitely give Dragon Trail: Hunter World a try.</p>
115
- <h3>Why you should try Dragon Trail: Hunter World</h3>
116
- <p>Here are some of the reasons why you should try Dragon Trail: Hunter World:</p>
117
- <ul>
118
- <li>It is a free-to-play game that you can download and play anytime, anywhere.</li>
119
- <li>It is a unique game that combines fantasy, adventure, and tribal culture in a captivating way.</li>
120
- <li>It is a challenging game that tests your skills, strategy, and creativity in various aspects.</li>
121
- <li>It is a social game that lets you interact with other players and make new friends.</li>
122
- <li>It is a rewarding game that gives you plenty of incentives and rewards for playing.</li>
123
- </ul>
124
- <h3>Where to find more information and updates about the game</h3>
125
- <p>If you want to find more information and updates about Dragon Trail: Hunter World, you can visit these sources:</p>
126
- <ul>
127
- <li>The official website of the game <a href="">here</a>, where you can find the latest news, events, guides, and support.</li>
128
- <li>The official Facebook page of the game <a href="">here</a>, where you can follow the posts, comments, videos, and live streams.</li>
129
- <li>The official YouTube channel of the game <a href="">here</a>, where you can watch the trailers, gameplay, reviews, and tips.</li>
130
- <li>The official Discord server of the game <a href="">here</a>, where you can chat with other players, developers, and moderators.</li>
131
- <li>The official Reddit community of the game <a href="">here</a>, where you can join the discussions, questions, suggestions, and feedback.</li>
132
- </ul>
133
- <h3>FAQs</h3>
134
- <p>Here are some of the frequently asked questions about Dragon Trail: Hunter World:</p>
135
- <ol>
136
- <li>Q: How can I change my class in the game?</li>
137
- <li>A: You can change your class in the game by going to the class hall in Star City and talking to the class master. You can change your class once for free, but after that you will need to pay diamonds or coupons.</li>
138
- <li>Q: How can I get more pets in the game?</li>
139
- <li>A: You can get more pets in the game by completing quests, participating in events, opening chests, or buying them from the shop. You can also breed your pets to get new ones with different attributes.</li>
140
- <li>Q: How can I upgrade my pets in the game?</li>
141
- <li>A: You can upgrade your pets in the game by feeding them with pet food or pet essence. You can also evolve them by using pet stones or pet crystals. You can also awaken them by using pet souls or pet runes.</li>
142
- <li>Q: How can I join or create a guild in the game?</li>
143
- <li>A: You can join or create a guild in the game by going to the guild hall in Star City and talking to the guild manager. You will need to be at least level 20 to join or create a guild. You will also need to pay some gold or diamonds to create a guild.</li>
144
- <li>Q: How can I contact the customer service in the game?</li>
145
- <li>A: You can contact the customer service in the game by going to the settings menu and tapping on the customer service button. You can also send an email to <a href="mailto:[email protected]">[email protected]</a> or fill out an online form <a href="">here</a>.</li>
146
- </ol>
147
- <br />
148
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/52Hz/CMFNet_deraindrop/model/block.py DELETED
@@ -1,146 +0,0 @@
1
- import torch
2
- import torch.nn as nn
3
- ##########################################################################
4
- def conv(in_channels, out_channels, kernel_size, bias=False, stride=1):
5
- layer = nn.Conv2d(in_channels, out_channels, kernel_size, padding=(kernel_size // 2), bias=bias, stride=stride)
6
- return layer
7
-
8
-
9
- def conv3x3(in_chn, out_chn, bias=True):
10
- layer = nn.Conv2d(in_chn, out_chn, kernel_size=3, stride=1, padding=1, bias=bias)
11
- return layer
12
-
13
-
14
- def conv_down(in_chn, out_chn, bias=False):
15
- layer = nn.Conv2d(in_chn, out_chn, kernel_size=4, stride=2, padding=1, bias=bias)
16
- return layer
17
-
18
- ##########################################################################
19
- ## Supervised Attention Module (SAM)
20
- class SAM(nn.Module):
21
- def __init__(self, n_feat, kernel_size, bias):
22
- super(SAM, self).__init__()
23
- self.conv1 = conv(n_feat, n_feat, kernel_size, bias=bias)
24
- self.conv2 = conv(n_feat, 3, kernel_size, bias=bias)
25
- self.conv3 = conv(3, n_feat, kernel_size, bias=bias)
26
-
27
- def forward(self, x, x_img):
28
- x1 = self.conv1(x)
29
- img = self.conv2(x) + x_img
30
- x2 = torch.sigmoid(self.conv3(img))
31
- x1 = x1 * x2
32
- x1 = x1 + x
33
- return x1, img
34
-
35
- ##########################################################################
36
- ## Spatial Attention
37
- class SALayer(nn.Module):
38
- def __init__(self, kernel_size=7):
39
- super(SALayer, self).__init__()
40
- self.conv1 = nn.Conv2d(2, 1, kernel_size, padding=kernel_size // 2, bias=False)
41
- self.sigmoid = nn.Sigmoid()
42
-
43
- def forward(self, x):
44
- avg_out = torch.mean(x, dim=1, keepdim=True)
45
- max_out, _ = torch.max(x, dim=1, keepdim=True)
46
- y = torch.cat([avg_out, max_out], dim=1)
47
- y = self.conv1(y)
48
- y = self.sigmoid(y)
49
- return x * y
50
-
51
- # Spatial Attention Block (SAB)
52
- class SAB(nn.Module):
53
- def __init__(self, n_feat, kernel_size, reduction, bias, act):
54
- super(SAB, self).__init__()
55
- modules_body = [conv(n_feat, n_feat, kernel_size, bias=bias), act, conv(n_feat, n_feat, kernel_size, bias=bias)]
56
- self.body = nn.Sequential(*modules_body)
57
- self.SA = SALayer(kernel_size=7)
58
-
59
- def forward(self, x):
60
- res = self.body(x)
61
- res = self.SA(res)
62
- res += x
63
- return res
64
-
65
- ##########################################################################
66
- ## Pixel Attention
67
- class PALayer(nn.Module):
68
- def __init__(self, channel, reduction=16, bias=False):
69
- super(PALayer, self).__init__()
70
- self.pa = nn.Sequential(
71
- nn.Conv2d(channel, channel // reduction, 1, padding=0, bias=bias),
72
- nn.ReLU(inplace=True),
73
- nn.Conv2d(channel // reduction, channel, 1, padding=0, bias=bias), # channel <-> 1
74
- nn.Sigmoid()
75
- )
76
-
77
- def forward(self, x):
78
- y = self.pa(x)
79
- return x * y
80
-
81
- ## Pixel Attention Block (PAB)
82
- class PAB(nn.Module):
83
- def __init__(self, n_feat, kernel_size, reduction, bias, act):
84
- super(PAB, self).__init__()
85
- modules_body = [conv(n_feat, n_feat, kernel_size, bias=bias), act, conv(n_feat, n_feat, kernel_size, bias=bias)]
86
- self.PA = PALayer(n_feat, reduction, bias=bias)
87
- self.body = nn.Sequential(*modules_body)
88
-
89
- def forward(self, x):
90
- res = self.body(x)
91
- res = self.PA(res)
92
- res += x
93
- return res
94
-
95
- ##########################################################################
96
- ## Channel Attention Layer
97
- class CALayer(nn.Module):
98
- def __init__(self, channel, reduction=16, bias=False):
99
- super(CALayer, self).__init__()
100
- # global average pooling: feature --> point
101
- self.avg_pool = nn.AdaptiveAvgPool2d(1)
102
- # feature channel downscale and upscale --> channel weight
103
- self.conv_du = nn.Sequential(
104
- nn.Conv2d(channel, channel // reduction, 1, padding=0, bias=bias),
105
- nn.ReLU(inplace=True),
106
- nn.Conv2d(channel // reduction, channel, 1, padding=0, bias=bias),
107
- nn.Sigmoid()
108
- )
109
-
110
- def forward(self, x):
111
- y = self.avg_pool(x)
112
- y = self.conv_du(y)
113
- return x * y
114
-
115
- ## Channel Attention Block (CAB)
116
- class CAB(nn.Module):
117
- def __init__(self, n_feat, kernel_size, reduction, bias, act):
118
- super(CAB, self).__init__()
119
- modules_body = [conv(n_feat, n_feat, kernel_size, bias=bias), act, conv(n_feat, n_feat, kernel_size, bias=bias)]
120
-
121
- self.CA = CALayer(n_feat, reduction, bias=bias)
122
- self.body = nn.Sequential(*modules_body)
123
-
124
- def forward(self, x):
125
- res = self.body(x)
126
- res = self.CA(res)
127
- res += x
128
- return res
129
-
130
-
131
- if __name__ == "__main__":
132
- import time
133
- from thop import profile
134
- # layer = CAB(64, 3, 4, False, nn.PReLU())
135
- layer = PAB(64, 3, 4, False, nn.PReLU())
136
- # layer = SAB(64, 3, 4, False, nn.PReLU())
137
- for idx, m in enumerate(layer.modules()):
138
- print(idx, "-", m)
139
- s = time.time()
140
-
141
- rgb = torch.ones(1, 64, 256, 256, dtype=torch.float, requires_grad=False)
142
- out = layer(rgb)
143
- flops, params = profile(layer, inputs=(rgb,))
144
- print('parameters:', params)
145
- print('flops', flops)
146
- print('time: {:.4f}ms'.format((time.time()-s)*1000))
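A minimal usage sketch for the attention blocks above (the model.block import path and the 64-channel feature size are assumptions for illustration, not part of the original Space): stacking the channel, pixel, and spatial attention blocks on a dummy feature map shows that each block is residual and keeps the input shape.

import torch
import torch.nn as nn
from model.block import CAB, PAB, SAB  # assumed import path for the file above

act = nn.PReLU()
stage = nn.Sequential(
    CAB(n_feat=64, kernel_size=3, reduction=4, bias=False, act=act),  # channel attention
    PAB(n_feat=64, kernel_size=3, reduction=4, bias=False, act=act),  # pixel attention
    SAB(n_feat=64, kernel_size=3, reduction=4, bias=False, act=act),  # spatial attention
)
feat = torch.randn(1, 64, 128, 128)
out = stage(feat)
print(out.shape)  # torch.Size([1, 64, 128, 128]); the residual blocks preserve the shape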
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/A00001/bingothoo/postcss.config.js DELETED
@@ -1,6 +0,0 @@
1
- module.exports = {
2
- plugins: {
3
- tailwindcss: {},
4
- autoprefixer: {},
5
- },
6
- }
 
 
 
 
 
 
 
spaces/AI-Hobbyist/Hoyo-RVC/train/utils.py DELETED
@@ -1,486 +0,0 @@
1
- import os, traceback
2
- import glob
3
- import sys
4
- import argparse
5
- import logging
6
- import json
7
- import subprocess
8
- import numpy as np
9
- from scipy.io.wavfile import read
10
- import torch
11
-
12
- MATPLOTLIB_FLAG = False
13
-
14
- logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
15
- logger = logging
16
-
17
-
18
- def load_checkpoint_d(checkpoint_path, combd, sbd, optimizer=None, load_opt=1):
19
- assert os.path.isfile(checkpoint_path)
20
- checkpoint_dict = torch.load(checkpoint_path, map_location="cpu")
21
-
22
- ##################
23
- def go(model, bkey):
24
- saved_state_dict = checkpoint_dict[bkey]
25
- if hasattr(model, "module"):
26
- state_dict = model.module.state_dict()
27
- else:
28
- state_dict = model.state_dict()
29
- new_state_dict = {}
30
- for k, v in state_dict.items(): # the shapes the model expects
31
- try:
32
- new_state_dict[k] = saved_state_dict[k]
33
- if saved_state_dict[k].shape != state_dict[k].shape:
34
- print(
35
- "shape-%s-mismatch|need-%s|get-%s"
36
- % (k, state_dict[k].shape, saved_state_dict[k].shape)
37
- ) #
38
- raise KeyError
39
- except:
40
- # logger.info(traceback.format_exc())
41
- logger.info("%s is not in the checkpoint" % k) # key missing from the pretrained checkpoint
42
- new_state_dict[k] = v # fall back to the model's own randomly initialized value
43
- if hasattr(model, "module"):
44
- model.module.load_state_dict(new_state_dict, strict=False)
45
- else:
46
- model.load_state_dict(new_state_dict, strict=False)
47
-
48
- go(combd, "combd")
49
- go(sbd, "sbd")
50
- #############
51
- logger.info("Loaded model weights")
52
-
53
- iteration = checkpoint_dict["iteration"]
54
- learning_rate = checkpoint_dict["learning_rate"]
55
- if (
56
- optimizer is not None and load_opt == 1
57
- ): ### if the optimizer state cannot be loaded (e.g. it is empty), it is re-initialized, which may also affect the LR schedule update, so the outermost try/except in the train script catches this
58
- # try:
59
- optimizer.load_state_dict(checkpoint_dict["optimizer"])
60
- # except:
61
- # traceback.print_exc()
62
- logger.info("Loaded checkpoint '{}' (epoch {})".format(checkpoint_path, iteration))
63
- return model, optimizer, learning_rate, iteration
64
-
65
-
66
- # def load_checkpoint(checkpoint_path, model, optimizer=None):
67
- # assert os.path.isfile(checkpoint_path)
68
- # checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
69
- # iteration = checkpoint_dict['iteration']
70
- # learning_rate = checkpoint_dict['learning_rate']
71
- # if optimizer is not None:
72
- # optimizer.load_state_dict(checkpoint_dict['optimizer'])
73
- # # print(1111)
74
- # saved_state_dict = checkpoint_dict['model']
75
- # # print(1111)
76
- #
77
- # if hasattr(model, 'module'):
78
- # state_dict = model.module.state_dict()
79
- # else:
80
- # state_dict = model.state_dict()
81
- # new_state_dict= {}
82
- # for k, v in state_dict.items():
83
- # try:
84
- # new_state_dict[k] = saved_state_dict[k]
85
- # except:
86
- # logger.info("%s is not in the checkpoint" % k)
87
- # new_state_dict[k] = v
88
- # if hasattr(model, 'module'):
89
- # model.module.load_state_dict(new_state_dict)
90
- # else:
91
- # model.load_state_dict(new_state_dict)
92
- # logger.info("Loaded checkpoint '{}' (epoch {})" .format(
93
- # checkpoint_path, iteration))
94
- # return model, optimizer, learning_rate, iteration
95
- def load_checkpoint(checkpoint_path, model, optimizer=None, load_opt=1):
96
- assert os.path.isfile(checkpoint_path)
97
- checkpoint_dict = torch.load(checkpoint_path, map_location="cpu")
98
-
99
- saved_state_dict = checkpoint_dict["model"]
100
- if hasattr(model, "module"):
101
- state_dict = model.module.state_dict()
102
- else:
103
- state_dict = model.state_dict()
104
- new_state_dict = {}
105
- for k, v in state_dict.items(): # the shapes the model expects
106
- try:
107
- new_state_dict[k] = saved_state_dict[k]
108
- if saved_state_dict[k].shape != state_dict[k].shape:
109
- print(
110
- "shape-%s-mismatch|need-%s|get-%s"
111
- % (k, state_dict[k].shape, saved_state_dict[k].shape)
112
- ) #
113
- raise KeyError
114
- except:
115
- # logger.info(traceback.format_exc())
116
- logger.info("%s is not in the checkpoint" % k) # key missing from the pretrained checkpoint
117
- new_state_dict[k] = v # fall back to the model's own randomly initialized value
118
- if hasattr(model, "module"):
119
- model.module.load_state_dict(new_state_dict, strict=False)
120
- else:
121
- model.load_state_dict(new_state_dict, strict=False)
122
- logger.info("Loaded model weights")
123
-
124
- iteration = checkpoint_dict["iteration"]
125
- learning_rate = checkpoint_dict["learning_rate"]
126
- if (
127
- optimizer is not None and load_opt == 1
128
- ): ### if the optimizer state cannot be loaded (e.g. it is empty), it is re-initialized, which may also affect the LR schedule update, so the outermost try/except in the train script catches this
129
- # try:
130
- optimizer.load_state_dict(checkpoint_dict["optimizer"])
131
- # except:
132
- # traceback.print_exc()
133
- logger.info("Loaded checkpoint '{}' (epoch {})".format(checkpoint_path, iteration))
134
- return model, optimizer, learning_rate, iteration
135
-
136
-
137
- def save_checkpoint(model, optimizer, learning_rate, iteration, checkpoint_path):
138
- logger.info(
139
- "Saving model and optimizer state at epoch {} to {}".format(
140
- iteration, checkpoint_path
141
- )
142
- )
143
- if hasattr(model, "module"):
144
- state_dict = model.module.state_dict()
145
- else:
146
- state_dict = model.state_dict()
147
- torch.save(
148
- {
149
- "model": state_dict,
150
- "iteration": iteration,
151
- "optimizer": optimizer.state_dict(),
152
- "learning_rate": learning_rate,
153
- },
154
- checkpoint_path,
155
- )
156
-
157
-
158
- def save_checkpoint_d(combd, sbd, optimizer, learning_rate, iteration, checkpoint_path):
159
- logger.info(
160
- "Saving model and optimizer state at epoch {} to {}".format(
161
- iteration, checkpoint_path
162
- )
163
- )
164
- if hasattr(combd, "module"):
165
- state_dict_combd = combd.module.state_dict()
166
- else:
167
- state_dict_combd = combd.state_dict()
168
- if hasattr(sbd, "module"):
169
- state_dict_sbd = sbd.module.state_dict()
170
- else:
171
- state_dict_sbd = sbd.state_dict()
172
- torch.save(
173
- {
174
- "combd": state_dict_combd,
175
- "sbd": state_dict_sbd,
176
- "iteration": iteration,
177
- "optimizer": optimizer.state_dict(),
178
- "learning_rate": learning_rate,
179
- },
180
- checkpoint_path,
181
- )
182
-
183
-
184
- def summarize(
185
- writer,
186
- global_step,
187
- scalars={},
188
- histograms={},
189
- images={},
190
- audios={},
191
- audio_sampling_rate=22050,
192
- ):
193
- for k, v in scalars.items():
194
- writer.add_scalar(k, v, global_step)
195
- for k, v in histograms.items():
196
- writer.add_histogram(k, v, global_step)
197
- for k, v in images.items():
198
- writer.add_image(k, v, global_step, dataformats="HWC")
199
- for k, v in audios.items():
200
- writer.add_audio(k, v, global_step, audio_sampling_rate)
201
-
202
-
203
- def latest_checkpoint_path(dir_path, regex="G_*.pth"):
204
- f_list = glob.glob(os.path.join(dir_path, regex))
205
- f_list.sort(key=lambda f: int("".join(filter(str.isdigit, f))))
206
- x = f_list[-1]
207
- print(x)
208
- return x
209
-
210
-
211
- def plot_spectrogram_to_numpy(spectrogram):
212
- global MATPLOTLIB_FLAG
213
- if not MATPLOTLIB_FLAG:
214
- import matplotlib
215
-
216
- matplotlib.use("Agg")
217
- MATPLOTLIB_FLAG = True
218
- mpl_logger = logging.getLogger("matplotlib")
219
- mpl_logger.setLevel(logging.WARNING)
220
- import matplotlib.pylab as plt
221
- import numpy as np
222
-
223
- fig, ax = plt.subplots(figsize=(10, 2))
224
- im = ax.imshow(spectrogram, aspect="auto", origin="lower", interpolation="none")
225
- plt.colorbar(im, ax=ax)
226
- plt.xlabel("Frames")
227
- plt.ylabel("Channels")
228
- plt.tight_layout()
229
-
230
- fig.canvas.draw()
231
- data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8) # frombuffer replaces the deprecated fromstring for raw bytes
232
- data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
233
- plt.close()
234
- return data
235
-
236
-
237
- def plot_alignment_to_numpy(alignment, info=None):
238
- global MATPLOTLIB_FLAG
239
- if not MATPLOTLIB_FLAG:
240
- import matplotlib
241
-
242
- matplotlib.use("Agg")
243
- MATPLOTLIB_FLAG = True
244
- mpl_logger = logging.getLogger("matplotlib")
245
- mpl_logger.setLevel(logging.WARNING)
246
- import matplotlib.pylab as plt
247
- import numpy as np
248
-
249
- fig, ax = plt.subplots(figsize=(6, 4))
250
- im = ax.imshow(
251
- alignment.transpose(), aspect="auto", origin="lower", interpolation="none"
252
- )
253
- fig.colorbar(im, ax=ax)
254
- xlabel = "Decoder timestep"
255
- if info is not None:
256
- xlabel += "\n\n" + info
257
- plt.xlabel(xlabel)
258
- plt.ylabel("Encoder timestep")
259
- plt.tight_layout()
260
-
261
- fig.canvas.draw()
262
- data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8) # frombuffer replaces the deprecated fromstring for raw bytes
263
- data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
264
- plt.close()
265
- return data
266
-
267
-
268
- def load_wav_to_torch(full_path):
269
- sampling_rate, data = read(full_path)
270
- return torch.FloatTensor(data.astype(np.float32)), sampling_rate
271
-
272
-
273
- def load_filepaths_and_text(filename, split="|"):
274
- with open(filename, encoding="utf-8") as f:
275
- filepaths_and_text = [line.strip().split(split) for line in f]
276
- return filepaths_and_text
277
-
278
-
279
- def get_hparams(init=True):
280
- """
281
- todo:
282
- the final seven args:
283
- save frequency, total epochs done
284
- bs done
285
- pretrainG, pretrainD done
286
- GPU ids: os.en["CUDA_VISIBLE_DEVICES"] done
287
- if_latest done
288
- model: if_f0 done
289
- sample rate: pick the config automatically done
290
- whether to cache the dataset in GPU memory: if_cache_data_in_gpu done
291
-
292
- -m:
293
- decide the training_files path automatically; replace hps.data.training_files in train_nsf_load_pretrain.py done
294
- -c is no longer needed
295
- """
296
- parser = argparse.ArgumentParser()
297
- # parser.add_argument('-c', '--config', type=str, default="configs/40k.json",help='JSON file for configuration')
298
- parser.add_argument(
299
- "-se",
300
- "--save_every_epoch",
301
- type=int,
302
- required=True,
303
- help="checkpoint save frequency (epoch)",
304
- )
305
- parser.add_argument(
306
- "-te", "--total_epoch", type=int, required=True, help="total_epoch"
307
- )
308
- parser.add_argument(
309
- "-pg", "--pretrainG", type=str, default="", help="Pretrained Generator path"
310
- )
311
- parser.add_argument(
312
- "-pd", "--pretrainD", type=str, default="", help="Pretrained Discriminator path"
313
- )
314
- parser.add_argument("-g", "--gpus", type=str, default="0", help="split by -")
315
- parser.add_argument(
316
- "-bs", "--batch_size", type=int, required=True, help="batch size"
317
- )
318
- parser.add_argument(
319
- "-e", "--experiment_dir", type=str, required=True, help="experiment dir"
320
- ) # -m
321
- parser.add_argument(
322
- "-sr", "--sample_rate", type=str, required=True, help="sample rate, 32k/40k/48k"
323
- )
324
- parser.add_argument(
325
- "-sw",
326
- "--save_every_weights",
327
- type=str,
328
- default="0",
329
- help="save the extracted model in weights directory when saving checkpoints",
330
- )
331
- parser.add_argument(
332
- "-v", "--version", type=str, required=True, help="model version"
333
- )
334
- parser.add_argument(
335
- "-f0",
336
- "--if_f0",
337
- type=int,
338
- required=True,
339
- help="use f0 as one of the inputs of the model, 1 or 0",
340
- )
341
- parser.add_argument(
342
- "-l",
343
- "--if_latest",
344
- type=int,
345
- required=True,
346
- help="if only save the latest G/D pth file, 1 or 0",
347
- )
348
- parser.add_argument(
349
- "-c",
350
- "--if_cache_data_in_gpu",
351
- type=int,
352
- required=True,
353
- help="if caching the dataset in GPU memory, 1 or 0",
354
- )
355
-
356
- args = parser.parse_args()
357
- name = args.experiment_dir
358
- experiment_dir = os.path.join("./logs", args.experiment_dir)
359
-
360
- if not os.path.exists(experiment_dir):
361
- os.makedirs(experiment_dir)
362
-
363
- if args.version == "v1" or args.sample_rate == "40k":
364
- config_path = "configs/%s.json" % args.sample_rate
365
- else:
366
- config_path = "configs/%s_v2.json" % args.sample_rate
367
- config_save_path = os.path.join(experiment_dir, "config.json")
368
- if init:
369
- with open(config_path, "r") as f:
370
- data = f.read()
371
- with open(config_save_path, "w") as f:
372
- f.write(data)
373
- else:
374
- with open(config_save_path, "r") as f:
375
- data = f.read()
376
- config = json.loads(data)
377
-
378
- hparams = HParams(**config)
379
- hparams.model_dir = hparams.experiment_dir = experiment_dir
380
- hparams.save_every_epoch = args.save_every_epoch
381
- hparams.name = name
382
- hparams.total_epoch = args.total_epoch
383
- hparams.pretrainG = args.pretrainG
384
- hparams.pretrainD = args.pretrainD
385
- hparams.version = args.version
386
- hparams.gpus = args.gpus
387
- hparams.train.batch_size = args.batch_size
388
- hparams.sample_rate = args.sample_rate
389
- hparams.if_f0 = args.if_f0
390
- hparams.if_latest = args.if_latest
391
- hparams.save_every_weights = args.save_every_weights
392
- hparams.if_cache_data_in_gpu = args.if_cache_data_in_gpu
393
- hparams.data.training_files = "%s/filelist.txt" % experiment_dir
394
- return hparams
395
-
396
-
397
- def get_hparams_from_dir(model_dir):
398
- config_save_path = os.path.join(model_dir, "config.json")
399
- with open(config_save_path, "r") as f:
400
- data = f.read()
401
- config = json.loads(data)
402
-
403
- hparams = HParams(**config)
404
- hparams.model_dir = model_dir
405
- return hparams
406
-
407
-
408
- def get_hparams_from_file(config_path):
409
- with open(config_path, "r") as f:
410
- data = f.read()
411
- config = json.loads(data)
412
-
413
- hparams = HParams(**config)
414
- return hparams
415
-
416
-
417
- def check_git_hash(model_dir):
418
- source_dir = os.path.dirname(os.path.realpath(__file__))
419
- if not os.path.exists(os.path.join(source_dir, ".git")):
420
- logger.warn(
421
- "{} is not a git repository, therefore hash value comparison will be ignored.".format(
422
- source_dir
423
- )
424
- )
425
- return
426
-
427
- cur_hash = subprocess.getoutput("git rev-parse HEAD")
428
-
429
- path = os.path.join(model_dir, "githash")
430
- if os.path.exists(path):
431
- saved_hash = open(path).read()
432
- if saved_hash != cur_hash:
433
- logger.warn(
434
- "git hash values are different. {}(saved) != {}(current)".format(
435
- saved_hash[:8], cur_hash[:8]
436
- )
437
- )
438
- else:
439
- open(path, "w").write(cur_hash)
440
-
441
-
442
- def get_logger(model_dir, filename="train.log"):
443
- global logger
444
- logger = logging.getLogger(os.path.basename(model_dir))
445
- logger.setLevel(logging.DEBUG)
446
-
447
- formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s")
448
- if not os.path.exists(model_dir):
449
- os.makedirs(model_dir)
450
- h = logging.FileHandler(os.path.join(model_dir, filename))
451
- h.setLevel(logging.DEBUG)
452
- h.setFormatter(formatter)
453
- logger.addHandler(h)
454
- return logger
455
-
456
-
457
- class HParams:
458
- def __init__(self, **kwargs):
459
- for k, v in kwargs.items():
460
- if type(v) == dict:
461
- v = HParams(**v)
462
- self[k] = v
463
-
464
- def keys(self):
465
- return self.__dict__.keys()
466
-
467
- def items(self):
468
- return self.__dict__.items()
469
-
470
- def values(self):
471
- return self.__dict__.values()
472
-
473
- def __len__(self):
474
- return len(self.__dict__)
475
-
476
- def __getitem__(self, key):
477
- return getattr(self, key)
478
-
479
- def __setitem__(self, key, value):
480
- return setattr(self, key, value)
481
-
482
- def __contains__(self, key):
483
- return key in self.__dict__
484
-
485
- def __repr__(self):
486
- return self.__dict__.__repr__()
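For reference, the HParams class above is a small attribute/dict hybrid: nested dicts are wrapped into nested HParams instances, and item access maps onto getattr/setattr. A minimal illustrative sketch (the values are invented, not taken from any real RVC config):

hps = HParams(train={"batch_size": 8, "learning_rate": 2e-4}, sample_rate="40k")
print(hps.train.batch_size)  # 8, the nested dict became another HParams
print(hps["sample_rate"])    # "40k", __getitem__ forwards to getattr
print("train" in hps)        # True, __contains__ checks the instance __dict__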
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AICODER009/Food101_Detection/app.py DELETED
@@ -1,81 +0,0 @@
1
- ### 1. Imports and class names setup ###
2
- import gradio as gr
3
- import os
4
- import torch
5
-
6
- from model import create_effnetb2_model
7
- from timeit import default_timer as timer
8
- from typing import Tuple, Dict
9
-
10
- # Setup class names
11
- with open("class_names.txt", "r") as f: # reading them in from class_names.txt
12
- class_names = [food_name.strip() for food_name in f.readlines()]
13
-
14
- ### 2. Model and transforms preparation ###
15
-
16
- # Create model
17
- effnetb2, effnetb2_transforms = create_effnetb2_model(
18
- num_classes=101, # could also use len(class_names)
19
- )
20
-
21
- # Load saved weights
22
- effnetb2.load_state_dict(
23
- torch.load(
24
- f="09_pretrained_effnetb2_feature_extractor_food101_20_percent.pth",
25
- map_location=torch.device("cpu"), # load to CPU
26
- )
27
- )
28
-
29
- ### 3. Predict function ###
30
-
31
- # Create predict function
32
- def predict(img) -> Tuple[Dict, float]:
33
- """Transforms and performs a prediction on img and returns prediction and time taken.
34
- """
35
- # Start the timer
36
- start_time = timer()
37
-
38
- # Transform the target image and add a batch dimension
39
- img = effnetb2_transforms(img).unsqueeze(0)
40
-
41
- # Put model into evaluation mode and turn on inference mode
42
- effnetb2.eval()
43
- with torch.inference_mode():
44
- # Pass the transformed image through the model and turn the prediction logits into prediction probabilities
45
- pred_probs = torch.softmax(effnetb2(img), dim=1)
46
-
47
- # Create a prediction label and prediction probability dictionary for each prediction class (this is the required format for Gradio's output parameter)
48
- pred_labels_and_probs = {class_names[i]: float(pred_probs[0][i]) for i in range(len(class_names))}
49
-
50
- # Calculate the prediction time
51
- pred_time = round(timer() - start_time, 5)
52
-
53
- # Return the prediction dictionary and prediction time
54
- return pred_labels_and_probs, pred_time
55
-
56
- ### 4. Gradio app ###
57
-
58
- # Create title, description and article strings
59
- title = "FoodVision Big 🍔👁"
60
- description = "An EfficientNetB2 feature extractor computer vision model to classify images of food into [101 different classes](https://github.com/mrdbourke/pytorch-deep-learning/blob/main/extras/food101_class_names.txt)."
61
- article = "Created by Subhan Aliyev."
62
-
63
- # Create examples list from "examples/" directory
64
- example_list = [["examples/" + example] for example in os.listdir("examples")]
65
-
66
- # Create Gradio interface
67
- demo = gr.Interface(
68
- fn=predict,
69
- inputs=gr.Image(type="pil"),
70
- outputs=[
71
- gr.Label(num_top_classes=5, label="Predictions"),
72
- gr.Number(label="Prediction time (s)"),
73
- ],
74
- examples=example_list,
75
- title=title,
76
- description=description,
77
- article=article,
78
- )
79
-
80
- # Launch the app!
81
- demo.launch()
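Outside of Gradio, predict() can also be called directly for a quick local smoke test; a hypothetical example that simply uses whichever image happens to be first in the examples/ folder:

from PIL import Image

img = Image.open(example_list[0][0])  # first bundled example image
pred_probs, seconds = predict(img)
top5 = sorted(pred_probs.items(), key=lambda kv: kv[1], reverse=True)[:5]
print(top5, f"(predicted in {seconds}s)")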
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/models/diffusion/ddpm_audio.py DELETED
@@ -1,1262 +0,0 @@
1
- """
2
- wild mixture of
3
- https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
4
- https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py
5
- https://github.com/CompVis/taming-transformers
6
- -- merci
7
- """
8
- import os
9
- import torch
10
- import torch.nn as nn
11
- import numpy as np
12
- import pytorch_lightning as pl
13
- from torch.optim.lr_scheduler import LambdaLR
14
- from einops import rearrange, repeat
15
- from contextlib import contextmanager
16
- from functools import partial
17
- from tqdm import tqdm
18
- from torchvision.utils import make_grid
19
- from pytorch_lightning.utilities.distributed import rank_zero_only
20
-
21
- from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
22
- from ldm.modules.ema import LitEma
23
- from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
24
- from ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL
25
- from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
26
- from ldm.models.diffusion.ddim import DDIMSampler
27
- from ldm.models.diffusion.ddpm import DDPM, disabled_train
28
- from omegaconf import ListConfig
29
-
30
- __conditioning_keys__ = {'concat': 'c_concat',
31
- 'crossattn': 'c_crossattn',
32
- 'adm': 'y'}
33
-
34
-
35
- class LatentDiffusion_audio(DDPM):
36
- """main class"""
37
- def __init__(self,
38
- first_stage_config,
39
- cond_stage_config,
40
- num_timesteps_cond=None,
41
- mel_dim=80,
42
- mel_length=848,
43
- cond_stage_key="image",
44
- cond_stage_trainable=False,
45
- concat_mode=True,
46
- cond_stage_forward=None,
47
- conditioning_key=None,
48
- scale_factor=1.0,
49
- scale_by_std=False,
50
- *args, **kwargs):
51
- self.num_timesteps_cond = default(num_timesteps_cond, 1)
52
- self.scale_by_std = scale_by_std
53
- assert self.num_timesteps_cond <= kwargs['timesteps']
54
- # for backwards compatibility after implementation of DiffusionWrapper
55
- if conditioning_key is None:
56
- conditioning_key = 'concat' if concat_mode else 'crossattn'
57
- if cond_stage_config == '__is_unconditional__':
58
- conditioning_key = None
59
- ckpt_path = kwargs.pop("ckpt_path", None)
60
- ignore_keys = kwargs.pop("ignore_keys", [])
61
- super().__init__(conditioning_key=conditioning_key, *args, **kwargs)
62
- self.concat_mode = concat_mode
63
- self.mel_dim = mel_dim
64
- self.mel_length = mel_length
65
- self.cond_stage_trainable = cond_stage_trainable
66
- self.cond_stage_key = cond_stage_key
67
- try:
68
- self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1
69
- except:
70
- self.num_downs = 0
71
- if not scale_by_std:
72
- self.scale_factor = scale_factor
73
- else:
74
- self.register_buffer('scale_factor', torch.tensor(scale_factor))
75
- self.instantiate_first_stage(first_stage_config)
76
- self.instantiate_cond_stage(cond_stage_config)
77
- self.cond_stage_forward = cond_stage_forward
78
- self.clip_denoised = False
79
- self.bbox_tokenizer = None
80
-
81
- self.restarted_from_ckpt = False
82
- if ckpt_path is not None:
83
- self.init_from_ckpt(ckpt_path, ignore_keys)
84
- self.restarted_from_ckpt = True
85
-
86
- def make_cond_schedule(self, ):
87
- self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long)
88
- ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long()
89
- self.cond_ids[:self.num_timesteps_cond] = ids
90
-
91
- @rank_zero_only
92
- @torch.no_grad()
93
- def on_train_batch_start(self, batch, batch_idx, dataloader_idx):
94
- # only for very first batch
95
- if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt:
96
- assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously'
97
- # set rescale weight to 1./std of encodings
98
- print("### USING STD-RESCALING ###")
99
- x = super().get_input(batch, self.first_stage_key)
100
- x = x.to(self.device)
101
- encoder_posterior = self.encode_first_stage(x)
102
- z = self.get_first_stage_encoding(encoder_posterior).detach()
103
- del self.scale_factor
104
- self.register_buffer('scale_factor', 1. / z.flatten().std())
105
- print(f"setting self.scale_factor to {self.scale_factor}")
106
- print("### USING STD-RESCALING ###")
107
-
108
- def register_schedule(self,
109
- given_betas=None, beta_schedule="linear", timesteps=1000,
110
- linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
111
- super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s)
112
-
113
- self.shorten_cond_schedule = self.num_timesteps_cond > 1
114
- if self.shorten_cond_schedule:
115
- self.make_cond_schedule()
116
-
117
- def instantiate_first_stage(self, config):
118
- model = instantiate_from_config(config)
119
- self.first_stage_model = model.eval()
120
- self.first_stage_model.train = disabled_train
121
- for param in self.first_stage_model.parameters():
122
- param.requires_grad = False
123
-
124
- def instantiate_cond_stage(self, config):
125
- if not self.cond_stage_trainable:
126
- if config == "__is_first_stage__":
127
- print("Using first stage also as cond stage.")
128
- self.cond_stage_model = self.first_stage_model
129
- elif config == "__is_unconditional__":
130
- print(f"Training {self.__class__.__name__} as an unconditional model.")
131
- self.cond_stage_model = None
132
- # self.be_unconditional = True
133
- else:
134
- model = instantiate_from_config(config)
135
- self.cond_stage_model = model.eval()
136
- self.cond_stage_model.train = disabled_train
137
- for param in self.cond_stage_model.parameters():
138
- param.requires_grad = False
139
- else:
140
- assert config != '__is_first_stage__'
141
- assert config != '__is_unconditional__'
142
- model = instantiate_from_config(config)
143
- self.cond_stage_model = model
144
-
145
- def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False):
146
- denoise_row = []
147
- for zd in tqdm(samples, desc=desc):
148
- denoise_row.append(self.decode_first_stage(zd.to(self.device),
149
- force_not_quantize=force_no_decoder_quantization))
150
- n_imgs_per_row = len(denoise_row)
151
- denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W
152
- denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w')
153
- denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w')
154
- denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)
155
- return denoise_grid
156
-
157
- def get_first_stage_encoding(self, encoder_posterior):
158
- if isinstance(encoder_posterior, DiagonalGaussianDistribution):
159
- z = encoder_posterior.sample()
160
- elif isinstance(encoder_posterior, torch.Tensor):
161
- z = encoder_posterior
162
- else:
163
- raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented")
164
- return self.scale_factor * z
165
-
166
- def get_learned_conditioning(self, c):
167
- if self.cond_stage_forward is None:
168
- if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode):
169
- c = self.cond_stage_model.encode(c)
170
- if isinstance(c, DiagonalGaussianDistribution):
171
- c = c.mode()
172
- else:
173
- c = self.cond_stage_model(c)
174
- else:
175
- assert hasattr(self.cond_stage_model, self.cond_stage_forward)
176
- c = getattr(self.cond_stage_model, self.cond_stage_forward)(c)
177
- return c
178
-
179
-
180
- @torch.no_grad()
181
- def get_unconditional_conditioning(self, batch_size, null_label=None):
182
- if null_label is not None:
183
- xc = null_label
184
- if isinstance(xc, ListConfig):
185
- xc = list(xc)
186
- if isinstance(xc, dict) or isinstance(xc, list):
187
- c = self.get_learned_conditioning(xc)
188
- else:
189
- if hasattr(xc, "to"):
190
- xc = xc.to(self.device)
191
- c = self.get_learned_conditioning(xc)
192
- else:
193
- if self.cond_stage_key in ["class_label", "cls"]:
194
- xc = self.cond_stage_model.get_unconditional_conditioning(batch_size, device=self.device)
195
- return self.get_learned_conditioning(xc)
196
- else:
197
- raise NotImplementedError("todo")
198
- if isinstance(c, list): # in case the encoder gives us a list
199
- for i in range(len(c)):
200
- c[i] = repeat(c[i], '1 ... -> b ...', b=batch_size).to(self.device)
201
- else:
202
- c = repeat(c, '1 ... -> b ...', b=batch_size).to(self.device)
203
- return c
204
-
205
- def meshgrid(self, h, w):
206
- y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1)
207
- x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1)
208
-
209
- arr = torch.cat([y, x], dim=-1)
210
- return arr
211
-
212
- def delta_border(self, h, w):
213
- """
214
- :param h: height
215
- :param w: width
216
- :return: normalized distance to image border,
217
- with min distance = 0 at border and max dist = 0.5 at image center
218
- """
219
- lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2)
220
- arr = self.meshgrid(h, w) / lower_right_corner
221
- dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0]
222
- dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0]
223
- edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0]
224
- return edge_dist
225
-
226
- def get_weighting(self, h, w, Ly, Lx, device):
227
- weighting = self.delta_border(h, w)
228
- weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"],
229
- self.split_input_params["clip_max_weight"], )
230
- weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device)
231
-
232
- if self.split_input_params["tie_braker"]:
233
- L_weighting = self.delta_border(Ly, Lx)
234
- L_weighting = torch.clip(L_weighting,
235
- self.split_input_params["clip_min_tie_weight"],
236
- self.split_input_params["clip_max_tie_weight"])
237
-
238
- L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device)
239
- weighting = weighting * L_weighting
240
- return weighting
241
-
242
- def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code
243
- """
244
- :param x: img of size (bs, c, h, w)
245
- :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1])
246
- """
247
- bs, nc, h, w = x.shape
248
-
249
- # number of crops in image
250
- Ly = (h - kernel_size[0]) // stride[0] + 1
251
- Lx = (w - kernel_size[1]) // stride[1] + 1
252
-
253
- if uf == 1 and df == 1:
254
- fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
255
- unfold = torch.nn.Unfold(**fold_params)
256
-
257
- fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params)
258
-
259
- weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype)
260
- normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap
261
- weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx))
262
-
263
- elif uf > 1 and df == 1:
264
- fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
265
- unfold = torch.nn.Unfold(**fold_params)
266
-
267
- fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf),
268
- dilation=1, padding=0,
269
- stride=(stride[0] * uf, stride[1] * uf))
270
- fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2)
271
-
272
- weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype)
273
- normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap
274
- weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx))
275
-
276
- elif df > 1 and uf == 1:
277
- fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
278
- unfold = torch.nn.Unfold(**fold_params)
279
-
280
- fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df),
281
- dilation=1, padding=0,
282
- stride=(stride[0] // df, stride[1] // df))
283
- fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2)
284
-
285
- weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype)
286
- normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap
287
- weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx))
288
-
289
- else:
290
- raise NotImplementedError
291
-
292
- return fold, unfold, normalization, weighting
293
-
294
- @torch.no_grad()
295
- def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False,
296
- cond_key=None, return_original_cond=False, bs=None):
297
- x = super().get_input(batch, k)
298
- if bs is not None:
299
- x = x[:bs]
300
- x = x.to(self.device)
301
- encoder_posterior = self.encode_first_stage(x)
302
- z = self.get_first_stage_encoding(encoder_posterior).detach()
303
-
304
- if self.model.conditioning_key is not None:
305
- if cond_key is None:
306
- cond_key = self.cond_stage_key
307
- if cond_key != self.first_stage_key:
308
- if cond_key in ['caption', 'coordinates_bbox']:
309
- xc = batch[cond_key]
310
- elif cond_key == 'class_label':
311
- xc = batch
312
- else:
313
- xc = super().get_input(batch, cond_key).to(self.device)
314
- else:
315
- xc = x
316
- if not self.cond_stage_trainable or force_c_encode:
317
- if isinstance(xc, dict) or isinstance(xc, list):
318
- # import pudb; pudb.set_trace()
319
- c = self.get_learned_conditioning(xc)
320
- else:
321
- c = self.get_learned_conditioning(xc.to(self.device))
322
- else:
323
- c = xc
324
- if bs is not None:
325
- c = c[:bs]
326
- # Testing #
327
- if cond_key == 'masked_image':
328
- mask = super().get_input(batch, "mask")
329
- cc = torch.nn.functional.interpolate(mask, size=c.shape[-2:]) # [B, 1, 10, 106]
330
- c = torch.cat((c, cc), dim=1) # [B, 5, 10, 106]
331
- # Testing #
332
- if self.use_positional_encodings:
333
- pos_x, pos_y = self.compute_latent_shifts(batch)
334
- ckey = __conditioning_keys__[self.model.conditioning_key]
335
- c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y}
336
-
337
- else:
338
- c = None
339
- xc = None
340
- if self.use_positional_encodings:
341
- pos_x, pos_y = self.compute_latent_shifts(batch)
342
- c = {'pos_x': pos_x, 'pos_y': pos_y}
343
- out = [z, c]
344
- if return_first_stage_outputs:
345
- xrec = self.decode_first_stage(z)
346
- out.extend([x, xrec])
347
- if return_original_cond:
348
- out.append(xc)
349
- return out
350
-
351
- @torch.no_grad()
352
- def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False):
353
- if predict_cids:
354
- if z.dim() == 4:
355
- z = torch.argmax(z.exp(), dim=1).long()
356
- z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None)
357
- z = rearrange(z, 'b h w c -> b c h w').contiguous()
358
-
359
- z = 1. / self.scale_factor * z
360
-
361
- if hasattr(self, "split_input_params"):
362
- if self.split_input_params["patch_distributed_vq"]:
363
- ks = self.split_input_params["ks"] # eg. (128, 128)
364
- stride = self.split_input_params["stride"] # eg. (64, 64)
365
- uf = self.split_input_params["vqf"]
366
- bs, nc, h, w = z.shape
367
- if ks[0] > h or ks[1] > w:
368
- ks = (min(ks[0], h), min(ks[1], w))
369
- print("reducing Kernel")
370
-
371
- if stride[0] > h or stride[1] > w:
372
- stride = (min(stride[0], h), min(stride[1], w))
373
- print("reducing stride")
374
-
375
- fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf)
376
-
377
- z = unfold(z) # (bn, nc * prod(**ks), L)
378
- # 1. Reshape to img shape
379
- z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L )
380
-
381
- # 2. apply model loop over last dim
382
- if isinstance(self.first_stage_model, VQModelInterface):
383
- output_list = [self.first_stage_model.decode(z[:, :, :, :, i],
384
- force_not_quantize=predict_cids or force_not_quantize)
385
- for i in range(z.shape[-1])]
386
- else:
387
-
388
- output_list = [self.first_stage_model.decode(z[:, :, :, :, i])
389
- for i in range(z.shape[-1])]
390
-
391
- o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L)
392
- o = o * weighting
393
- # Reverse 1. reshape to img shape
394
- o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L)
395
- # stitch crops together
396
- decoded = fold(o)
397
- decoded = decoded / normalization # norm is shape (1, 1, h, w)
398
- return decoded
399
- else:
400
- if isinstance(self.first_stage_model, VQModelInterface):
401
- return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
402
- else:
403
- return self.first_stage_model.decode(z)
404
-
405
- else:
406
- if isinstance(self.first_stage_model, VQModelInterface):
407
- return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
408
- else:
409
- return self.first_stage_model.decode(z)
410
-
411
- # same as above but without decorator
412
- def differentiable_decode_first_stage(self, z, predict_cids=False, force_not_quantize=False):
413
- if predict_cids:
414
- if z.dim() == 4:
415
- z = torch.argmax(z.exp(), dim=1).long()
416
- z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None)
417
- z = rearrange(z, 'b h w c -> b c h w').contiguous()
418
-
419
- z = 1. / self.scale_factor * z
420
-
421
- if hasattr(self, "split_input_params"):
422
- if self.split_input_params["patch_distributed_vq"]:
423
- ks = self.split_input_params["ks"] # eg. (128, 128)
424
- stride = self.split_input_params["stride"] # eg. (64, 64)
425
- uf = self.split_input_params["vqf"]
426
- bs, nc, h, w = z.shape
427
- if ks[0] > h or ks[1] > w:
428
- ks = (min(ks[0], h), min(ks[1], w))
429
- print("reducing Kernel")
430
-
431
- if stride[0] > h or stride[1] > w:
432
- stride = (min(stride[0], h), min(stride[1], w))
433
- print("reducing stride")
434
-
435
- fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf)
436
-
437
- z = unfold(z) # (bn, nc * prod(**ks), L)
438
- # 1. Reshape to img shape
439
- z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L )
440
-
441
- # 2. apply model loop over last dim
442
- if isinstance(self.first_stage_model, VQModelInterface):
443
- output_list = [self.first_stage_model.decode(z[:, :, :, :, i],
444
- force_not_quantize=predict_cids or force_not_quantize)
445
- for i in range(z.shape[-1])]
446
- else:
447
-
448
- output_list = [self.first_stage_model.decode(z[:, :, :, :, i])
449
- for i in range(z.shape[-1])]
450
-
451
- o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L)
452
- o = o * weighting
453
- # Reverse 1. reshape to img shape
454
- o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L)
455
- # stitch crops together
456
- decoded = fold(o)
457
- decoded = decoded / normalization # norm is shape (1, 1, h, w)
458
- return decoded
459
- else:
460
- if isinstance(self.first_stage_model, VQModelInterface):
461
- return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
462
- else:
463
- return self.first_stage_model.decode(z)
464
-
465
- else:
466
- if isinstance(self.first_stage_model, VQModelInterface):
467
- return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
468
- else:
469
- return self.first_stage_model.decode(z)
470
-
471
- @torch.no_grad()
472
- def encode_first_stage(self, x):
473
- if hasattr(self, "split_input_params"):
474
- if self.split_input_params["patch_distributed_vq"]:
475
- ks = self.split_input_params["ks"] # eg. (128, 128)
476
- stride = self.split_input_params["stride"] # eg. (64, 64)
477
- df = self.split_input_params["vqf"]
478
- self.split_input_params['original_image_size'] = x.shape[-2:]
479
- bs, nc, h, w = x.shape
480
- if ks[0] > h or ks[1] > w:
481
- ks = (min(ks[0], h), min(ks[1], w))
482
- print("reducing Kernel")
483
-
484
- if stride[0] > h or stride[1] > w:
485
- stride = (min(stride[0], h), min(stride[1], w))
486
- print("reducing stride")
487
-
488
- fold, unfold, normalization, weighting = self.get_fold_unfold(x, ks, stride, df=df)
489
- z = unfold(x) # (bn, nc * prod(**ks), L)
490
- # Reshape to img shape
491
- z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L )
492
-
493
- output_list = [self.first_stage_model.encode(z[:, :, :, :, i])
494
- for i in range(z.shape[-1])]
495
-
496
- o = torch.stack(output_list, axis=-1)
497
- o = o * weighting
498
-
499
- # Reverse reshape to img shape
500
- o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L)
501
- # stitch crops together
502
- decoded = fold(o)
503
- decoded = decoded / normalization
504
- return decoded
505
-
506
- else:
507
- return self.first_stage_model.encode(x)
508
- else:
509
- return self.first_stage_model.encode(x)
510
-
511
- def shared_step(self, batch, **kwargs):
512
- x, c = self.get_input(batch, self.first_stage_key)
513
- loss = self(x, c)
514
- return loss
515
-
516
- def test_step(self,batch,batch_idx):
517
- cond = batch[self.cond_stage_key] * self.test_repeat
518
- cond = self.get_learned_conditioning(cond) # c: string -> [B, T, Context_dim]
519
- batch_size = len(cond)
520
- enc_emb = self.sample(cond,batch_size,timesteps=self.test_numsteps)# shape = [batch_size,self.channels,self.mel_dim,self.mel_length]
521
- xrec = self.decode_first_stage(enc_emb)
522
- reconstructions = (xrec + 1)/2 # to mel scale
523
- test_ckpt_path = os.path.basename(self.trainer.tested_ckpt_path)
524
- savedir = os.path.join(self.trainer.log_dir,f'output_imgs_{test_ckpt_path}','fake_class')
525
- if not os.path.exists(savedir):
526
- os.makedirs(savedir)
527
-
528
- file_names = batch['f_name']
529
- nfiles = len(file_names)
530
- reconstructions = reconstructions.cpu().numpy().squeeze(1) # squuze channel dim
531
- for k in range(reconstructions.shape[0]):
532
- b,repeat = k % nfiles, k // nfiles
533
- vname_num_split_index = file_names[b].rfind('_')# file_names[b]:video_name+'_'+num
534
- v_n,num = file_names[b][:vname_num_split_index],file_names[b][vname_num_split_index+1:]
535
- save_img_path = os.path.join(savedir,f'{v_n}_sample_{num}_{repeat}.npy')# the num_th caption, the repeat_th repitition
536
- np.save(save_img_path,reconstructions[b])
537
-
538
- return None
539
-
540
- def forward(self, x, c, *args, **kwargs):
541
- t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long()
542
- if self.model.conditioning_key is not None:
543
- assert c is not None
544
- if self.cond_stage_trainable:
545
- c = self.get_learned_conditioning(c) # c: string -> [B, T, Context_dim]
546
- if self.shorten_cond_schedule: # TODO: drop this option
547
- tc = self.cond_ids[t].to(self.device)
548
- c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float()))
549
- return self.p_losses(x, c, t, *args, **kwargs)
550
-
551
- def _rescale_annotations(self, bboxes, crop_coordinates): # TODO: move to dataset
552
- def rescale_bbox(bbox):
553
- x0 = clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2])
554
- y0 = clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3])
555
- w = min(bbox[2] / crop_coordinates[2], 1 - x0)
556
- h = min(bbox[3] / crop_coordinates[3], 1 - y0)
557
- return x0, y0, w, h
558
-
559
- return [rescale_bbox(b) for b in bboxes]
560
-
561
- def apply_model(self, x_noisy, t, cond, return_ids=False):
562
-
563
- if isinstance(cond, dict):
564
- # hybrid case, cond is exptected to be a dict
565
- pass
566
- else:
567
- if not isinstance(cond, list):
568
- cond = [cond]
569
- key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn'
570
- cond = {key: cond}
571
-
572
- if hasattr(self, "split_input_params"):
573
- assert len(cond) == 1 # todo can only deal with one conditioning atm
574
- assert not return_ids
575
- ks = self.split_input_params["ks"] # eg. (128, 128)
576
- stride = self.split_input_params["stride"] # eg. (64, 64)
577
-
578
- h, w = x_noisy.shape[-2:]
579
-
580
- fold, unfold, normalization, weighting = self.get_fold_unfold(x_noisy, ks, stride)
581
-
582
- z = unfold(x_noisy) # (bn, nc * prod(**ks), L)
583
- # Reshape to img shape
584
- z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L )
585
- z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])]
586
-
587
- if self.cond_stage_key in ["image", "LR_image", "segmentation",
588
- 'bbox_img'] and self.model.conditioning_key: # todo check for completeness
589
- c_key = next(iter(cond.keys())) # get key
590
- c = next(iter(cond.values())) # get value
591
- assert (len(c) == 1) # todo extend to list with more than one elem
592
- c = c[0] # get element
593
-
594
- c = unfold(c)
595
- c = c.view((c.shape[0], -1, ks[0], ks[1], c.shape[-1])) # (bn, nc, ks[0], ks[1], L )
596
-
597
- cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])]
598
-
599
- elif self.cond_stage_key == 'coordinates_bbox':
600
- assert 'original_image_size' in self.split_input_params, 'BoudingBoxRescaling is missing original_image_size'
601
-
602
- # assuming padding of unfold is always 0 and its dilation is always 1
603
- n_patches_per_row = int((w - ks[0]) / stride[0] + 1)
604
- full_img_h, full_img_w = self.split_input_params['original_image_size']
605
- # as we are operating on latents, we need the factor from the original image size to the
606
- # spatial latent size to properly rescale the crops for regenerating the bbox annotations
607
- num_downs = self.first_stage_model.encoder.num_resolutions - 1
608
- rescale_latent = 2 ** (num_downs)
609
-
610
- # get top left postions of patches as conforming for the bbbox tokenizer, therefore we
611
- # need to rescale the tl patch coordinates to be in between (0,1)
612
- tl_patch_coordinates = [(rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w,
613
- rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h)
614
- for patch_nr in range(z.shape[-1])]
615
-
616
- # patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w)
617
- patch_limits = [(x_tl, y_tl,
618
- rescale_latent * ks[0] / full_img_w,
619
- rescale_latent * ks[1] / full_img_h) for x_tl, y_tl in tl_patch_coordinates]
620
- # patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates]
621
-
622
- # tokenize crop coordinates for the bounding boxes of the respective patches
623
- patch_limits_tknzd = [torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[None].to(self.device)
624
- for bbox in patch_limits] # list of length l with tensors of shape (1, 2)
625
- print(patch_limits_tknzd[0].shape)
626
- # cut tknzd crop position from conditioning
627
- assert isinstance(cond, dict), 'cond must be dict to be fed into model'
628
- cut_cond = cond['c_crossattn'][0][..., :-2].to(self.device)
629
- print(cut_cond.shape)
630
-
631
- adapted_cond = torch.stack([torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd])
632
- adapted_cond = rearrange(adapted_cond, 'l b n -> (l b) n')
633
- print(adapted_cond.shape)
634
- adapted_cond = self.get_learned_conditioning(adapted_cond)
635
- print(adapted_cond.shape)
636
- adapted_cond = rearrange(adapted_cond, '(l b) n d -> l b n d', l=z.shape[-1])
637
- print(adapted_cond.shape)
638
-
639
- cond_list = [{'c_crossattn': [e]} for e in adapted_cond]
640
-
641
- else:
642
- cond_list = [cond for i in range(z.shape[-1])] # Todo make this more efficient
643
-
644
- # apply model by loop over crops
645
- output_list = [self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1])]
646
- assert not isinstance(output_list[0],
647
- tuple) # todo cant deal with multiple model outputs check this never happens
648
-
649
- o = torch.stack(output_list, axis=-1)
650
- o = o * weighting
651
- # Reverse reshape to img shape
652
- o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L)
653
- # stitch crops together
654
- x_recon = fold(o) / normalization
655
-
656
- else:
657
- x_recon = self.model(x_noisy, t, **cond)
658
-
659
- if isinstance(x_recon, tuple) and not return_ids:
660
- return x_recon[0]
661
- else:
662
- return x_recon
663
-
664
- def _predict_eps_from_xstart(self, x_t, t, pred_xstart):
665
- return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \
666
- extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
667
-
668
- def _prior_bpd(self, x_start):
669
- """
670
- Get the prior KL term for the variational lower-bound, measured in
671
- bits-per-dim.
672
- This term can't be optimized, as it only depends on the encoder.
673
- :param x_start: the [N x C x ...] tensor of inputs.
674
- :return: a batch of [N] KL values (in bits), one per batch element.
675
- """
676
- batch_size = x_start.shape[0]
677
- t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)
678
- qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)
679
- kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)
680
- return mean_flat(kl_prior) / np.log(2.0)
681
-
682
- def p_losses(self, x_start, cond, t, noise=None):
683
- noise = default(noise, lambda: torch.randn_like(x_start))
684
- x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
685
- model_output = self.apply_model(x_noisy, t, cond)
686
-
687
- loss_dict = {}
688
- prefix = 'train' if self.training else 'val'
689
-
690
- if self.parameterization == "x0":
691
- target = x_start
692
- elif self.parameterization == "eps":
693
- target = noise
694
- else:
695
- raise NotImplementedError()
696
-
697
- loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3])
698
- loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()})
699
-
700
- logvar_t = self.logvar[t].to(self.device)
701
- loss = loss_simple / torch.exp(logvar_t) + logvar_t
702
- # loss = loss_simple / torch.exp(self.logvar) + self.logvar
703
- if self.learn_logvar:
704
- loss_dict.update({f'{prefix}/loss_gamma': loss.mean()})
705
- loss_dict.update({'logvar': self.logvar.data.mean()})
706
-
707
- loss = self.l_simple_weight * loss.mean()
708
-
709
- loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3))
710
- loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean()
711
- loss_dict.update({f'{prefix}/loss_vlb': loss_vlb})
712
- loss += (self.original_elbo_weight * loss_vlb)
713
- loss_dict.update({f'{prefix}/loss': loss})
714
-
715
- return loss, loss_dict
716
-
717
- def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False,
718
- return_x0=False, score_corrector=None, corrector_kwargs=None):
719
- t_in = t
720
- model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids)
721
-
722
- if score_corrector is not None:
723
- assert self.parameterization == "eps"
724
- model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs)
725
-
726
- if return_codebook_ids:
727
- model_out, logits = model_out
728
-
729
- if self.parameterization == "eps":
730
- x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)
731
- elif self.parameterization == "x0":
732
- x_recon = model_out
733
- else:
734
- raise NotImplementedError()
735
-
736
- if clip_denoised:
737
- x_recon.clamp_(-1., 1.)
738
- if quantize_denoised:
739
- x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon)
740
- model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
741
- if return_codebook_ids:
742
- return model_mean, posterior_variance, posterior_log_variance, logits
743
- elif return_x0:
744
- return model_mean, posterior_variance, posterior_log_variance, x_recon
745
- else:
746
- return model_mean, posterior_variance, posterior_log_variance
747
-
748
- @torch.no_grad()
749
- def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False,
750
- return_codebook_ids=False, quantize_denoised=False, return_x0=False,
751
- temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None):
752
- b, *_, device = *x.shape, x.device
753
- outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised,
754
- return_codebook_ids=return_codebook_ids,
755
- quantize_denoised=quantize_denoised,
756
- return_x0=return_x0,
757
- score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)
758
- if return_codebook_ids:
759
- raise DeprecationWarning("Support dropped.")
760
- model_mean, _, model_log_variance, logits = outputs
761
- elif return_x0:
762
- model_mean, _, model_log_variance, x0 = outputs
763
- else:
764
- model_mean, _, model_log_variance = outputs
765
-
766
- noise = noise_like(x.shape, device, repeat_noise) * temperature
767
- if noise_dropout > 0.:
768
- noise = torch.nn.functional.dropout(noise, p=noise_dropout)
769
- # no noise when t == 0
770
- nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
771
-
772
- if return_codebook_ids:
773
- return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1)
774
- if return_x0:
775
- return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0
776
- else:
777
- return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
778
-
779
- @torch.no_grad()
780
- def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False,
781
- img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0.,
782
- score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None,
783
- log_every_t=None):
784
- if not log_every_t:
785
- log_every_t = self.log_every_t
786
- timesteps = self.num_timesteps
787
- if batch_size is not None:
788
- b = batch_size if batch_size is not None else shape[0]
789
- shape = [batch_size] + list(shape)
790
- else:
791
- b = batch_size = shape[0]
792
- if x_T is None:
793
- img = torch.randn(shape, device=self.device)
794
- else:
795
- img = x_T
796
- intermediates = []
797
- if cond is not None:
798
- if isinstance(cond, dict):
799
- cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
800
- list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
801
- else:
802
- cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
803
-
804
- if start_T is not None:
805
- timesteps = min(timesteps, start_T)
806
- iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation',
807
- total=timesteps) if verbose else reversed(
808
- range(0, timesteps))
809
- if type(temperature) == float:
810
- temperature = [temperature] * timesteps
811
-
812
- for i in iterator:
813
- ts = torch.full((b,), i, device=self.device, dtype=torch.long)
814
- if self.shorten_cond_schedule:
815
- assert self.model.conditioning_key != 'hybrid'
816
- tc = self.cond_ids[ts].to(cond.device)
817
- cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))
818
-
819
- img, x0_partial = self.p_sample(img, cond, ts,
820
- clip_denoised=self.clip_denoised,
821
- quantize_denoised=quantize_denoised, return_x0=True,
822
- temperature=temperature[i], noise_dropout=noise_dropout,
823
- score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)
824
- if mask is not None:
825
- assert x0 is not None
826
- img_orig = self.q_sample(x0, ts)
827
- img = img_orig * mask + (1. - mask) * img
828
-
829
- if i % log_every_t == 0 or i == timesteps - 1:
830
- intermediates.append(x0_partial)
831
- if callback: callback(i)
832
- if img_callback: img_callback(img, i)
833
- return img, intermediates
834
-
835
- @torch.no_grad()
836
- def p_sample_loop(self, cond, shape, return_intermediates=False,
837
- x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False,
838
- mask=None, x0=None, img_callback=None, start_T=None,
839
- log_every_t=None):
840
-
841
- if not log_every_t:
842
- log_every_t = self.log_every_t
843
- device = self.betas.device
844
- b = shape[0]
845
- if x_T is None:
846
- img = torch.randn(shape, device=device)
847
- else:
848
- img = x_T
849
-
850
- intermediates = [img]
851
- if timesteps is None:
852
- timesteps = self.num_timesteps
853
-
854
- if start_T is not None:
855
- timesteps = min(timesteps, start_T)
856
- iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed(
857
- range(0, timesteps))
858
-
859
- if mask is not None:
860
- assert x0 is not None
861
- assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match
862
-
863
- for i in iterator:
864
- ts = torch.full((b,), i, device=device, dtype=torch.long)
865
- if self.shorten_cond_schedule:
866
- assert self.model.conditioning_key != 'hybrid'
867
- tc = self.cond_ids[ts].to(cond.device)
868
- cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))
869
-
870
- img = self.p_sample(img, cond, ts,
871
- clip_denoised=self.clip_denoised,
872
- quantize_denoised=quantize_denoised)
873
- if mask is not None:
874
- img_orig = self.q_sample(x0, ts)
875
- img = img_orig * mask + (1. - mask) * img
876
-
877
- if i % log_every_t == 0 or i == timesteps - 1:
878
- intermediates.append(img)
879
- if callback: callback(i)
880
- if img_callback: img_callback(img, i)
881
-
882
- if return_intermediates:
883
- return img, intermediates
884
- return img
885
-
886
- @torch.no_grad()
887
- def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None,
888
- verbose=True, timesteps=None, quantize_denoised=False,
889
- mask=None, x0=None, shape=None,**kwargs):
890
- if shape is None:
891
- shape = (batch_size, self.channels, self.mel_dim, self.mel_length)
892
- if cond is not None:
893
- if isinstance(cond, dict):
894
- cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
895
- list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
896
- else:
897
- cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
898
- return self.p_sample_loop(cond,
899
- shape,
900
- return_intermediates=return_intermediates, x_T=x_T,
901
- verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised,
902
- mask=mask, x0=x0)
903
-
904
- @torch.no_grad()
905
- def sample_log(self,cond,batch_size,ddim, ddim_steps,**kwargs):
906
-
907
- if ddim:
908
- ddim_sampler = DDIMSampler(self)
909
- shape = (self.channels, self.mel_dim, self.mel_length)
910
- samples, intermediates =ddim_sampler.sample(ddim_steps,batch_size,
911
- shape,cond,verbose=False,**kwargs)
912
-
913
- else:
914
- samples, intermediates = self.sample(cond=cond, batch_size=batch_size,
915
- return_intermediates=True,**kwargs)
916
-
917
- return samples, intermediates
918
-
919
-
920
- @torch.no_grad()
921
- def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None,
922
- quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True,
923
- plot_diffusion_rows=True, **kwargs):
924
-
925
- use_ddim = ddim_steps is not None
926
-
927
- log = dict()
928
- z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key,
929
- return_first_stage_outputs=True,
930
- force_c_encode=True,
931
- return_original_cond=True,
932
- bs=N)
933
- N = min(x.shape[0], N)
934
- n_row = min(x.shape[0], n_row)
935
- log["inputs"] = x
936
- log["reconstruction"] = xrec
937
- if self.model.conditioning_key is not None:
938
- if hasattr(self.cond_stage_model, "decode") and self.cond_stage_key != "masked_image":
939
- xc = self.cond_stage_model.decode(c)
940
- log["conditioning"] = xc
941
- elif self.cond_stage_key == "masked_image":
942
- log["mask"] = c[:, -1, :, :][:, None, :, :]
943
- xc = self.cond_stage_model.decode(c[:, :self.cond_stage_model.embed_dim, :, :])
944
- log["conditioning"] = xc
945
- elif self.cond_stage_key in ["caption"]:
946
- xc = log_txt_as_img((256, 256), batch["caption"])
947
- log["conditioning"] = xc
948
- elif self.cond_stage_key == 'class_label':
949
- xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"])
950
- log['conditioning'] = xc
951
- elif isimage(xc):
952
- log["conditioning"] = xc
953
- if ismap(xc):
954
- log["original_conditioning"] = self.to_rgb(xc)
955
-
956
- if plot_diffusion_rows:
957
- # get diffusion row
958
- diffusion_row = list()
959
- z_start = z[:n_row]
960
- for t in range(self.num_timesteps):
961
- if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
962
- t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
963
- t = t.to(self.device).long()
964
- noise = torch.randn_like(z_start)
965
- z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise)
966
- diffusion_row.append(self.decode_first_stage(z_noisy))
967
-
968
- diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W
969
- diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w')
970
- diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w')
971
- diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0])
972
- log["diffusion_row"] = diffusion_grid
973
-
974
- if sample:
975
- # get denoise row
976
- with self.ema_scope("Plotting"):
977
- samples, z_denoise_row = self.sample_log(cond=c,batch_size=N,ddim=use_ddim,
978
- ddim_steps=ddim_steps,eta=ddim_eta)
979
- # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True)
980
- x_samples = self.decode_first_stage(samples)
981
- log["samples"] = x_samples
982
- if plot_denoise_rows:
983
- denoise_grid = self._get_denoise_row_from_list(z_denoise_row)
984
- log["denoise_row"] = denoise_grid
985
-
986
- if quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance(
987
- self.first_stage_model, IdentityFirstStage):
988
- # also display when quantizing x0 while sampling
989
- with self.ema_scope("Plotting Quantized Denoised"):
990
- samples, z_denoise_row = self.sample_log(cond=c,batch_size=N,ddim=use_ddim,
991
- ddim_steps=ddim_steps,eta=ddim_eta,
992
- quantize_denoised=True)
993
- # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True,
994
- # quantize_denoised=True)
995
- x_samples = self.decode_first_stage(samples.to(self.device))
996
- log["samples_x0_quantized"] = x_samples
997
-
998
- if inpaint:
999
- # make a simple center square
1000
- b, h, w = z.shape[0], z.shape[2], z.shape[3]
1001
- mask = torch.ones(N, h, w).to(self.device)
1002
- # zeros will be filled in
1003
- mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0.
1004
- mask = mask[:, None, ...]
1005
- with self.ema_scope("Plotting Inpaint"):
1006
-
1007
- samples, _ = self.sample_log(cond=c,batch_size=N,ddim=use_ddim, eta=ddim_eta,
1008
- ddim_steps=ddim_steps, x0=z[:N], mask=mask)
1009
- x_samples = self.decode_first_stage(samples.to(self.device))
1010
- log["samples_inpainting"] = x_samples
1011
- log["mask_inpainting"] = mask
1012
-
1013
- # outpaint
1014
- mask = 1 - mask
1015
- with self.ema_scope("Plotting Outpaint"):
1016
- samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,eta=ddim_eta,
1017
- ddim_steps=ddim_steps, x0=z[:N], mask=mask)
1018
- x_samples = self.decode_first_stage(samples.to(self.device))
1019
- log["samples_outpainting"] = x_samples
1020
- log["mask_outpainting"] = mask
1021
-
1022
- if plot_progressive_rows:
1023
- with self.ema_scope("Plotting Progressives"):
1024
- img, progressives = self.progressive_denoising(c,
1025
- shape=(self.channels, self.mel_dim, self.mel_length),
1026
- batch_size=N)
1027
- prog_row = self._get_denoise_row_from_list(progressives, desc="Progressive Generation")
1028
- log["progressive_row"] = prog_row
1029
-
1030
- if return_keys:
1031
- if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:
1032
- return log
1033
- else:
1034
- return {key: log[key] for key in return_keys}
1035
- return log
1036
-
1037
- def configure_optimizers(self):
1038
- lr = self.learning_rate
1039
- params = list(self.model.parameters())
1040
- if self.cond_stage_trainable:
1041
- print(f"{self.__class__.__name__}: Also optimizing conditioner params!")
1042
- params = params + list(self.cond_stage_model.parameters())
1043
- if self.learn_logvar:
1044
- print('Diffusion model optimizing logvar')
1045
- params.append(self.logvar)
1046
- opt = torch.optim.AdamW(params, lr=lr)
1047
- if self.use_scheduler:
1048
- assert 'target' in self.scheduler_config
1049
- scheduler = instantiate_from_config(self.scheduler_config)
1050
-
1051
- print("Setting up LambdaLR scheduler...")
1052
- scheduler = [
1053
- {
1054
- 'scheduler': LambdaLR(opt, lr_lambda=scheduler.schedule),
1055
- 'interval': 'step',
1056
- 'frequency': 1
1057
- }]
1058
- return [opt], scheduler
1059
- return opt
1060
-
1061
- @torch.no_grad()
1062
- def to_rgb(self, x):
1063
- x = x.float()
1064
- if not hasattr(self, "colorize"):
1065
- self.colorize = torch.randn(3, x.shape[1], 1, 1).to(x)
1066
- x = nn.functional.conv2d(x, weight=self.colorize)
1067
- x = 2. * (x - x.min()) / (x.max() - x.min()) - 1.
1068
- return x
1069
-
1070
-
1071
- class LatentFinetuneDiffusion(LatentDiffusion_audio):
1072
- """
1073
- Basis for different finetunas, such as inpainting or depth2image
1074
- To disable finetuning mode, set finetune_keys to None
1075
- """
1076
-
1077
- def __init__(self,
1078
- concat_keys: tuple,
1079
- finetune_keys=("model.diffusion_model.input_blocks.0.0.weight",
1080
- "model_ema.diffusion_modelinput_blocks00weight"
1081
- ),
1082
- keep_finetune_dims=4,
1083
- # if model was trained without concat mode before and we would like to keep these channels
1084
- c_concat_log_start=None, # to log reconstruction of c_concat codes
1085
- c_concat_log_end=None,
1086
- *args, **kwargs
1087
- ):
1088
- ckpt_path = kwargs.pop("ckpt_path", None)
1089
- ignore_keys = kwargs.pop("ignore_keys", list())
1090
- super().__init__(*args, **kwargs)
1091
- self.finetune_keys = finetune_keys
1092
- self.concat_keys = concat_keys
1093
- self.keep_dims = keep_finetune_dims
1094
- self.c_concat_log_start = c_concat_log_start
1095
- self.c_concat_log_end = c_concat_log_end
1096
-
1097
- if exists(self.finetune_keys): assert exists(ckpt_path), 'can only finetune from a given checkpoint'
1098
- if exists(ckpt_path):
1099
- self.init_from_ckpt(ckpt_path, ignore_keys)
1100
-
1101
- def init_from_ckpt(self, path, ignore_keys=list(), only_model=False):
1102
- sd = torch.load(path, map_location="cpu")
1103
- if "state_dict" in list(sd.keys()):
1104
- sd = sd["state_dict"]
1105
- keys = list(sd.keys())
1106
-
1107
- for k in keys:
1108
- for ik in ignore_keys:
1109
- if k.startswith(ik):
1110
- print("Deleting key {} from state_dict.".format(k))
1111
- del sd[k]
1112
-
1113
- # make it explicit, finetune by including extra input channels
1114
- if exists(self.finetune_keys) and k in self.finetune_keys:
1115
- new_entry = None
1116
- for name, param in self.named_parameters():
1117
- if name in self.finetune_keys:
1118
- print(
1119
- f"modifying key '{name}' and keeping its original {self.keep_dims} (channels) dimensions only")
1120
- new_entry = torch.zeros_like(param) # zero init
1121
- assert exists(new_entry), 'did not find matching parameter to modify'
1122
- new_entry[:, :self.keep_dims, ...] = sd[k]
1123
- sd[k] = new_entry
1124
-
1125
- missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(sd, strict=False)
1126
- print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
1127
- if len(missing) > 0:
1128
- print(f"Missing Keys: {missing}")
1129
- if len(unexpected) > 0:
1130
- print(f"Unexpected Keys: {unexpected}")
1131
-
1132
- @torch.no_grad()
1133
- def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None,
1134
- quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True,
1135
- plot_diffusion_rows=True, unconditional_guidance_scale=1., unconditional_guidance_label=None,
1136
- use_ema_scope=True,
1137
- **kwargs):
1138
- use_ddim = ddim_steps is not None
1139
-
1140
- log = dict()
1141
- z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, bs=N, return_first_stage_outputs=True)
1142
- c_cat, c = c["c_concat"][0], c["c_crossattn"][0]
1143
- N = min(x.shape[0], N)
1144
- n_row = min(x.shape[0], n_row)
1145
- log["inputs"] = x
1146
- log["reconstruction"] = xrec
1147
- if self.model.conditioning_key is not None:
1148
- if hasattr(self.cond_stage_model, "decode"):
1149
- xc = self.cond_stage_model.decode(c)
1150
- log["conditioning"] = xc
1151
- elif self.cond_stage_key in ["caption"]:
1152
- xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["caption"])
1153
- log["conditioning"] = xc
1154
- elif self.cond_stage_key == 'class_label':
1155
- xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"])
1156
- log['conditioning'] = xc
1157
- elif isimage(xc):
1158
- log["conditioning"] = xc
1159
- if ismap(xc):
1160
- log["original_conditioning"] = self.to_rgb(xc)
1161
-
1162
- if not (self.c_concat_log_start is None and self.c_concat_log_end is None):
1163
- log["c_concat_decoded"] = self.decode_first_stage(c_cat[:, self.c_concat_log_start:self.c_concat_log_end])
1164
-
1165
- if plot_diffusion_rows:
1166
- # get diffusion row
1167
- diffusion_row = list()
1168
- z_start = z[:n_row]
1169
- for t in range(self.num_timesteps):
1170
- if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
1171
- t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
1172
- t = t.to(self.device).long()
1173
- noise = torch.randn_like(z_start)
1174
- z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise)
1175
- diffusion_row.append(self.decode_first_stage(z_noisy))
1176
-
1177
- diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W
1178
- diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w')
1179
- diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w')
1180
- diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0])
1181
- log["diffusion_row"] = diffusion_grid
1182
-
1183
- if sample:
1184
- # get denoise row
1185
- with self.ema_scope("Sampling"):
1186
- samples, z_denoise_row = self.sample_log(cond={"c_concat": [c_cat], "c_crossattn": [c]},
1187
- batch_size=N, ddim=use_ddim,
1188
- ddim_steps=ddim_steps, eta=ddim_eta)
1189
- # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True)
1190
- x_samples = self.decode_first_stage(samples)
1191
- log["samples"] = x_samples
1192
- if plot_denoise_rows:
1193
- denoise_grid = self._get_denoise_row_from_list(z_denoise_row)
1194
- log["denoise_row"] = denoise_grid
1195
-
1196
- if unconditional_guidance_scale > 1.0:
1197
- uc_cross = self.get_unconditional_conditioning(N, unconditional_guidance_label)
1198
- uc_cat = c_cat
1199
- uc_full = {"c_concat": [uc_cat], "c_crossattn": [uc_cross]}
1200
- with self.ema_scope("Sampling with classifier-free guidance"):
1201
- samples_cfg, _ = self.sample_log(cond={"c_concat": [c_cat], "c_crossattn": [c]},
1202
- batch_size=N, ddim=use_ddim,
1203
- ddim_steps=ddim_steps, eta=ddim_eta,
1204
- unconditional_guidance_scale=unconditional_guidance_scale,
1205
- unconditional_conditioning=uc_full,
1206
- )
1207
- x_samples_cfg = self.decode_first_stage(samples_cfg)
1208
- log[f"samples_cfg_scale_{unconditional_guidance_scale:.2f}"] = x_samples_cfg
1209
-
1210
- return log
1211
-
1212
-
1213
- class LatentInpaintDiffusion(LatentFinetuneDiffusion):
1214
- """
1215
- can either run as pure inpainting model (only concat mode) or with mixed conditionings,
1216
- e.g. mask as concat and text via cross-attn.
1217
- To disable finetuning mode, set finetune_keys to None
1218
- """
1219
-
1220
- def __init__(self,
1221
- concat_keys=("mask", "masked_image"),
1222
- masked_image_key="masked_image",
1223
- *args, **kwargs
1224
- ):
1225
- super().__init__(concat_keys, *args, **kwargs)
1226
- self.masked_image_key = masked_image_key
1227
- assert self.masked_image_key in concat_keys
1228
-
1229
- @torch.no_grad()
1230
- def get_input(self, batch, k, cond_key=None, bs=None, return_first_stage_outputs=False):
1231
- # note: restricted to non-trainable encoders currently
1232
- assert not self.cond_stage_trainable, 'trainable cond stages not yet supported for inpainting'
1233
- z, c, x, xrec, xc = super().get_input(batch, self.first_stage_key, return_first_stage_outputs=True,
1234
- force_c_encode=True, return_original_cond=True, bs=bs)
1235
-
1236
- assert exists(self.concat_keys)
1237
- c_cat = list()
1238
- for ck in self.concat_keys:
1239
- if len(batch[ck].shape) == 3:
1240
- batch[ck] = batch[ck][..., None]
1241
- cc = rearrange(batch[ck], 'b h w c -> b c h w').to(memory_format=torch.contiguous_format).float()
1242
- if bs is not None:
1243
- cc = cc[:bs]
1244
- cc = cc.to(self.device)
1245
- bchw = z.shape
1246
- if ck != self.masked_image_key:
1247
- cc = torch.nn.functional.interpolate(cc, size=bchw[-2:])
1248
- else:
1249
- cc = self.get_first_stage_encoding(self.encode_first_stage(cc))
1250
- c_cat.append(cc)
1251
- c_cat = torch.cat(c_cat, dim=1)
1252
- all_conds = {"c_concat": [c_cat], "c_crossattn": [c]}
1253
- if return_first_stage_outputs:
1254
- return z, all_conds, x, xrec, xc
1255
- return z, all_conds
1256
-
1257
- @torch.no_grad()
1258
- def log_images(self, *args, **kwargs):
1259
- log = super(LatentInpaintDiffusion, self).log_images(*args, **kwargs)
1260
- log["masked_image"] = rearrange(args[0]["masked_image"],
1261
- 'b h w c -> b c h w').to(memory_format=torch.contiguous_format).float()
1262
- return log
 
spaces/AIGE/A_B/README.md DELETED
@@ -1,12 +0,0 @@
- ---
- title: A B
- emoji: 💻
- colorFrom: purple
- colorTo: green
- sdk: gradio
- sdk_version: 3.29.0
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/AIZeroToHero/04-Image2OCR/app.py DELETED
@@ -1,54 +0,0 @@
- import pandas as pd
- import PIL
- from PIL import Image
- from PIL import ImageDraw
- import gradio as gr
- import torch
- import easyocr
-
- torch.hub.download_url_to_file('https://github.com/JaidedAI/EasyOCR/raw/master/examples/english.png', 'english.png')
- torch.hub.download_url_to_file('https://github.com/JaidedAI/EasyOCR/raw/master/examples/chinese.jpg', 'chinese.jpg')
- torch.hub.download_url_to_file('https://github.com/JaidedAI/EasyOCR/raw/master/examples/japanese.jpg', 'japanese.jpg')
- torch.hub.download_url_to_file('https://i.imgur.com/mwQFd7G.jpeg', 'Hindi.jpeg')
-
- def draw_boxes(image, bounds, color='yellow', width=2):
-     draw = ImageDraw.Draw(image)
-     for bound in bounds:
-         p0, p1, p2, p3 = bound[0]
-         draw.line([*p0, *p1, *p2, *p3, *p0], fill=color, width=width)
-     return image
-
- def inference(img, lang):
-     reader = easyocr.Reader(lang)
-     bounds = reader.readtext(img.name)
-     im = PIL.Image.open(img.name)
-     draw_boxes(im, bounds)
-     im.save('result.jpg')
-     return ['result.jpg', pd.DataFrame(bounds).iloc[: , 1:]]
-
- title = 'Image To Optical Character Recognition'
- description = 'Multilingual OCR which works conveniently on all devices in multiple languages.'
- article = "<p style='text-align: center'></p>"
- examples = [['english.png',['en']],['chinese.jpg',['ch_sim', 'en']],['japanese.jpg',['ja', 'en']],['Hindi.jpeg',['hi', 'en']]]
- css = ".output_image, .input_image {height: 40rem !important; width: 100% !important;}"
- choices = [
-     "ch_sim",
-     "ch_tra",
-     "de",
-     "en",
-     "es",
-     "ja",
-     "hi",
-     "ru"
- ]
- gr.Interface(
-     inference,
-     [gr.inputs.Image(type='file', label='Input'),gr.inputs.CheckboxGroup(choices, type="value", default=['en'], label='language')],
-     [gr.outputs.Image(type='file', label='Output'), gr.outputs.Dataframe(headers=['text', 'confidence'])],
-     title=title,
-     description=description,
-     article=article,
-     examples=examples,
-     css=css,
-     enable_queue=True
- ).launch(debug=True)
 
spaces/AP123/dreamgaussian/process.py DELETED
@@ -1,92 +0,0 @@
- import os
- import glob
- import sys
- import cv2
- import argparse
- import numpy as np
- import matplotlib.pyplot as plt
-
- import torch
- import torch.nn as nn
- import torch.nn.functional as F
- from torchvision import transforms
- from PIL import Image
- import rembg
-
- class BLIP2():
-     def __init__(self, device='cuda'):
-         self.device = device
-         from transformers import AutoProcessor, Blip2ForConditionalGeneration
-         self.processor = AutoProcessor.from_pretrained("Salesforce/blip2-opt-2.7b")
-         self.model = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-opt-2.7b", torch_dtype=torch.float16).to(device)
-
-     @torch.no_grad()
-     def __call__(self, image):
-         image = Image.fromarray(image)
-         inputs = self.processor(image, return_tensors="pt").to(self.device, torch.float16)
-
-         generated_ids = self.model.generate(**inputs, max_new_tokens=20)
-         generated_text = self.processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
-
-         return generated_text
-
-
- if __name__ == '__main__':
-
-     parser = argparse.ArgumentParser()
-     parser.add_argument('path', type=str, help="path to image (png, jpeg, etc.)")
-     parser.add_argument('--model', default='u2net', type=str, help="rembg model, see https://github.com/danielgatis/rembg#models")
-     parser.add_argument('--size', default=256, type=int, help="output resolution")
-     parser.add_argument('--border_ratio', default=0.2, type=float, help="output border ratio")
-     parser.add_argument('--recenter', type=bool, default=True, help="recenter, potentially not helpful for multiview zero123")
-     opt = parser.parse_args()
-
-     session = rembg.new_session(model_name=opt.model)
-
-     if os.path.isdir(opt.path):
-         print(f'[INFO] processing directory {opt.path}...')
-         files = glob.glob(f'{opt.path}/*')
-         out_dir = opt.path
-     else: # isfile
-         files = [opt.path]
-         out_dir = os.path.dirname(opt.path)
-
-     for file in files:
-
-         out_base = os.path.basename(file).split('.')[0]
-         out_rgba = os.path.join(out_dir, out_base + '_rgba.png')
-
-         # load image
-         print(f'[INFO] loading image {file}...')
-         image = cv2.imread(file, cv2.IMREAD_UNCHANGED)
-
-         # carve background
-         print(f'[INFO] background removal...')
-         carved_image = rembg.remove(image, session=session) # [H, W, 4]
-         mask = carved_image[..., -1] > 0
-
-         # recenter
-         if opt.recenter:
-             print(f'[INFO] recenter...')
-             final_rgba = np.zeros((opt.size, opt.size, 4), dtype=np.uint8)
-
-             coords = np.nonzero(mask)
-             x_min, x_max = coords[0].min(), coords[0].max()
-             y_min, y_max = coords[1].min(), coords[1].max()
-             h = x_max - x_min
-             w = y_max - y_min
-             desired_size = int(opt.size * (1 - opt.border_ratio))
-             scale = desired_size / max(h, w)
-             h2 = int(h * scale)
-             w2 = int(w * scale)
-             x2_min = (opt.size - h2) // 2
-             x2_max = x2_min + h2
-             y2_min = (opt.size - w2) // 2
-             y2_max = y2_min + w2
-             final_rgba[x2_min:x2_max, y2_min:y2_max] = cv2.resize(carved_image[x_min:x_max, y_min:y_max], (w2, h2), interpolation=cv2.INTER_AREA)
-
-         else:
-             final_rgba = carved_image
-
-         # write image
-         cv2.imwrite(out_rgba, final_rgba)
 
spaces/AchyuthGamer/OpenGPT/g4f/Provider/deprecated/Lockchat.py DELETED
@@ -1,64 +0,0 @@
- from __future__ import annotations
-
- import json
-
- import requests
-
- from ...typing import Any, CreateResult
- from ..base_provider import BaseProvider
-
-
- class Lockchat(BaseProvider):
-     url: str = "http://supertest.lockchat.app"
-     supports_stream = True
-     supports_gpt_35_turbo = True
-     supports_gpt_4 = True
-
-     @staticmethod
-     def create_completion(
-         model: str,
-         messages: list[dict[str, str]],
-         stream: bool, **kwargs: Any) -> CreateResult:
-
-         temperature = float(kwargs.get("temperature", 0.7))
-         payload = {
-             "temperature": temperature,
-             "messages" : messages,
-             "model" : model,
-             "stream" : True,
-         }
-
-         headers = {
-             "user-agent": "ChatX/39 CFNetwork/1408.0.4 Darwin/22.5.0",
-         }
-         response = requests.post("http://supertest.lockchat.app/v1/chat/completions",
-                                  json=payload, headers=headers, stream=True)
-
-         response.raise_for_status()
-         for token in response.iter_lines():
-             if b"The model: `gpt-4` does not exist" in token:
-                 print("error, retrying...")
-                 Lockchat.create_completion(
-                     model = model,
-                     messages = messages,
-                     stream = stream,
-                     temperature = temperature,
-                     **kwargs)
-
-             if b"content" in token:
-                 token = json.loads(token.decode("utf-8").split("data: ")[1])
-                 token = token["choices"][0]["delta"].get("content")
-                 if token:
-                     yield (token)
-
-     @classmethod
-     @property
-     def params(cls):
-         params = [
-             ("model", "str"),
-             ("messages", "list[dict[str, str]]"),
-             ("stream", "bool"),
-             ("temperature", "float"),
-         ]
-         param = ", ".join([": ".join(p) for p in params])
-         return f"g4f.provider.{cls.__name__} supports: ({param})"
 
spaces/AiMimicry/sovits-models/inference_main.py DELETED
@@ -1,130 +0,0 @@
- import io
- import logging
- import time
- from pathlib import Path
-
- import librosa
- import matplotlib.pyplot as plt
- import numpy as np
- import soundfile
-
- from inference import infer_tool
- from inference import slicer
- from inference.infer_tool import Svc
-
- logging.getLogger('numba').setLevel(logging.WARNING)
- chunks_dict = infer_tool.read_temp("inference/chunks_temp.json")
-
-
-
- def main():
-     import argparse
-
-     parser = argparse.ArgumentParser(description='sovits4 inference')
-
-     # Required settings
-     parser.add_argument('-m', '--model_path', type=str, default="logs/44k/G_0.pth", help='model path')
-     parser.add_argument('-c', '--config_path', type=str, default="configs/config.json", help='config file path')
-     parser.add_argument('-cl', '--clip', type=float, default=0, help='forced audio slicing; the default 0 means automatic slicing, unit: seconds')
-     parser.add_argument('-n', '--clean_names', type=str, nargs='+', default=["君の知らない物語-src.wav"], help='list of wav file names placed under the raw folder')
-     parser.add_argument('-t', '--trans', type=int, nargs='+', default=[0], help='pitch shift, positive or negative (semitones)')
-     parser.add_argument('-s', '--spk_list', type=str, nargs='+', default=['nen'], help='target speaker name(s) for synthesis')
-
-     # Optional settings
-     parser.add_argument('-a', '--auto_predict_f0', action='store_true', default=False,help='automatically predict pitch for voice conversion; do not enable this when converting singing voices or the result will go badly off key')
-     parser.add_argument('-cm', '--cluster_model_path', type=str, default="logs/44k/kmeans_10000.pt", help='cluster model path; any value will do if no cluster model was trained')
-     parser.add_argument('-cr', '--cluster_infer_ratio', type=float, default=0, help='clustering ratio, range 0-1; keep the default 0 if no cluster model was trained')
-     parser.add_argument('-lg', '--linear_gradient', type=float, default=0, help='cross-fade length between two audio slices; adjust if forced slicing makes the vocals discontinuous, otherwise keep the default 0, unit: seconds')
-     parser.add_argument('-fmp', '--f0_mean_pooling', type=bool, default=False, help='whether to apply a mean filter (pooling) to F0, which improves some hoarse outputs; note that enabling it slows down inference, off by default')
-
-     # Settings that normally do not need changing
-     parser.add_argument('-sd', '--slice_db', type=int, default=-40, help='default -40; use -30 for noisy audio, -50 to keep breaths in dry vocals')
-     parser.add_argument('-d', '--device', type=str, default=None, help='inference device; None selects cpu or gpu automatically')
-     parser.add_argument('-ns', '--noice_scale', type=float, default=0.4, help='noise level; affects articulation and audio quality, somewhat arcane')
-     parser.add_argument('-p', '--pad_seconds', type=float, default=0.5, help='seconds of padding for the inference audio; for unknown reasons artifacts appear at the start and end, and padding with a short silence removes them')
-     parser.add_argument('-wf', '--wav_format', type=str, default='flac', help='audio output format')
-     parser.add_argument('-lgr', '--linear_gradient_retain', type=float, default=0.75, help='after automatic slicing, the head and tail of each slice are discarded; this sets the proportion of the cross-fade length to keep, range 0-1, open on the left and closed on the right')
-
-     args = parser.parse_args()
-
-     svc_model = Svc(args.model_path, args.config_path, args.device, args.cluster_model_path)
-     infer_tool.mkdir(["raw", "results"])
-     clean_names = args.clean_names
-     trans = args.trans
-     spk_list = args.spk_list
-     slice_db = args.slice_db
-     wav_format = args.wav_format
-     auto_predict_f0 = args.auto_predict_f0
-     cluster_infer_ratio = args.cluster_infer_ratio
-     noice_scale = args.noice_scale
-     pad_seconds = args.pad_seconds
-     clip = args.clip
-     lg = args.linear_gradient
-     lgr = args.linear_gradient_retain
-     F0_mean_pooling = args.f0_mean_pooling
-
-     infer_tool.fill_a_to_b(trans, clean_names)
-     for clean_name, tran in zip(clean_names, trans):
-         raw_audio_path = f"raw/{clean_name}"
-         if "." not in raw_audio_path:
-             raw_audio_path += ".wav"
-         infer_tool.format_wav(raw_audio_path)
-         wav_path = Path(raw_audio_path).with_suffix('.wav')
-         chunks = slicer.cut(wav_path, db_thresh=slice_db)
-         audio_data, audio_sr = slicer.chunks2audio(wav_path, chunks)
-         per_size = int(clip*audio_sr)
-         lg_size = int(lg*audio_sr)
-         lg_size_r = int(lg_size*lgr)
-         lg_size_c_l = (lg_size-lg_size_r)//2
-         lg_size_c_r = lg_size-lg_size_r-lg_size_c_l
-         lg = np.linspace(0,1,lg_size_r) if lg_size!=0 else 0
-
-         for spk in spk_list:
-             audio = []
-             for (slice_tag, data) in audio_data:
-                 print(f'#=====segment start, {round(len(data) / audio_sr, 3)}s======')
-
-                 length = int(np.ceil(len(data) / audio_sr * svc_model.target_sample))
-                 if slice_tag:
-                     print('jump empty segment')
-                     _audio = np.zeros(length)
-                     audio.extend(list(infer_tool.pad_array(_audio, length)))
-                     continue
-                 if per_size != 0:
-                     datas = infer_tool.split_list_by_n(data, per_size,lg_size)
-                 else:
-                     datas = [data]
-                 for k,dat in enumerate(datas):
-                     per_length = int(np.ceil(len(dat) / audio_sr * svc_model.target_sample)) if clip!=0 else length
-                     if clip!=0: print(f'###=====segment clip start, {round(len(dat) / audio_sr, 3)}s======')
-                     # pad
-                     pad_len = int(audio_sr * pad_seconds)
-                     dat = np.concatenate([np.zeros([pad_len]), dat, np.zeros([pad_len])])
-                     raw_path = io.BytesIO()
-                     soundfile.write(raw_path, dat, audio_sr, format="wav")
-                     raw_path.seek(0)
-                     out_audio, out_sr = svc_model.infer(spk, tran, raw_path,
-                                                         cluster_infer_ratio=cluster_infer_ratio,
-                                                         auto_predict_f0=auto_predict_f0,
-                                                         noice_scale=noice_scale,
-                                                         F0_mean_pooling = F0_mean_pooling
-                                                         )
-                     _audio = out_audio.cpu().numpy()
-                     pad_len = int(svc_model.target_sample * pad_seconds)
-                     _audio = _audio[pad_len:-pad_len]
-                     _audio = infer_tool.pad_array(_audio, per_length)
-                     if lg_size!=0 and k!=0:
-                         lg1 = audio[-(lg_size_r+lg_size_c_r):-lg_size_c_r] if lgr != 1 else audio[-lg_size:]
-                         lg2 = _audio[lg_size_c_l:lg_size_c_l+lg_size_r] if lgr != 1 else _audio[0:lg_size]
-                         lg_pre = lg1*(1-lg)+lg2*lg
-                         audio = audio[0:-(lg_size_r+lg_size_c_r)] if lgr != 1 else audio[0:-lg_size]
-                         audio.extend(lg_pre)
-                         _audio = _audio[lg_size_c_l+lg_size_r:] if lgr != 1 else _audio[lg_size:]
-                     audio.extend(list(_audio))
-             key = "auto" if auto_predict_f0 else f"{tran}key"
-             cluster_name = "" if cluster_infer_ratio == 0 else f"_{cluster_infer_ratio}"
-             res_path = f'./results/{clean_name}_{key}_{spk}{cluster_name}.{wav_format}'
-             soundfile.write(res_path, audio, svc_model.target_sample, format=wav_format)
-
- if __name__ == '__main__':
-     main()
 
spaces/Akshay-Vs/GPT-Based-Generator/README.md DELETED
@@ -1,13 +0,0 @@
- ---
- title: GPT Based Generator
- emoji: 🦀
- colorFrom: blue
- colorTo: gray
- sdk: streamlit
- sdk_version: 1.10.0
- app_file: app.py
- pinned: false
- license: mit
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/Alealejandrooo/deathCertReader/README.md DELETED
@@ -1,13 +0,0 @@
- ---
- title: DeathCertifReader
- emoji: 🔥
- colorFrom: pink
- colorTo: blue
- sdk: gradio
- sdk_version: 3.28.0
- app_file: app.py
- pinned: false
- duplicated_from: LumeraDS/deathCertReader
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/Alpaca233/SadTalker/src/face3d/models/arcface_torch/configs/glint360k_r18.py DELETED
@@ -1,26 +0,0 @@
- from easydict import EasyDict as edict
-
- # make training faster
- # our RAM is 256G
- # mount -t tmpfs -o size=140G tmpfs /train_tmp
-
- config = edict()
- config.loss = "cosface"
- config.network = "r18"
- config.resume = False
- config.output = None
- config.embedding_size = 512
- config.sample_rate = 1.0
- config.fp16 = True
- config.momentum = 0.9
- config.weight_decay = 5e-4
- config.batch_size = 128
- config.lr = 0.1 # batch size is 512
-
- config.rec = "/train_tmp/glint360k"
- config.num_classes = 360232
- config.num_image = 17091657
- config.num_epoch = 20
- config.warmup_epoch = -1
- config.decay_epoch = [8, 12, 15, 18]
- config.val_targets = ["lfw", "cfp_fp", "agedb_30"]
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/controlnet/test_controlnet_sdxl.py DELETED
@@ -1,260 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2023 HuggingFace Inc.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- import unittest
17
-
18
- import numpy as np
19
- import torch
20
- from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
21
-
22
- from diffusers import (
23
- AutoencoderKL,
24
- ControlNetModel,
25
- EulerDiscreteScheduler,
26
- StableDiffusionXLControlNetPipeline,
27
- UNet2DConditionModel,
28
- )
29
- from diffusers.utils import randn_tensor, torch_device
30
- from diffusers.utils.import_utils import is_xformers_available
31
- from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
32
-
33
- from ..pipeline_params import (
34
- IMAGE_TO_IMAGE_IMAGE_PARAMS,
35
- TEXT_TO_IMAGE_BATCH_PARAMS,
36
- TEXT_TO_IMAGE_IMAGE_PARAMS,
37
- TEXT_TO_IMAGE_PARAMS,
38
- )
39
- from ..test_pipelines_common import (
40
- PipelineKarrasSchedulerTesterMixin,
41
- PipelineLatentTesterMixin,
42
- PipelineTesterMixin,
43
- )
44
-
45
-
46
- enable_full_determinism()
47
-
48
-
49
- class ControlNetPipelineSDXLFastTests(
50
- PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
51
- ):
52
- pipeline_class = StableDiffusionXLControlNetPipeline
53
- params = TEXT_TO_IMAGE_PARAMS
54
- batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
55
- image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
56
- image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
57
-
58
- def get_dummy_components(self):
59
- torch.manual_seed(0)
60
- unet = UNet2DConditionModel(
61
- block_out_channels=(32, 64),
62
- layers_per_block=2,
63
- sample_size=32,
64
- in_channels=4,
65
- out_channels=4,
66
- down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
67
- up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
68
- # SD2-specific config below
69
- attention_head_dim=(2, 4),
70
- use_linear_projection=True,
71
- addition_embed_type="text_time",
72
- addition_time_embed_dim=8,
73
- transformer_layers_per_block=(1, 2),
74
- projection_class_embeddings_input_dim=80, # 6 * 8 + 32
75
- cross_attention_dim=64,
76
- )
77
- torch.manual_seed(0)
78
- controlnet = ControlNetModel(
79
- block_out_channels=(32, 64),
80
- layers_per_block=2,
81
- in_channels=4,
82
- down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
83
- conditioning_embedding_out_channels=(16, 32),
84
- # SD2-specific config below
85
- attention_head_dim=(2, 4),
86
- use_linear_projection=True,
87
- addition_embed_type="text_time",
88
- addition_time_embed_dim=8,
89
- transformer_layers_per_block=(1, 2),
90
- projection_class_embeddings_input_dim=80, # 6 * 8 + 32
91
- cross_attention_dim=64,
92
- )
93
- torch.manual_seed(0)
94
- scheduler = EulerDiscreteScheduler(
95
- beta_start=0.00085,
96
- beta_end=0.012,
97
- steps_offset=1,
98
- beta_schedule="scaled_linear",
99
- timestep_spacing="leading",
100
- )
101
- torch.manual_seed(0)
102
- vae = AutoencoderKL(
103
- block_out_channels=[32, 64],
104
- in_channels=3,
105
- out_channels=3,
106
- down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
107
- up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
108
- latent_channels=4,
109
- )
110
- torch.manual_seed(0)
111
- text_encoder_config = CLIPTextConfig(
112
- bos_token_id=0,
113
- eos_token_id=2,
114
- hidden_size=32,
115
- intermediate_size=37,
116
- layer_norm_eps=1e-05,
117
- num_attention_heads=4,
118
- num_hidden_layers=5,
119
- pad_token_id=1,
120
- vocab_size=1000,
121
- # SD2-specific config below
122
- hidden_act="gelu",
123
- projection_dim=32,
124
- )
125
- text_encoder = CLIPTextModel(text_encoder_config)
126
- tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
127
-
128
- text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
129
- tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
130
-
131
- components = {
132
- "unet": unet,
133
- "controlnet": controlnet,
134
- "scheduler": scheduler,
135
- "vae": vae,
136
- "text_encoder": text_encoder,
137
- "tokenizer": tokenizer,
138
- "text_encoder_2": text_encoder_2,
139
- "tokenizer_2": tokenizer_2,
140
- }
141
- return components
142
-
143
- def get_dummy_inputs(self, device, seed=0):
144
- if str(device).startswith("mps"):
145
- generator = torch.manual_seed(seed)
146
- else:
147
- generator = torch.Generator(device=device).manual_seed(seed)
148
-
149
- controlnet_embedder_scale_factor = 2
150
- image = randn_tensor(
151
- (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
152
- generator=generator,
153
- device=torch.device(device),
154
- )
155
-
156
- inputs = {
157
- "prompt": "A painting of a squirrel eating a burger",
158
- "generator": generator,
159
- "num_inference_steps": 2,
160
- "guidance_scale": 6.0,
161
- "output_type": "numpy",
162
- "image": image,
163
- }
164
-
165
- return inputs
166
-
167
- def test_attention_slicing_forward_pass(self):
168
- return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)
169
-
170
- @unittest.skipIf(
171
- torch_device != "cuda" or not is_xformers_available(),
172
- reason="XFormers attention is only available with CUDA and `xformers` installed",
173
- )
174
- def test_xformers_attention_forwardGenerator_pass(self):
175
- self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)
176
-
177
- def test_inference_batch_single_identical(self):
178
- self._test_inference_batch_single_identical(expected_max_diff=2e-3)
179
-
180
- @require_torch_gpu
181
- def test_stable_diffusion_xl_offloads(self):
182
- pipes = []
183
- components = self.get_dummy_components()
184
- sd_pipe = self.pipeline_class(**components).to(torch_device)
185
- pipes.append(sd_pipe)
186
-
187
- components = self.get_dummy_components()
188
- sd_pipe = self.pipeline_class(**components)
189
- sd_pipe.enable_model_cpu_offload()
190
- pipes.append(sd_pipe)
191
-
192
- components = self.get_dummy_components()
193
- sd_pipe = self.pipeline_class(**components)
194
- sd_pipe.enable_sequential_cpu_offload()
195
- pipes.append(sd_pipe)
196
-
197
- image_slices = []
198
- for pipe in pipes:
199
- pipe.unet.set_default_attn_processor()
200
-
201
- inputs = self.get_dummy_inputs(torch_device)
202
- image = pipe(**inputs).images
203
-
204
- image_slices.append(image[0, -3:, -3:, -1].flatten())
205
-
206
- assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3
207
- assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3
208
-
209
- def test_stable_diffusion_xl_multi_prompts(self):
210
- components = self.get_dummy_components()
211
- sd_pipe = self.pipeline_class(**components).to(torch_device)
212
-
213
- # forward with single prompt
214
- inputs = self.get_dummy_inputs(torch_device)
215
- output = sd_pipe(**inputs)
216
- image_slice_1 = output.images[0, -3:, -3:, -1]
217
-
218
- # forward with same prompt duplicated
219
- inputs = self.get_dummy_inputs(torch_device)
220
- inputs["prompt_2"] = inputs["prompt"]
221
- output = sd_pipe(**inputs)
222
- image_slice_2 = output.images[0, -3:, -3:, -1]
223
-
224
- # ensure the results are equal
225
- assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
226
-
227
- # forward with different prompt
228
- inputs = self.get_dummy_inputs(torch_device)
229
- inputs["prompt_2"] = "different prompt"
230
- output = sd_pipe(**inputs)
231
- image_slice_3 = output.images[0, -3:, -3:, -1]
232
-
233
- # ensure the results are not equal
234
- assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4
235
-
236
- # manually set a negative_prompt
237
- inputs = self.get_dummy_inputs(torch_device)
238
- inputs["negative_prompt"] = "negative prompt"
239
- output = sd_pipe(**inputs)
240
- image_slice_1 = output.images[0, -3:, -3:, -1]
241
-
242
- # forward with same negative_prompt duplicated
243
- inputs = self.get_dummy_inputs(torch_device)
244
- inputs["negative_prompt"] = "negative prompt"
245
- inputs["negative_prompt_2"] = inputs["negative_prompt"]
246
- output = sd_pipe(**inputs)
247
- image_slice_2 = output.images[0, -3:, -3:, -1]
248
-
249
- # ensure the results are equal
250
- assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
251
-
252
- # forward with different negative_prompt
253
- inputs = self.get_dummy_inputs(torch_device)
254
- inputs["negative_prompt"] = "negative prompt"
255
- inputs["negative_prompt_2"] = "different negative prompt"
256
- output = sd_pipe(**inputs)
257
- image_slice_3 = output.images[0, -3:, -3:, -1]
258
-
259
- # ensure the results are not equal
260
- assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4
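The offload and multi-prompt checks above never compare full images; they flatten a 3x3 corner slice of the last channel and bound the maximum absolute difference. A self-contained illustration of that comparison pattern with synthetic arrays (the tolerance mirrors the test, the data does not come from a pipeline):

```python
import numpy as np

# Two fake "pipeline outputs" in the (N, H, W, C) layout produced by output_type="numpy".
rng = np.random.RandomState(0)
image_a = rng.rand(1, 64, 64, 3)
image_b = image_a + 1e-5          # e.g. the same pipeline run again with CPU offload enabled

slice_a = image_a[0, -3:, -3:, -1].flatten()
slice_b = image_b[0, -3:, -3:, -1].flatten()
assert np.abs(slice_a - slice_b).max() < 1e-3   # "outputs are numerically equivalent" check
```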
 
spaces/Andy1621/uniformer_image_detection/mmdet/core/bbox/coder/yolo_bbox_coder.py DELETED
@@ -1,89 +0,0 @@
1
- import mmcv
2
- import torch
3
-
4
- from ..builder import BBOX_CODERS
5
- from .base_bbox_coder import BaseBBoxCoder
6
-
7
-
8
- @BBOX_CODERS.register_module()
9
- class YOLOBBoxCoder(BaseBBoxCoder):
10
- """YOLO BBox coder.
11
-
12
- Following `YOLO <https://arxiv.org/abs/1506.02640>`_, this coder divides the
13
- image into grids and encodes bboxes (x1, y1, x2, y2) into (cx, cy, dw, dh).
14
- cx, cy in [0., 1.], denotes relative center position w.r.t the center of
15
- bboxes. dw, dh are the same as :obj:`DeltaXYWHBBoxCoder`.
16
-
17
- Args:
18
- eps (float): Min value of cx, cy when encoding.
19
- """
20
-
21
- def __init__(self, eps=1e-6):
22
- super(BaseBBoxCoder, self).__init__()
23
- self.eps = eps
24
-
25
- @mmcv.jit(coderize=True)
26
- def encode(self, bboxes, gt_bboxes, stride):
27
- """Get box regression transformation deltas that can be used to
28
- transform the ``bboxes`` into the ``gt_bboxes``.
29
-
30
- Args:
31
- bboxes (torch.Tensor): Source boxes, e.g., anchors.
32
- gt_bboxes (torch.Tensor): Target of the transformation, e.g.,
33
- ground-truth boxes.
34
- stride (torch.Tensor | int): Stride of bboxes.
35
-
36
- Returns:
37
- torch.Tensor: Box transformation deltas
38
- """
39
-
40
- assert bboxes.size(0) == gt_bboxes.size(0)
41
- assert bboxes.size(-1) == gt_bboxes.size(-1) == 4
42
- x_center_gt = (gt_bboxes[..., 0] + gt_bboxes[..., 2]) * 0.5
43
- y_center_gt = (gt_bboxes[..., 1] + gt_bboxes[..., 3]) * 0.5
44
- w_gt = gt_bboxes[..., 2] - gt_bboxes[..., 0]
45
- h_gt = gt_bboxes[..., 3] - gt_bboxes[..., 1]
46
- x_center = (bboxes[..., 0] + bboxes[..., 2]) * 0.5
47
- y_center = (bboxes[..., 1] + bboxes[..., 3]) * 0.5
48
- w = bboxes[..., 2] - bboxes[..., 0]
49
- h = bboxes[..., 3] - bboxes[..., 1]
50
- w_target = torch.log((w_gt / w).clamp(min=self.eps))
51
- h_target = torch.log((h_gt / h).clamp(min=self.eps))
52
- x_center_target = ((x_center_gt - x_center) / stride + 0.5).clamp(
53
- self.eps, 1 - self.eps)
54
- y_center_target = ((y_center_gt - y_center) / stride + 0.5).clamp(
55
- self.eps, 1 - self.eps)
56
- encoded_bboxes = torch.stack(
57
- [x_center_target, y_center_target, w_target, h_target], dim=-1)
58
- return encoded_bboxes
59
-
60
- @mmcv.jit(coderize=True)
61
- def decode(self, bboxes, pred_bboxes, stride):
62
- """Apply transformation `pred_bboxes` to `boxes`.
63
-
64
- Args:
65
- bboxes (torch.Tensor): Basic boxes, e.g. anchors.
66
- pred_bboxes (torch.Tensor): Encoded boxes with the same shape as ``bboxes``.
67
- stride (torch.Tensor | int): Strides of bboxes.
68
-
69
- Returns:
70
- torch.Tensor: Decoded boxes.
71
- """
72
- assert pred_bboxes.size(0) == bboxes.size(0)
73
- assert pred_bboxes.size(-1) == bboxes.size(-1) == 4
74
- x_center = (bboxes[..., 0] + bboxes[..., 2]) * 0.5
75
- y_center = (bboxes[..., 1] + bboxes[..., 3]) * 0.5
76
- w = bboxes[..., 2] - bboxes[..., 0]
77
- h = bboxes[..., 3] - bboxes[..., 1]
78
- # Get outputs x, y
79
- x_center_pred = (pred_bboxes[..., 0] - 0.5) * stride + x_center
80
- y_center_pred = (pred_bboxes[..., 1] - 0.5) * stride + y_center
81
- w_pred = torch.exp(pred_bboxes[..., 2]) * w
82
- h_pred = torch.exp(pred_bboxes[..., 3]) * h
83
-
84
- decoded_bboxes = torch.stack(
85
- (x_center_pred - w_pred / 2, y_center_pred - h_pred / 2,
86
- x_center_pred + w_pred / 2, y_center_pred + h_pred / 2),
87
- dim=-1)
88
-
89
- return decoded_bboxes
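A hedged round-trip sketch of the transform implemented above, written with plain tensors so it runs without the mmdet registry; the anchor, ground-truth box, and stride are made-up values chosen so the center offset stays inside the clamp range.

```python
import torch

anchors = torch.tensor([[0., 0., 16., 16.]])    # (x1, y1, x2, y2)
gts = torch.tensor([[2., 3., 14., 13.]])
stride = 8

ax, ay = (anchors[:, 0] + anchors[:, 2]) / 2, (anchors[:, 1] + anchors[:, 3]) / 2
aw, ah = anchors[:, 2] - anchors[:, 0], anchors[:, 3] - anchors[:, 1]
gx, gy = (gts[:, 0] + gts[:, 2]) / 2, (gts[:, 1] + gts[:, 3]) / 2
gw, gh = gts[:, 2] - gts[:, 0], gts[:, 3] - gts[:, 1]

# encode: center offsets relative to the grid cell plus log-scale sizes
deltas = torch.stack([(gx - ax) / stride + 0.5, (gy - ay) / stride + 0.5,
                      torch.log(gw / aw), torch.log(gh / ah)], dim=-1)

# decode: invert the transform
cx = (deltas[:, 0] - 0.5) * stride + ax
cy = (deltas[:, 1] - 0.5) * stride + ay
w, h = torch.exp(deltas[:, 2]) * aw, torch.exp(deltas[:, 3]) * ah
decoded = torch.stack([cx - w / 2, cy - h / 2, cx + w / 2, cy + h / 2], dim=-1)
assert torch.allclose(decoded, gts)             # round trip recovers the ground truth
```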
 
spaces/Andy1621/uniformer_image_detection/mmdet/datasets/pipelines/compose.py DELETED
@@ -1,51 +0,0 @@
1
- import collections
2
-
3
- from mmcv.utils import build_from_cfg
4
-
5
- from ..builder import PIPELINES
6
-
7
-
8
- @PIPELINES.register_module()
9
- class Compose(object):
10
- """Compose multiple transforms sequentially.
11
-
12
- Args:
13
- transforms (Sequence[dict | callable]): Sequence of transform objects or
14
- config dicts to be composed.
15
- """
16
-
17
- def __init__(self, transforms):
18
- assert isinstance(transforms, collections.abc.Sequence)
19
- self.transforms = []
20
- for transform in transforms:
21
- if isinstance(transform, dict):
22
- transform = build_from_cfg(transform, PIPELINES)
23
- self.transforms.append(transform)
24
- elif callable(transform):
25
- self.transforms.append(transform)
26
- else:
27
- raise TypeError('transform must be callable or a dict')
28
-
29
- def __call__(self, data):
30
- """Call function to apply transforms sequentially.
31
-
32
- Args:
33
- data (dict): A result dict contains the data to transform.
34
-
35
- Returns:
36
- dict: Transformed data.
37
- """
38
-
39
- for t in self.transforms:
40
- data = t(data)
41
- if data is None:
42
- return None
43
- return data
44
-
45
- def __repr__(self):
46
- format_string = self.__class__.__name__ + '('
47
- for t in self.transforms:
48
- format_string += '\n'
49
- format_string += f' {t}'
50
- format_string += '\n)'
51
- return format_string
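A usage sketch for the pipeline composer above, assuming mmdet and mmcv are installed so the `PIPELINES` registry is populated; the transform names are standard mmdet ones, while `add_flag` is a hypothetical callable included only to show that plain callables are accepted as-is.

```python
from mmdet.datasets.pipelines import Compose

def add_flag(results):
    # Plain callables are appended to the pipeline without registry lookup.
    results["custom_flag"] = True
    return results

pipeline = Compose([
    dict(type="LoadImageFromFile"),                                # dicts are built via the registry
    dict(type="Resize", img_scale=(1333, 800), keep_ratio=True),
    add_flag,
])
print(pipeline)   # __repr__ lists each composed transform on its own line
```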
 
spaces/Andy1621/uniformer_image_detection/mmdet/models/backbones/uniformer.py DELETED
@@ -1,422 +0,0 @@
1
- # --------------------------------------------------------
2
- # UniFormer
3
- # Copyright (c) 2022 SenseTime X-Lab
4
- # Licensed under The MIT License [see LICENSE for details]
5
- # Written by Kunchang Li
6
- # --------------------------------------------------------
7
-
8
- from collections import OrderedDict
9
- import math
10
-
11
- from functools import partial
12
- import torch
13
- import torch.nn as nn
14
- import torch.nn.functional as F
15
- import torch.utils.checkpoint as checkpoint
16
- import numpy as np
17
- from timm.models.layers import DropPath, to_2tuple, trunc_normal_
18
-
19
- from mmcv_custom import load_checkpoint
20
- from mmdet.utils import get_root_logger
21
- from ..builder import BACKBONES
22
-
23
-
24
- class Mlp(nn.Module):
25
- def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
26
- super().__init__()
27
- out_features = out_features or in_features
28
- hidden_features = hidden_features or in_features
29
- self.fc1 = nn.Linear(in_features, hidden_features)
30
- self.act = act_layer()
31
- self.fc2 = nn.Linear(hidden_features, out_features)
32
- self.drop = nn.Dropout(drop)
33
-
34
- def forward(self, x):
35
- x = self.fc1(x)
36
- x = self.act(x)
37
- x = self.drop(x)
38
- x = self.fc2(x)
39
- x = self.drop(x)
40
- return x
41
-
42
-
43
- class CMlp(nn.Module):
44
- def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
45
- super().__init__()
46
- out_features = out_features or in_features
47
- hidden_features = hidden_features or in_features
48
- self.fc1 = nn.Conv2d(in_features, hidden_features, 1)
49
- self.act = act_layer()
50
- self.fc2 = nn.Conv2d(hidden_features, out_features, 1)
51
- self.drop = nn.Dropout(drop)
52
-
53
- def forward(self, x):
54
- x = self.fc1(x)
55
- x = self.act(x)
56
- x = self.drop(x)
57
- x = self.fc2(x)
58
- x = self.drop(x)
59
- return x
60
-
61
-
62
- class CBlock(nn.Module):
63
- def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
64
- drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
65
- super().__init__()
66
- self.pos_embed = nn.Conv2d(dim, dim, 3, padding=1, groups=dim)
67
- self.norm1 = nn.BatchNorm2d(dim)
68
- self.conv1 = nn.Conv2d(dim, dim, 1)
69
- self.conv2 = nn.Conv2d(dim, dim, 1)
70
- self.attn = nn.Conv2d(dim, dim, 5, padding=2, groups=dim)
71
- # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
72
- self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
73
- self.norm2 = nn.BatchNorm2d(dim)
74
- mlp_hidden_dim = int(dim * mlp_ratio)
75
- self.mlp = CMlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
76
-
77
- def forward(self, x):
78
- x = x + self.pos_embed(x)
79
- x = x + self.drop_path(self.conv2(self.attn(self.conv1(self.norm1(x)))))
80
- x = x + self.drop_path(self.mlp(self.norm2(x)))
81
- return x
82
-
83
-
84
- class Attention(nn.Module):
85
- def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
86
- super().__init__()
87
- self.num_heads = num_heads
88
- head_dim = dim // num_heads
89
- # NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights
90
- self.scale = qk_scale or head_dim ** -0.5
91
-
92
- self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
93
- self.attn_drop = nn.Dropout(attn_drop)
94
- self.proj = nn.Linear(dim, dim)
95
- self.proj_drop = nn.Dropout(proj_drop)
96
-
97
- def forward(self, x):
98
- B, N, C = x.shape
99
- qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
100
- q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
101
-
102
- attn = (q @ k.transpose(-2, -1)) * self.scale
103
- attn = attn.softmax(dim=-1)
104
- attn = self.attn_drop(attn)
105
-
106
- x = (attn @ v).transpose(1, 2).reshape(B, N, C)
107
- x = self.proj(x)
108
- x = self.proj_drop(x)
109
- return x
110
-
111
-
112
- class SABlock(nn.Module):
113
- def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
114
- drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
115
- super().__init__()
116
- self.pos_embed = nn.Conv2d(dim, dim, 3, padding=1, groups=dim)
117
- self.norm1 = norm_layer(dim)
118
- self.attn = Attention(
119
- dim,
120
- num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
121
- attn_drop=attn_drop, proj_drop=drop)
122
- # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
123
- self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
124
- self.norm2 = norm_layer(dim)
125
- mlp_hidden_dim = int(dim * mlp_ratio)
126
- self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
127
-
128
- def forward(self, x):
129
- x = x + self.pos_embed(x)
130
- B, N, H, W = x.shape
131
- x = x.flatten(2).transpose(1, 2)
132
- x = x + self.drop_path(self.attn(self.norm1(x)))
133
- x = x + self.drop_path(self.mlp(self.norm2(x)))
134
- x = x.transpose(1, 2).reshape(B, N, H, W)
135
- return x
136
-
137
-
138
- def window_partition(x, window_size):
139
- """
140
- Args:
141
- x: (B, H, W, C)
142
- window_size (int): window size
143
- Returns:
144
- windows: (num_windows*B, window_size, window_size, C)
145
- """
146
- B, H, W, C = x.shape
147
- x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
148
- windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
149
- return windows
150
-
151
-
152
- def window_reverse(windows, window_size, H, W):
153
- """
154
- Args:
155
- windows: (num_windows*B, window_size, window_size, C)
156
- window_size (int): Window size
157
- H (int): Height of image
158
- W (int): Width of image
159
- Returns:
160
- x: (B, H, W, C)
161
- """
162
- B = int(windows.shape[0] / (H * W / window_size / window_size))
163
- x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
164
- x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
165
- return x
166
-
167
-
168
- class SABlock_Windows(nn.Module):
169
- def __init__(self, dim, num_heads, window_size=14, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
170
- drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
171
- super().__init__()
172
- self.window_size=window_size
173
- self.pos_embed = nn.Conv2d(dim, dim, 3, padding=1, groups=dim)
174
- self.norm1 = norm_layer(dim)
175
- self.attn = Attention(
176
- dim,
177
- num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
178
- attn_drop=attn_drop, proj_drop=drop)
179
- # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
180
- self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
181
- self.norm2 = norm_layer(dim)
182
- mlp_hidden_dim = int(dim * mlp_ratio)
183
- self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
184
-
185
- def forward(self, x):
186
- x = x + self.pos_embed(x)
187
- x = x.permute(0, 2, 3, 1)
188
- B, H, W, C = x.shape
189
- shortcut = x
190
- x = self.norm1(x)
191
-
192
- pad_l = pad_t = 0
193
- pad_r = (self.window_size - W % self.window_size) % self.window_size
194
- pad_b = (self.window_size - H % self.window_size) % self.window_size
195
- x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b))
196
- _, Hp, Wp, _ = x.shape
197
-
198
- x_windows = window_partition(x, self.window_size) # nW*B, window_size, window_size, C
199
- x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C
200
-
201
- # W-MSA/SW-MSA
202
- attn_windows = self.attn(x_windows) # nW*B, window_size*window_size, C
203
-
204
- # merge windows
205
- attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
206
- x = window_reverse(attn_windows, self.window_size, Hp, Wp) # B H' W' C
207
-
208
- # reverse cyclic shift
209
- if pad_r > 0 or pad_b > 0:
210
- x = x[:, :H, :W, :].contiguous()
211
-
212
- x = shortcut + self.drop_path(x)
213
- x = x + self.drop_path(self.mlp(self.norm2(x)))
214
- x = x.permute(0, 3, 1, 2).reshape(B, C, H, W)
215
- return x
216
-
217
-
218
- class PatchEmbed(nn.Module):
219
- """ Image to Patch Embedding
220
- """
221
- def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
222
- super().__init__()
223
- img_size = to_2tuple(img_size)
224
- patch_size = to_2tuple(patch_size)
225
- num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
226
- self.img_size = img_size
227
- self.patch_size = patch_size
228
- self.num_patches = num_patches
229
- self.norm = nn.LayerNorm(embed_dim)
230
- self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
231
-
232
- def forward(self, x):
233
- B, _, H, W = x.shape
234
- x = self.proj(x)
235
- B, _, H, W = x.shape
236
- x = x.flatten(2).transpose(1, 2)
237
- x = self.norm(x)
238
- x = x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous()
239
- return x
240
-
241
-
242
- @BACKBONES.register_module()
243
- class UniFormer(nn.Module):
244
- """ Vision Transformer
245
- A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` -
246
- https://arxiv.org/abs/2010.11929
247
- """
248
- def __init__(self, layers=[3, 4, 8, 3], img_size=224, in_chans=3, num_classes=80, embed_dim=[64, 128, 320, 512],
249
- head_dim=64, mlp_ratio=4., qkv_bias=True, qk_scale=None, representation_size=None,
250
- drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=partial(nn.LayerNorm, eps=1e-6),
251
- pretrained_path=None, use_checkpoint=False, checkpoint_num=[0, 0, 0, 0],
252
- windows=False, hybrid=False, window_size=14):
253
- """
254
- Args:
255
- layers (list): number of blocks in each layer
256
- img_size (int, tuple): input image size
257
- in_chans (int): number of input channels
258
- num_classes (int): number of classes for classification head
259
- embed_dim (list): embedding dimension of each stage
260
- head_dim (int): dimension of attention heads
261
- mlp_ratio (int): ratio of mlp hidden dim to embedding dim
262
- qkv_bias (bool): enable bias for qkv if True
263
- qk_scale (float): override default qk scale of head_dim ** -0.5 if set
264
- representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set
265
- drop_rate (float): dropout rate
266
- attn_drop_rate (float): attention dropout rate
267
- drop_path_rate (float): stochastic depth rate
268
- norm_layer (nn.Module): normalization layer
269
- pretrained_path (str): path of pretrained model
270
- use_checkpoint (bool): whether use checkpoint
271
- checkpoint_num (list): index for using checkpoint in every stage
272
- windows (bool): whether use window MHRA
273
- hybrid (bool): whether use hybrid MHRA
274
- window_size (int): size of window (>14)
275
- """
276
- super().__init__()
277
- self.num_classes = num_classes
278
- self.use_checkpoint = use_checkpoint
279
- self.checkpoint_num = checkpoint_num
280
- self.windows = windows
281
- print(f'Use Checkpoint: {self.use_checkpoint}')
282
- print(f'Checkpoint Number: {self.checkpoint_num}')
283
- self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
284
- norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
285
-
286
- self.patch_embed1 = PatchEmbed(
287
- img_size=img_size, patch_size=4, in_chans=in_chans, embed_dim=embed_dim[0])
288
- self.patch_embed2 = PatchEmbed(
289
- img_size=img_size // 4, patch_size=2, in_chans=embed_dim[0], embed_dim=embed_dim[1])
290
- self.patch_embed3 = PatchEmbed(
291
- img_size=img_size // 8, patch_size=2, in_chans=embed_dim[1], embed_dim=embed_dim[2])
292
- self.patch_embed4 = PatchEmbed(
293
- img_size=img_size // 16, patch_size=2, in_chans=embed_dim[2], embed_dim=embed_dim[3])
294
-
295
- self.pos_drop = nn.Dropout(p=drop_rate)
296
- dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(layers))] # stochastic depth decay rule
297
- num_heads = [dim // head_dim for dim in embed_dim]
298
- self.blocks1 = nn.ModuleList([
299
- CBlock(
300
- dim=embed_dim[0], num_heads=num_heads[0], mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
301
- drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer)
302
- for i in range(layers[0])])
303
- self.norm1=norm_layer(embed_dim[0])
304
- self.blocks2 = nn.ModuleList([
305
- CBlock(
306
- dim=embed_dim[1], num_heads=num_heads[1], mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
307
- drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i+layers[0]], norm_layer=norm_layer)
308
- for i in range(layers[1])])
309
- self.norm2 = norm_layer(embed_dim[1])
310
- if self.windows:
311
- print('Use local window for all blocks in stage3')
312
- self.blocks3 = nn.ModuleList([
313
- SABlock_Windows(
314
- dim=embed_dim[2], num_heads=num_heads[2], window_size=window_size, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
315
- drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i+layers[0]+layers[1]], norm_layer=norm_layer)
316
- for i in range(layers[2])])
317
- elif hybrid:
318
- print('Use hybrid window for blocks in stage3')
319
- block3 = []
320
- for i in range(layers[2]):
321
- if (i + 1) % 4 == 0:
322
- block3.append(SABlock(
323
- dim=embed_dim[2], num_heads=num_heads[2], mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
324
- drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i+layers[0]+layers[1]], norm_layer=norm_layer))
325
- else:
326
- block3.append(SABlock_Windows(
327
- dim=embed_dim[2], num_heads=num_heads[2], window_size=window_size, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
328
- drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i+layers[0]+layers[1]], norm_layer=norm_layer))
329
- self.blocks3 = nn.ModuleList(block3)
330
- else:
331
- print('Use global window for all blocks in stage3')
332
- self.blocks3 = nn.ModuleList([
333
- SABlock(
334
- dim=embed_dim[2], num_heads=num_heads[2], mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
335
- drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i+layers[0]+layers[1]], norm_layer=norm_layer)
336
- for i in range(layers[2])])
337
- self.norm3 = norm_layer(embed_dim[2])
338
- self.blocks4 = nn.ModuleList([
339
- SABlock(
340
- dim=embed_dim[3], num_heads=num_heads[3], mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
341
- drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i+layers[0]+layers[1]+layers[2]], norm_layer=norm_layer)
342
- for i in range(layers[3])])
343
- self.norm4 = norm_layer(embed_dim[3])
344
-
345
- # Representation layer
346
- if representation_size:
347
- self.num_features = representation_size
348
- self.pre_logits = nn.Sequential(OrderedDict([
349
- ('fc', nn.Linear(embed_dim, representation_size)),
350
- ('act', nn.Tanh())
351
- ]))
352
- else:
353
- self.pre_logits = nn.Identity()
354
-
355
- self.apply(self._init_weights)
356
- self.init_weights(pretrained=pretrained_path)
357
-
358
- def init_weights(self, pretrained):
359
- if isinstance(pretrained, str):
360
- logger = get_root_logger()
361
- load_checkpoint(self, pretrained, map_location='cpu', strict=False, logger=logger)
362
- print(f'Load pretrained model from {pretrained}')
363
- def _init_weights(self, m):
364
- if isinstance(m, nn.Linear):
365
- trunc_normal_(m.weight, std=.02)
366
- if isinstance(m, nn.Linear) and m.bias is not None:
367
- nn.init.constant_(m.bias, 0)
368
- elif isinstance(m, nn.LayerNorm):
369
- nn.init.constant_(m.bias, 0)
370
- nn.init.constant_(m.weight, 1.0)
371
-
372
- @torch.jit.ignore
373
- def no_weight_decay(self):
374
- return {'pos_embed', 'cls_token'}
375
-
376
- def get_classifier(self):
377
- return self.head
378
-
379
- def reset_classifier(self, num_classes, global_pool=''):
380
- self.num_classes = num_classes
381
- self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
382
-
383
- def forward_features(self, x):
384
- out = []
385
- x = self.patch_embed1(x)
386
- x = self.pos_drop(x)
387
- for i, blk in enumerate(self.blocks1):
388
- if self.use_checkpoint and i < self.checkpoint_num[0]:
389
- x = checkpoint.checkpoint(blk, x)
390
- else:
391
- x = blk(x)
392
- x_out = self.norm1(x.permute(0, 2, 3, 1))
393
- out.append(x_out.permute(0, 3, 1, 2).contiguous())
394
- x = self.patch_embed2(x)
395
- for i, blk in enumerate(self.blocks2):
396
- if self.use_checkpoint and i < self.checkpoint_num[1]:
397
- x = checkpoint.checkpoint(blk, x)
398
- else:
399
- x = blk(x)
400
- x_out = self.norm2(x.permute(0, 2, 3, 1))
401
- out.append(x_out.permute(0, 3, 1, 2).contiguous())
402
- x = self.patch_embed3(x)
403
- for i, blk in enumerate(self.blocks3):
404
- if self.use_checkpoint and i < self.checkpoint_num[2]:
405
- x = checkpoint.checkpoint(blk, x)
406
- else:
407
- x = blk(x)
408
- x_out = self.norm3(x.permute(0, 2, 3, 1))
409
- out.append(x_out.permute(0, 3, 1, 2).contiguous())
410
- x = self.patch_embed4(x)
411
- for i, blk in enumerate(self.blocks4):
412
- if self.use_checkpoint and i < self.checkpoint_num[3]:
413
- x = checkpoint.checkpoint(blk, x)
414
- else:
415
- x = blk(x)
416
- x_out = self.norm4(x.permute(0, 2, 3, 1))
417
- out.append(x_out.permute(0, 3, 1, 2).contiguous())
418
- return tuple(out)
419
-
420
- def forward(self, x):
421
- x = self.forward_features(x)
422
- return x
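A smoke-test sketch for the backbone above, with the `UniFormer` class in scope and assuming the surrounding repository (mmdet, timm, and the local `mmcv_custom` shim) is importable so the module loads; the input size and hyperparameters are the defaults shown in the constructor.

```python
import torch

model = UniFormer(layers=[3, 4, 8, 3], embed_dim=[64, 128, 320, 512], head_dim=64)
model.eval()

x = torch.randn(1, 3, 224, 224)
with torch.no_grad():
    feats = model(x)        # tuple of 4 pyramid feature maps
for f in feats:
    print(f.shape)          # strides 4/8/16/32 -> 56x56, 28x28, 14x14, 7x7
```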
 
spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/fsaf_head.py DELETED
@@ -1,422 +0,0 @@
1
- import numpy as np
2
- import torch
3
- from mmcv.cnn import normal_init
4
- from mmcv.runner import force_fp32
5
-
6
- from mmdet.core import (anchor_inside_flags, images_to_levels, multi_apply,
7
- unmap)
8
- from ..builder import HEADS
9
- from ..losses.accuracy import accuracy
10
- from ..losses.utils import weight_reduce_loss
11
- from .retina_head import RetinaHead
12
-
13
-
14
- @HEADS.register_module()
15
- class FSAFHead(RetinaHead):
16
- """Anchor-free head used in `FSAF <https://arxiv.org/abs/1903.00621>`_.
17
-
18
- The head contains two subnetworks. The first classifies anchor boxes and
19
- the second regresses deltas for the anchors (num_anchors is 1 for anchor-
20
- free methods)
21
-
22
- Args:
23
- *args: Same as its base class in :class:`RetinaHead`
24
- score_threshold (float, optional): The score_threshold to calculate
25
- positive recall. If given, prediction scores lower than this value
26
- is counted as incorrect prediction. Default to None.
27
- **kwargs: Same as its base class in :class:`RetinaHead`
28
-
29
- Example:
30
- >>> import torch
31
- >>> self = FSAFHead(11, 7)
32
- >>> x = torch.rand(1, 7, 32, 32)
33
- >>> cls_score, bbox_pred = self.forward_single(x)
34
- >>> # Each anchor predicts a score for each class except background
35
- >>> cls_per_anchor = cls_score.shape[1] / self.num_anchors
36
- >>> box_per_anchor = bbox_pred.shape[1] / self.num_anchors
37
- >>> assert cls_per_anchor == self.num_classes
38
- >>> assert box_per_anchor == 4
39
- """
40
-
41
- def __init__(self, *args, score_threshold=None, **kwargs):
42
- super().__init__(*args, **kwargs)
43
- self.score_threshold = score_threshold
44
-
45
- def forward_single(self, x):
46
- """Forward feature map of a single scale level.
47
-
48
- Args:
49
- x (Tensor): Feature map of a single scale level.
50
-
51
- Returns:
52
- tuple (Tensor):
53
- cls_score (Tensor): Box scores for each scale level
54
- Has shape (N, num_points * num_classes, H, W).
55
- bbox_pred (Tensor): Box energies / deltas for each scale
56
- level with shape (N, num_points * 4, H, W).
57
- """
58
- cls_score, bbox_pred = super().forward_single(x)
59
- # relu: TBLR encoder only accepts positive bbox_pred
60
- return cls_score, self.relu(bbox_pred)
61
-
62
- def init_weights(self):
63
- """Initialize weights of the head."""
64
- super(FSAFHead, self).init_weights()
65
- # The positive bias in self.retina_reg conv is to prevent predicted \
66
- # bbox with 0 area
67
- normal_init(self.retina_reg, std=0.01, bias=0.25)
68
-
69
- def _get_targets_single(self,
70
- flat_anchors,
71
- valid_flags,
72
- gt_bboxes,
73
- gt_bboxes_ignore,
74
- gt_labels,
75
- img_meta,
76
- label_channels=1,
77
- unmap_outputs=True):
78
- """Compute regression and classification targets for anchors in a
79
- single image.
80
-
81
- Most of the codes are the same with the base class
82
- :obj: `AnchorHead`, except that it also collects and returns
83
- the matched gt index in the image (from 0 to num_gt-1). If the
84
- anchor bbox is not matched to any gt, the corresponding value in
85
- pos_gt_inds is -1.
86
- """
87
- inside_flags = anchor_inside_flags(flat_anchors, valid_flags,
88
- img_meta['img_shape'][:2],
89
- self.train_cfg.allowed_border)
90
- if not inside_flags.any():
91
- return (None, ) * 7
92
- # Assign gt and sample anchors
93
- anchors = flat_anchors[inside_flags.type(torch.bool), :]
94
- assign_result = self.assigner.assign(
95
- anchors, gt_bboxes, gt_bboxes_ignore,
96
- None if self.sampling else gt_labels)
97
-
98
- sampling_result = self.sampler.sample(assign_result, anchors,
99
- gt_bboxes)
100
-
101
- num_valid_anchors = anchors.shape[0]
102
- bbox_targets = torch.zeros_like(anchors)
103
- bbox_weights = torch.zeros_like(anchors)
104
- labels = anchors.new_full((num_valid_anchors, ),
105
- self.num_classes,
106
- dtype=torch.long)
107
- label_weights = anchors.new_zeros((num_valid_anchors, label_channels),
108
- dtype=torch.float)
109
- pos_gt_inds = anchors.new_full((num_valid_anchors, ),
110
- -1,
111
- dtype=torch.long)
112
-
113
- pos_inds = sampling_result.pos_inds
114
- neg_inds = sampling_result.neg_inds
115
-
116
- if len(pos_inds) > 0:
117
- if not self.reg_decoded_bbox:
118
- pos_bbox_targets = self.bbox_coder.encode(
119
- sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes)
120
- else:
121
- # When the regression loss (e.g. `IouLoss`, `GIouLoss`)
122
- # is applied directly on the decoded bounding boxes, both
123
- # the predicted boxes and regression targets should be with
124
- # absolute coordinate format.
125
- pos_bbox_targets = sampling_result.pos_gt_bboxes
126
- bbox_targets[pos_inds, :] = pos_bbox_targets
127
- bbox_weights[pos_inds, :] = 1.0
128
- # The assigned gt_index for each anchor. (0-based)
129
- pos_gt_inds[pos_inds] = sampling_result.pos_assigned_gt_inds
130
- if gt_labels is None:
131
- # Only rpn gives gt_labels as None
132
- # Foreground is the first class
133
- labels[pos_inds] = 0
134
- else:
135
- labels[pos_inds] = gt_labels[
136
- sampling_result.pos_assigned_gt_inds]
137
- if self.train_cfg.pos_weight <= 0:
138
- label_weights[pos_inds] = 1.0
139
- else:
140
- label_weights[pos_inds] = self.train_cfg.pos_weight
141
-
142
- if len(neg_inds) > 0:
143
- label_weights[neg_inds] = 1.0
144
-
145
- # shadowed_labels is a tensor composed of tuples
146
- # (anchor_inds, class_label) that indicate those anchors lying in the
147
- # outer region of a gt or overlapped by another gt with a smaller
148
- # area.
149
- #
150
- # Therefore, only the shadowed labels are ignored for loss calculation.
151
- # the key `shadowed_labels` is defined in :obj:`CenterRegionAssigner`
152
- shadowed_labels = assign_result.get_extra_property('shadowed_labels')
153
- if shadowed_labels is not None and shadowed_labels.numel():
154
- if len(shadowed_labels.shape) == 2:
155
- idx_, label_ = shadowed_labels[:, 0], shadowed_labels[:, 1]
156
- assert (labels[idx_] != label_).all(), \
157
- 'One label cannot be both positive and ignored'
158
- label_weights[idx_, label_] = 0
159
- else:
160
- label_weights[shadowed_labels] = 0
161
-
162
- # map up to original set of anchors
163
- if unmap_outputs:
164
- num_total_anchors = flat_anchors.size(0)
165
- labels = unmap(labels, num_total_anchors, inside_flags)
166
- label_weights = unmap(label_weights, num_total_anchors,
167
- inside_flags)
168
- bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags)
169
- bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags)
170
- pos_gt_inds = unmap(
171
- pos_gt_inds, num_total_anchors, inside_flags, fill=-1)
172
-
173
- return (labels, label_weights, bbox_targets, bbox_weights, pos_inds,
174
- neg_inds, sampling_result, pos_gt_inds)
175
-
176
- @force_fp32(apply_to=('cls_scores', 'bbox_preds'))
177
- def loss(self,
178
- cls_scores,
179
- bbox_preds,
180
- gt_bboxes,
181
- gt_labels,
182
- img_metas,
183
- gt_bboxes_ignore=None):
184
- """Compute loss of the head.
185
-
186
- Args:
187
- cls_scores (list[Tensor]): Box scores for each scale level
188
- Has shape (N, num_points * num_classes, H, W).
189
- bbox_preds (list[Tensor]): Box energies / deltas for each scale
190
- level with shape (N, num_points * 4, H, W).
191
- gt_bboxes (list[Tensor]): each item are the truth boxes for each
192
- image in [tl_x, tl_y, br_x, br_y] format.
193
- gt_labels (list[Tensor]): class indices corresponding to each box
194
- img_metas (list[dict]): Meta information of each image, e.g.,
195
- image size, scaling factor, etc.
196
- gt_bboxes_ignore (None | list[Tensor]): specify which bounding
197
- boxes can be ignored when computing the loss.
198
-
199
- Returns:
200
- dict[str, Tensor]: A dictionary of loss components.
201
- """
202
- for i in range(len(bbox_preds)): # loop over fpn level
203
- # avoid 0 area of the predicted bbox
204
- bbox_preds[i] = bbox_preds[i].clamp(min=1e-4)
205
- # TODO: It may directly use the base-class loss function.
206
- featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
207
- assert len(featmap_sizes) == self.anchor_generator.num_levels
208
- batch_size = len(gt_bboxes)
209
- device = cls_scores[0].device
210
- anchor_list, valid_flag_list = self.get_anchors(
211
- featmap_sizes, img_metas, device=device)
212
- label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
213
- cls_reg_targets = self.get_targets(
214
- anchor_list,
215
- valid_flag_list,
216
- gt_bboxes,
217
- img_metas,
218
- gt_bboxes_ignore_list=gt_bboxes_ignore,
219
- gt_labels_list=gt_labels,
220
- label_channels=label_channels)
221
- if cls_reg_targets is None:
222
- return None
223
- (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
224
- num_total_pos, num_total_neg,
225
- pos_assigned_gt_inds_list) = cls_reg_targets
226
-
227
- num_gts = np.array(list(map(len, gt_labels)))
228
- num_total_samples = (
229
- num_total_pos + num_total_neg if self.sampling else num_total_pos)
230
- # anchor number of multi levels
231
- num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
232
- # concat all level anchors and flags to a single tensor
233
- concat_anchor_list = []
234
- for i in range(len(anchor_list)):
235
- concat_anchor_list.append(torch.cat(anchor_list[i]))
236
- all_anchor_list = images_to_levels(concat_anchor_list,
237
- num_level_anchors)
238
- losses_cls, losses_bbox = multi_apply(
239
- self.loss_single,
240
- cls_scores,
241
- bbox_preds,
242
- all_anchor_list,
243
- labels_list,
244
- label_weights_list,
245
- bbox_targets_list,
246
- bbox_weights_list,
247
- num_total_samples=num_total_samples)
248
-
249
- # `pos_assigned_gt_inds_list` (length: fpn_levels) stores the assigned
250
- # gt index of each anchor bbox in each fpn level.
251
- cum_num_gts = list(np.cumsum(num_gts)) # length of batch_size
252
- for i, assign in enumerate(pos_assigned_gt_inds_list):
253
- # loop over fpn levels
254
- for j in range(1, batch_size):
255
- # loop over batch size
256
- # Convert gt indices in each img to those in the batch
257
- assign[j][assign[j] >= 0] += int(cum_num_gts[j - 1])
258
- pos_assigned_gt_inds_list[i] = assign.flatten()
259
- labels_list[i] = labels_list[i].flatten()
260
- num_gts = sum(map(len, gt_labels)) # total number of gt in the batch
261
- # The unique label index of each gt in the batch
262
- label_sequence = torch.arange(num_gts, device=device)
263
- # Collect the average loss of each gt in each level
264
- with torch.no_grad():
265
- loss_levels, = multi_apply(
266
- self.collect_loss_level_single,
267
- losses_cls,
268
- losses_bbox,
269
- pos_assigned_gt_inds_list,
270
- labels_seq=label_sequence)
271
- # Shape: (fpn_levels, num_gts). Loss of each gt at each fpn level
272
- loss_levels = torch.stack(loss_levels, dim=0)
273
- # Locate the best fpn level for loss back-propagation
274
- if loss_levels.numel() == 0: # zero gt
275
- argmin = loss_levels.new_empty((num_gts, ), dtype=torch.long)
276
- else:
277
- _, argmin = loss_levels.min(dim=0)
278
-
279
- # Reweight the loss of each (anchor, label) pair, so that only those
280
- # at the best gt level are back-propagated.
281
- losses_cls, losses_bbox, pos_inds = multi_apply(
282
- self.reweight_loss_single,
283
- losses_cls,
284
- losses_bbox,
285
- pos_assigned_gt_inds_list,
286
- labels_list,
287
- list(range(len(losses_cls))),
288
- min_levels=argmin)
289
- num_pos = torch.cat(pos_inds, 0).sum().float()
290
- pos_recall = self.calculate_pos_recall(cls_scores, labels_list,
291
- pos_inds)
292
-
293
- if num_pos == 0: # No gt
294
- avg_factor = num_pos + float(num_total_neg)
295
- else:
296
- avg_factor = num_pos
297
- for i in range(len(losses_cls)):
298
- losses_cls[i] /= avg_factor
299
- losses_bbox[i] /= avg_factor
300
- return dict(
301
- loss_cls=losses_cls,
302
- loss_bbox=losses_bbox,
303
- num_pos=num_pos / batch_size,
304
- pos_recall=pos_recall)
305
-
306
- def calculate_pos_recall(self, cls_scores, labels_list, pos_inds):
307
- """Calculate positive recall with score threshold.
308
-
309
- Args:
310
- cls_scores (list[Tensor]): Classification scores at all fpn levels.
311
- Each tensor is in shape (N, num_classes * num_anchors, H, W)
312
- labels_list (list[Tensor]): The label that each anchor is assigned
313
- to. Shape (N * H * W * num_anchors, )
314
- pos_inds (list[Tensor]): List of bool tensors indicating whether
315
- the anchor is assigned to a positive label.
316
- Shape (N * H * W * num_anchors, )
317
-
318
- Returns:
319
- Tensor: A single float number indicating the positive recall.
320
- """
321
- with torch.no_grad():
322
- num_class = self.num_classes
323
- scores = [
324
- cls.permute(0, 2, 3, 1).reshape(-1, num_class)[pos]
325
- for cls, pos in zip(cls_scores, pos_inds)
326
- ]
327
- labels = [
328
- label.reshape(-1)[pos]
329
- for label, pos in zip(labels_list, pos_inds)
330
- ]
331
- scores = torch.cat(scores, dim=0)
332
- labels = torch.cat(labels, dim=0)
333
- if self.use_sigmoid_cls:
334
- scores = scores.sigmoid()
335
- else:
336
- scores = scores.softmax(dim=1)
337
-
338
- return accuracy(scores, labels, thresh=self.score_threshold)
339
-
340
- def collect_loss_level_single(self, cls_loss, reg_loss, assigned_gt_inds,
341
- labels_seq):
342
- """Get the average loss in each FPN level w.r.t. each gt label.
343
-
344
- Args:
345
- cls_loss (Tensor): Classification loss of each feature map pixel,
346
- shape (num_anchor, num_class)
347
- reg_loss (Tensor): Regression loss of each feature map pixel,
348
- shape (num_anchor, 4)
349
- assigned_gt_inds (Tensor): It indicates which gt the prior is
350
- assigned to (0-based, -1: no assignment). shape (num_anchor),
351
- labels_seq: The rank of labels. shape (num_gt)
352
-
353
- Returns:
354
- shape: (num_gt), average loss of each gt in this level
355
- """
356
- if len(reg_loss.shape) == 2: # iou loss has shape (num_prior, 4)
357
- reg_loss = reg_loss.sum(dim=-1) # sum loss in tblr dims
358
- if len(cls_loss.shape) == 2:
359
- cls_loss = cls_loss.sum(dim=-1) # sum loss in class dims
360
- loss = cls_loss + reg_loss
361
- assert loss.size(0) == assigned_gt_inds.size(0)
362
- # Default loss value is 1e6 for a layer where no anchor is positive
363
- # to ensure it will not be chosen to back-propagate gradient
364
- losses_ = loss.new_full(labels_seq.shape, 1e6)
365
- for i, l in enumerate(labels_seq):
366
- match = assigned_gt_inds == l
367
- if match.any():
368
- losses_[i] = loss[match].mean()
369
- return losses_,
370
-
371
- def reweight_loss_single(self, cls_loss, reg_loss, assigned_gt_inds,
372
- labels, level, min_levels):
373
- """Reweight loss values at each level.
374
-
375
- Reassign loss values at each level by masking those where the
376
- pre-calculated loss is too large. Then return the reduced losses.
377
-
378
- Args:
379
- cls_loss (Tensor): Element-wise classification loss.
380
- Shape: (num_anchors, num_classes)
381
- reg_loss (Tensor): Element-wise regression loss.
382
- Shape: (num_anchors, 4)
383
- assigned_gt_inds (Tensor): The gt indices that each anchor bbox
384
- is assigned to. -1 denotes a negative anchor, otherwise it is the
385
- gt index (0-based). Shape: (num_anchors, ),
386
- labels (Tensor): Label assigned to anchors. Shape: (num_anchors, ).
387
- level (int): The current level index in the pyramid
388
- (0-4 for RetinaNet)
389
- min_levels (Tensor): The best-matching level for each gt.
390
- Shape: (num_gts, ),
391
-
392
- Returns:
393
- tuple:
394
- - cls_loss: Reduced corrected classification loss. Scalar.
395
- - reg_loss: Reduced corrected regression loss. Scalar.
396
- - pos_flags (Tensor): Corrected bool tensor indicating the
397
- final positive anchors. Shape: (num_anchors, ).
398
- """
399
- loc_weight = torch.ones_like(reg_loss)
400
- cls_weight = torch.ones_like(cls_loss)
401
- pos_flags = assigned_gt_inds >= 0 # positive pixel flag
402
- pos_indices = torch.nonzero(pos_flags, as_tuple=False).flatten()
403
-
404
- if pos_flags.any(): # pos pixels exist
405
- pos_assigned_gt_inds = assigned_gt_inds[pos_flags]
406
- zeroing_indices = (min_levels[pos_assigned_gt_inds] != level)
407
- neg_indices = pos_indices[zeroing_indices]
408
-
409
- if neg_indices.numel():
410
- pos_flags[neg_indices] = 0
411
- loc_weight[neg_indices] = 0
412
- # Only the weight corresponding to the label is
413
- # zeroed out if not selected
414
- zeroing_labels = labels[neg_indices]
415
- assert (zeroing_labels >= 0).all()
416
- cls_weight[neg_indices, zeroing_labels] = 0
417
-
418
- # Weighted loss for both cls and reg loss
419
- cls_loss = weight_reduce_loss(cls_loss, cls_weight, reduction='sum')
420
- reg_loss = weight_reduce_loss(reg_loss, loc_weight, reduction='sum')
421
-
422
- return cls_loss, reg_loss, pos_flags
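A small numeric illustration of the online feature selection performed in `loss` above: each ground-truth box is back-propagated only through the FPN level where its combined classification and regression loss is smallest. The loss values here are made up.

```python
import torch

loss_levels = torch.tensor([[1.2, 0.7, 3.0],    # level 0: loss of each of 3 gts
                            [0.9, 1.5, 2.1],    # level 1
                            [2.0, 0.8, 0.4]])   # level 2
_, argmin = loss_levels.min(dim=0)
print(argmin)   # tensor([1, 0, 2]) -> the best-matching level for each gt
```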
 
spaces/Andy1621/uniformer_image_segmentation/configs/fastscnn/README.md DELETED
@@ -1,22 +0,0 @@
1
- # Fast-SCNN for Semantic Segmentation
2
-
3
- ## Introduction
4
-
5
- <!-- [ALGORITHM] -->
6
-
7
- ```latex
8
- @article{poudel2019fast,
9
- title={Fast-scnn: Fast semantic segmentation network},
10
- author={Poudel, Rudra PK and Liwicki, Stephan and Cipolla, Roberto},
11
- journal={arXiv preprint arXiv:1902.04502},
12
- year={2019}
13
- }
14
- ```
15
-
16
- ## Results and models
17
-
18
- ### Cityscapes
19
-
20
- | Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
21
- | --------- | --------- | --------- | ------: | -------- | -------------- | ----: | ------------- | --------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
22
- | Fast-SCNN | Fast-SCNN | 512x1024 | 80000 | 8.4 | 63.61 | 69.06 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fast_scnn.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fast_scnn/fast_scnn_4x8_80k_lr0.12_cityscapes-f5096c79.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/fast_scnn/fast_scnn_4x8_80k_lr0.12_cityscapes-20200807_165744.log.json) |
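A hedged sketch of loading the released checkpoint with mmsegmentation's high-level API (mmseg 0.x); the local config and checkpoint paths below are assumptions and would normally come from the links in the table above.

```python
from mmseg.apis import init_segmentor, inference_segmentor

config = "configs/fastscnn/fast_scnn_4x8_80k_lr0.12_cityscapes.py"   # assumed local path
checkpoint = "fast_scnn_4x8_80k_lr0.12_cityscapes-f5096c79.pth"      # downloaded weights
model = init_segmentor(config, checkpoint, device="cuda:0")
result = inference_segmentor(model, "demo/demo.png")                 # list with one per-pixel label map
```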
 
spaces/Annotation-AI/fast-segment-everything-with-text-prompt/app.py DELETED
@@ -1,17 +0,0 @@
1
- import os
2
-
3
-
4
- github_user = os.environ.get("GITHUB_USER")
5
- github_token = os.environ.get("GITHUB_TOKEN")
6
-
7
- repo_name = "annotation-ai/mlwiz-technical-demo"
8
-
9
- os.system(f"export GITHUB_USER={github_user}")
10
- os.system(f"export GITHUB_TOKEN={github_token}")
11
- os.system(f"git clone https://{github_user}:{github_token}@github.com/{repo_name}")
12
-
13
- cwd0 = os.getcwd()
14
- cwd1 = os.path.join(cwd0, "mlwiz-technical-demo/sam")
15
- os.chdir(cwd1)
16
- os.system("pip install -r requirements.txt")
17
- os.system("python app_everything_text.py")
 
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/ops/psa_mask.py DELETED
@@ -1,92 +0,0 @@
1
- # Modified from https://github.com/hszhao/semseg/blob/master/lib/psa
2
- from torch import nn
3
- from torch.autograd import Function
4
- from torch.nn.modules.utils import _pair
5
-
6
- from ..utils import ext_loader
7
-
8
- ext_module = ext_loader.load_ext('_ext',
9
- ['psamask_forward', 'psamask_backward'])
10
-
11
-
12
- class PSAMaskFunction(Function):
13
-
14
- @staticmethod
15
- def symbolic(g, input, psa_type, mask_size):
16
- return g.op(
17
- 'mmcv::MMCVPSAMask',
18
- input,
19
- psa_type_i=psa_type,
20
- mask_size_i=mask_size)
21
-
22
- @staticmethod
23
- def forward(ctx, input, psa_type, mask_size):
24
- ctx.psa_type = psa_type
25
- ctx.mask_size = _pair(mask_size)
26
- ctx.save_for_backward(input)
27
-
28
- h_mask, w_mask = ctx.mask_size
29
- batch_size, channels, h_feature, w_feature = input.size()
30
- assert channels == h_mask * w_mask
31
- output = input.new_zeros(
32
- (batch_size, h_feature * w_feature, h_feature, w_feature))
33
-
34
- ext_module.psamask_forward(
35
- input,
36
- output,
37
- psa_type=psa_type,
38
- num_=batch_size,
39
- h_feature=h_feature,
40
- w_feature=w_feature,
41
- h_mask=h_mask,
42
- w_mask=w_mask,
43
- half_h_mask=(h_mask - 1) // 2,
44
- half_w_mask=(w_mask - 1) // 2)
45
- return output
46
-
47
- @staticmethod
48
- def backward(ctx, grad_output):
49
- input = ctx.saved_tensors[0]
50
- psa_type = ctx.psa_type
51
- h_mask, w_mask = ctx.mask_size
52
- batch_size, channels, h_feature, w_feature = input.size()
53
- grad_input = grad_output.new_zeros(
54
- (batch_size, channels, h_feature, w_feature))
55
- ext_module.psamask_backward(
56
- grad_output,
57
- grad_input,
58
- psa_type=psa_type,
59
- num_=batch_size,
60
- h_feature=h_feature,
61
- w_feature=w_feature,
62
- h_mask=h_mask,
63
- w_mask=w_mask,
64
- half_h_mask=(h_mask - 1) // 2,
65
- half_w_mask=(w_mask - 1) // 2)
66
- return grad_input, None, None, None
67
-
68
-
69
- psa_mask = PSAMaskFunction.apply
70
-
71
-
72
- class PSAMask(nn.Module):
73
-
74
- def __init__(self, psa_type, mask_size=None):
75
- super(PSAMask, self).__init__()
76
- assert psa_type in ['collect', 'distribute']
77
- if psa_type == 'collect':
78
- psa_type_enum = 0
79
- else:
80
- psa_type_enum = 1
81
- self.psa_type_enum = psa_type_enum
82
- self.mask_size = mask_size
83
- self.psa_type = psa_type
84
-
85
- def forward(self, input):
86
- return psa_mask(input, self.psa_type_enum, self.mask_size)
87
-
88
- def __repr__(self):
89
- s = self.__class__.__name__
90
- s += f'(psa_type={self.psa_type}, '
91
- s += f'mask_size={self.mask_size})'
92
- return s
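A shape-only usage sketch of the wrapper module above; it assumes mmcv-full is installed with its compiled ops so `mmcv.ops.PSAMask` is available, and uses a small made-up feature map.

```python
import torch
from mmcv.ops import PSAMask

psa = PSAMask("collect", mask_size=(3, 3))
x = torch.randn(2, 9, 10, 12)     # (N, h_mask * w_mask, H, W); 9 == 3 * 3
out = psa(x)
print(out.shape)                  # torch.Size([2, 120, 10, 12]) == (N, H * W, H, W)
```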
 
spaces/Anonymous-sub/Rerender/ControlNet/ldm/models/diffusion/dpm_solver/dpm_solver.py DELETED
@@ -1,1154 +0,0 @@
1
- import torch
2
- import torch.nn.functional as F
3
- import math
4
- from tqdm import tqdm
5
-
6
-
7
- class NoiseScheduleVP:
8
- def __init__(
9
- self,
10
- schedule='discrete',
11
- betas=None,
12
- alphas_cumprod=None,
13
- continuous_beta_0=0.1,
14
- continuous_beta_1=20.,
15
- ):
16
- """Create a wrapper class for the forward SDE (VP type).
17
- ***
18
- Update: We support discrete-time diffusion models by implementing a piecewise linear interpolation for log_alpha_t.
19
- We recommend to use schedule='discrete' for the discrete-time diffusion models, especially for high-resolution images.
20
- ***
21
- The forward SDE ensures that the conditional distribution q_{t|0}(x_t | x_0) = N ( alpha_t * x_0, sigma_t^2 * I ).
22
- We further define lambda_t = log(alpha_t) - log(sigma_t), which is the half-logSNR (described in the DPM-Solver paper).
23
- Therefore, we implement the functions for computing alpha_t, sigma_t and lambda_t. For t in [0, T], we have:
24
- log_alpha_t = self.marginal_log_mean_coeff(t)
25
- sigma_t = self.marginal_std(t)
26
- lambda_t = self.marginal_lambda(t)
27
- Moreover, as lambda(t) is an invertible function, we also support its inverse function:
28
- t = self.inverse_lambda(lambda_t)
29
- ===============================================================
30
- We support both discrete-time DPMs (trained on n = 0, 1, ..., N-1) and continuous-time DPMs (trained on t in [t_0, T]).
31
- 1. For discrete-time DPMs:
32
- For discrete-time DPMs trained on n = 0, 1, ..., N-1, we convert the discrete steps to continuous time steps by:
33
- t_i = (i + 1) / N
34
- e.g. for N = 1000, we have t_0 = 1e-3 and T = t_{N-1} = 1.
35
- We solve the corresponding diffusion ODE from time T = 1 to time t_0 = 1e-3.
36
- Args:
37
- betas: A `torch.Tensor`. The beta array for the discrete-time DPM. (See the original DDPM paper for details)
38
- alphas_cumprod: A `torch.Tensor`. The cumprod alphas for the discrete-time DPM. (See the original DDPM paper for details)
39
- Note that we always have alphas_cumprod = cumprod(1 - betas). Therefore, we only need to set one of `betas` and `alphas_cumprod`.
40
- **Important**: Please pay special attention for the args for `alphas_cumprod`:
41
- The `alphas_cumprod` is the \hat{alpha_n} arrays in the notations of DDPM. Specifically, DDPMs assume that
42
- q_{t_n | 0}(x_{t_n} | x_0) = N ( \sqrt{\hat{alpha_n}} * x_0, (1 - \hat{alpha_n}) * I ).
43
- Therefore, the notation \hat{alpha_n} is different from the notation alpha_t in DPM-Solver. In fact, we have
44
- alpha_{t_n} = \sqrt{\hat{alpha_n}},
45
- and
46
- log(alpha_{t_n}) = 0.5 * log(\hat{alpha_n}).
47
- 2. For continuous-time DPMs:
48
- We support two types of VPSDEs: linear (DDPM) and cosine (improved-DDPM). The hyperparameters for the noise
49
- schedule are the default settings in DDPM and improved-DDPM:
50
- Args:
51
- beta_min: A `float` number. The smallest beta for the linear schedule.
52
- beta_max: A `float` number. The largest beta for the linear schedule.
53
- cosine_s: A `float` number. The hyperparameter in the cosine schedule.
54
- cosine_beta_max: A `float` number. The hyperparameter in the cosine schedule.
55
- T: A `float` number. The ending time of the forward process.
56
- ===============================================================
57
- Args:
58
- schedule: A `str`. The noise schedule of the forward SDE. 'discrete' for discrete-time DPMs,
59
- 'linear' or 'cosine' for continuous-time DPMs.
60
- Returns:
61
- A wrapper object of the forward SDE (VP type).
62
-
63
- ===============================================================
64
- Example:
65
- # For discrete-time DPMs, given betas (the beta array for n = 0, 1, ..., N - 1):
66
- >>> ns = NoiseScheduleVP('discrete', betas=betas)
67
- # For discrete-time DPMs, given alphas_cumprod (the \hat{alpha_n} array for n = 0, 1, ..., N - 1):
68
- >>> ns = NoiseScheduleVP('discrete', alphas_cumprod=alphas_cumprod)
69
- # For continuous-time DPMs (VPSDE), linear schedule:
70
- >>> ns = NoiseScheduleVP('linear', continuous_beta_0=0.1, continuous_beta_1=20.)
71
- """
72
-
73
- if schedule not in ['discrete', 'linear', 'cosine']:
74
- raise ValueError(
75
- "Unsupported noise schedule {}. The schedule needs to be 'discrete' or 'linear' or 'cosine'".format(
76
- schedule))
77
-
78
- self.schedule = schedule
79
- if schedule == 'discrete':
80
- if betas is not None:
81
- log_alphas = 0.5 * torch.log(1 - betas).cumsum(dim=0)
82
- else:
83
- assert alphas_cumprod is not None
84
- log_alphas = 0.5 * torch.log(alphas_cumprod)
85
- self.total_N = len(log_alphas)
86
- self.T = 1.
87
- self.t_array = torch.linspace(0., 1., self.total_N + 1)[1:].reshape((1, -1))
88
- self.log_alpha_array = log_alphas.reshape((1, -1,))
89
- else:
90
- self.total_N = 1000
91
- self.beta_0 = continuous_beta_0
92
- self.beta_1 = continuous_beta_1
93
- self.cosine_s = 0.008
94
- self.cosine_beta_max = 999.
95
- self.cosine_t_max = math.atan(self.cosine_beta_max * (1. + self.cosine_s) / math.pi) * 2. * (
96
- 1. + self.cosine_s) / math.pi - self.cosine_s
97
- self.cosine_log_alpha_0 = math.log(math.cos(self.cosine_s / (1. + self.cosine_s) * math.pi / 2.))
98
- self.schedule = schedule
99
- if schedule == 'cosine':
100
- # For the cosine schedule, T = 1 will have numerical issues. So we manually set the ending time T.
101
- # Note that T = 0.9946 may be not the optimal setting. However, we find it works well.
102
- self.T = 0.9946
103
- else:
104
- self.T = 1.
105
-
106
- def marginal_log_mean_coeff(self, t):
107
- """
108
- Compute log(alpha_t) of a given continuous-time label t in [0, T].
109
- """
110
- if self.schedule == 'discrete':
111
- return interpolate_fn(t.reshape((-1, 1)), self.t_array.to(t.device),
112
- self.log_alpha_array.to(t.device)).reshape((-1))
113
- elif self.schedule == 'linear':
114
- return -0.25 * t ** 2 * (self.beta_1 - self.beta_0) - 0.5 * t * self.beta_0
115
- elif self.schedule == 'cosine':
116
- log_alpha_fn = lambda s: torch.log(torch.cos((s + self.cosine_s) / (1. + self.cosine_s) * math.pi / 2.))
117
- log_alpha_t = log_alpha_fn(t) - self.cosine_log_alpha_0
118
- return log_alpha_t
119
-
120
- def marginal_alpha(self, t):
121
- """
122
- Compute alpha_t of a given continuous-time label t in [0, T].
123
- """
124
- return torch.exp(self.marginal_log_mean_coeff(t))
125
-
126
- def marginal_std(self, t):
127
- """
128
- Compute sigma_t of a given continuous-time label t in [0, T].
129
- """
130
- return torch.sqrt(1. - torch.exp(2. * self.marginal_log_mean_coeff(t)))
131
-
132
- def marginal_lambda(self, t):
133
- """
134
- Compute lambda_t = log(alpha_t) - log(sigma_t) of a given continuous-time label t in [0, T].
135
- """
136
- log_mean_coeff = self.marginal_log_mean_coeff(t)
137
- log_std = 0.5 * torch.log(1. - torch.exp(2. * log_mean_coeff))
138
- return log_mean_coeff - log_std
139
-
140
- def inverse_lambda(self, lamb):
141
- """
142
- Compute the continuous-time label t in [0, T] of a given half-logSNR lambda_t.
143
- """
144
- if self.schedule == 'linear':
145
- tmp = 2. * (self.beta_1 - self.beta_0) * torch.logaddexp(-2. * lamb, torch.zeros((1,)).to(lamb))
146
- Delta = self.beta_0 ** 2 + tmp
147
- return tmp / (torch.sqrt(Delta) + self.beta_0) / (self.beta_1 - self.beta_0)
148
- elif self.schedule == 'discrete':
149
- log_alpha = -0.5 * torch.logaddexp(torch.zeros((1,)).to(lamb.device), -2. * lamb)
150
- t = interpolate_fn(log_alpha.reshape((-1, 1)), torch.flip(self.log_alpha_array.to(lamb.device), [1]),
151
- torch.flip(self.t_array.to(lamb.device), [1]))
152
- return t.reshape((-1,))
153
- else:
154
- log_alpha = -0.5 * torch.logaddexp(-2. * lamb, torch.zeros((1,)).to(lamb))
155
- t_fn = lambda log_alpha_t: torch.arccos(torch.exp(log_alpha_t + self.cosine_log_alpha_0)) * 2. * (
156
- 1. + self.cosine_s) / math.pi - self.cosine_s
157
- t = t_fn(log_alpha)
158
- return t
159
-
160
-
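# A minimal usage sketch of the schedule wrapper above, assuming a hypothetical
# linear beta array standing in for the betas of a trained discrete-time DPM:
#     betas = torch.linspace(1e-4, 2e-2, 1000)
#     ns = NoiseScheduleVP('discrete', betas=betas)
#     t = torch.tensor([0.5])
#     alpha_t, sigma_t, lambda_t = ns.marginal_alpha(t), ns.marginal_std(t), ns.marginal_lambda(t)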
161
- def model_wrapper(
162
- model,
163
- noise_schedule,
164
- model_type="noise",
165
- model_kwargs={},
166
- guidance_type="uncond",
167
- condition=None,
168
- unconditional_condition=None,
169
- guidance_scale=1.,
170
- classifier_fn=None,
171
- classifier_kwargs={},
172
- ):
173
- """Create a wrapper function for the noise prediction model.
174
- DPM-Solver needs to solve the continuous-time diffusion ODEs. For DPMs trained on discrete-time labels, we need to
175
- first wrap the model function into a noise prediction model that accepts the continuous time as the input.
176
- We support four types of the diffusion model by setting `model_type`:
177
- 1. "noise": noise prediction model. (Trained by predicting noise).
178
- 2. "x_start": data prediction model. (Trained by predicting the data x_0 at time 0).
179
- 3. "v": velocity prediction model. (Trained by predicting the velocity).
180
- The "v" prediction is derived in detail in Appendix D of [1], and is used in Imagen-Video [2].
181
- [1] Salimans, Tim, and Jonathan Ho. "Progressive distillation for fast sampling of diffusion models."
182
- arXiv preprint arXiv:2202.00512 (2022).
183
- [2] Ho, Jonathan, et al. "Imagen Video: High Definition Video Generation with Diffusion Models."
184
- arXiv preprint arXiv:2210.02303 (2022).
185
-
186
- 4. "score": marginal score function. (Trained by denoising score matching).
187
- Note that the score function and the noise prediction model follows a simple relationship:
188
- ```
189
- noise(x_t, t) = -sigma_t * score(x_t, t)
190
- ```
191
- We support three types of guided sampling by DPMs by setting `guidance_type`:
192
- 1. "uncond": unconditional sampling by DPMs.
193
- The input `model` has the following format:
194
- ``
195
- model(x, t_input, **model_kwargs) -> noise | x_start | v | score
196
- ``
197
- 2. "classifier": classifier guidance sampling [3] by DPMs and another classifier.
198
- The input `model` has the following format:
199
- ``
200
- model(x, t_input, **model_kwargs) -> noise | x_start | v | score
201
- ``
202
- The input `classifier_fn` has the following format:
203
- ``
204
- classifier_fn(x, t_input, cond, **classifier_kwargs) -> logits(x, t_input, cond)
205
- ``
206
- [3] P. Dhariwal and A. Q. Nichol, "Diffusion models beat GANs on image synthesis,"
207
- in Advances in Neural Information Processing Systems, vol. 34, 2021, pp. 8780-8794.
208
- 3. "classifier-free": classifier-free guidance sampling by conditional DPMs.
209
- The input `model` has the following format:
210
- ``
211
- model(x, t_input, cond, **model_kwargs) -> noise | x_start | v | score
212
- ``
213
- And if cond == `unconditional_condition`, the model output is the unconditional DPM output.
214
- [4] Ho, Jonathan, and Tim Salimans. "Classifier-free diffusion guidance."
215
- arXiv preprint arXiv:2207.12598 (2022).
216
-
217
- The `t_input` is the time label of the model, which may be discrete-time labels (i.e. 0 to 999)
218
- or continuous-time labels (i.e. epsilon to T).
219
- We wrap the model function to accept only `x` and `t_continuous` as inputs, and outputs the predicted noise:
220
- ``
221
- def model_fn(x, t_continuous) -> noise:
222
- t_input = get_model_input_time(t_continuous)
223
- return noise_pred(model, x, t_input, **model_kwargs)
224
- ``
225
- where `t_continuous` is the continuous time labels (i.e. epsilon to T). And we use `model_fn` for DPM-Solver.
226
- ===============================================================
227
- Args:
228
- model: A diffusion model with the corresponding format described above.
229
- noise_schedule: A noise schedule object, such as NoiseScheduleVP.
230
- model_type: A `str`. The parameterization type of the diffusion model.
231
- "noise" or "x_start" or "v" or "score".
232
- model_kwargs: A `dict`. A dict for the other inputs of the model function.
233
- guidance_type: A `str`. The type of the guidance for sampling.
234
- "uncond" or "classifier" or "classifier-free".
235
- condition: A pytorch tensor. The condition for the guided sampling.
236
- Only used for "classifier" or "classifier-free" guidance type.
237
- unconditional_condition: A pytorch tensor. The condition for the unconditional sampling.
238
- Only used for "classifier-free" guidance type.
239
- guidance_scale: A `float`. The scale for the guided sampling.
240
- classifier_fn: A classifier function. Only used for the classifier guidance.
241
- classifier_kwargs: A `dict`. A dict for the other inputs of the classifier function.
242
- Returns:
243
- A noise prediction model that accepts the noised data and the continuous time as the inputs.
244
- """
245
-
246
- def get_model_input_time(t_continuous):
247
- """
248
- Convert the continuous-time `t_continuous` (in [epsilon, T]) to the model input time.
249
- For discrete-time DPMs, we convert `t_continuous` in [1 / N, 1] to `t_input` in [0, 1000 * (N - 1) / N].
250
- For continuous-time DPMs, we just use `t_continuous`.
251
- """
252
- if noise_schedule.schedule == 'discrete':
253
- return (t_continuous - 1. / noise_schedule.total_N) * 1000.
254
- else:
255
- return t_continuous
256
-
257
- def noise_pred_fn(x, t_continuous, cond=None):
258
- if t_continuous.reshape((-1,)).shape[0] == 1:
259
- t_continuous = t_continuous.expand((x.shape[0]))
260
- t_input = get_model_input_time(t_continuous)
261
- if cond is None:
262
- output = model(x, t_input, **model_kwargs)
263
- else:
264
- output = model(x, t_input, cond, **model_kwargs)
265
- if model_type == "noise":
266
- return output
267
- elif model_type == "x_start":
268
- alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous)
269
- dims = x.dim()
270
- return (x - expand_dims(alpha_t, dims) * output) / expand_dims(sigma_t, dims)
271
- elif model_type == "v":
272
- alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous)
273
- dims = x.dim()
274
- return expand_dims(alpha_t, dims) * output + expand_dims(sigma_t, dims) * x
275
- elif model_type == "score":
276
- sigma_t = noise_schedule.marginal_std(t_continuous)
277
- dims = x.dim()
278
- return -expand_dims(sigma_t, dims) * output
279
-
280
- def cond_grad_fn(x, t_input):
281
- """
282
- Compute the gradient of the classifier, i.e. nabla_{x} log p_t(cond | x_t).
283
- """
284
- with torch.enable_grad():
285
- x_in = x.detach().requires_grad_(True)
286
- log_prob = classifier_fn(x_in, t_input, condition, **classifier_kwargs)
287
- return torch.autograd.grad(log_prob.sum(), x_in)[0]
288
-
289
- def model_fn(x, t_continuous):
290
- """
291
- The noise prediction model function that is used for DPM-Solver.
292
- """
293
- if t_continuous.reshape((-1,)).shape[0] == 1:
294
- t_continuous = t_continuous.expand((x.shape[0]))
295
- if guidance_type == "uncond":
296
- return noise_pred_fn(x, t_continuous)
297
- elif guidance_type == "classifier":
298
- assert classifier_fn is not None
299
- t_input = get_model_input_time(t_continuous)
300
- cond_grad = cond_grad_fn(x, t_input)
301
- sigma_t = noise_schedule.marginal_std(t_continuous)
302
- noise = noise_pred_fn(x, t_continuous)
303
- return noise - guidance_scale * expand_dims(sigma_t, dims=cond_grad.dim()) * cond_grad
304
- elif guidance_type == "classifier-free":
305
- if guidance_scale == 1. or unconditional_condition is None:
306
- return noise_pred_fn(x, t_continuous, cond=condition)
307
- else:
308
- x_in = torch.cat([x] * 2)
309
- t_in = torch.cat([t_continuous] * 2)
310
- c_in = torch.cat([unconditional_condition, condition])
311
- noise_uncond, noise = noise_pred_fn(x_in, t_in, cond=c_in).chunk(2)
312
- return noise_uncond + guidance_scale * (noise - noise_uncond)
313
-
314
- assert model_type in ["noise", "x_start", "v", "score"]
315
- assert guidance_type in ["uncond", "classifier", "classifier-free"]
316
- return model_fn
317
-
318
-
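# A hedged sketch of the classifier-guidance wiring described above; `unet`,
# `class_labels` and `classifier` are hypothetical stand-ins for the user's own
# unconditional network (called as unet(x, t_input)), labels, and a classifier
# returning log-probabilities of the condition:
#     model_fn = model_wrapper(
#         model=unet, noise_schedule=ns, model_type="noise",
#         guidance_type="classifier", condition=class_labels,
#         guidance_scale=4.0, classifier_fn=classifier,
#     )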
319
- class DPM_Solver:
320
- def __init__(self, model_fn, noise_schedule, predict_x0=False, thresholding=False, max_val=1.):
321
- """Construct a DPM-Solver.
322
- We support both the noise prediction model ("predicting epsilon") and the data prediction model ("predicting x0").
323
- If `predict_x0` is False, we use the solver for the noise prediction model (DPM-Solver).
324
- If `predict_x0` is True, we use the solver for the data prediction model (DPM-Solver++).
325
- In such case, we further support the "dynamic thresholding" in [1] when `thresholding` is True.
326
- The "dynamic thresholding" can greatly improve the sample quality for pixel-space DPMs with large guidance scales.
327
- Args:
328
- model_fn: A noise prediction model function which accepts the continuous-time input (t in [epsilon, T]):
329
- ``
330
- def model_fn(x, t_continuous):
331
- return noise
332
- ``
333
- noise_schedule: A noise schedule object, such as NoiseScheduleVP.
334
- predict_x0: A `bool`. If true, use the data prediction model; else, use the noise prediction model.
335
- thresholding: A `bool`. Valid when `predict_x0` is True. Whether to use the "dynamic thresholding" in [1].
336
- max_val: A `float`. Valid when both `predict_x0` and `thresholding` are True. The max value for thresholding.
337
-
338
- [1] Chitwan Saharia, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily Denton, Seyed Kamyar Seyed Ghasemipour, Burcu Karagol Ayan, S Sara Mahdavi, Rapha Gontijo Lopes, et al. Photorealistic text-to-image diffusion models with deep language understanding. arXiv preprint arXiv:2205.11487, 2022b.
339
- """
340
- self.model = model_fn
341
- self.noise_schedule = noise_schedule
342
- self.predict_x0 = predict_x0
343
- self.thresholding = thresholding
344
- self.max_val = max_val
345
-
346
- def noise_prediction_fn(self, x, t):
347
- """
348
- Return the noise prediction model.
349
- """
350
- return self.model(x, t)
351
-
352
- def data_prediction_fn(self, x, t):
353
- """
354
- Return the data prediction model (with thresholding).
355
- """
356
- noise = self.noise_prediction_fn(x, t)
357
- dims = x.dim()
358
- alpha_t, sigma_t = self.noise_schedule.marginal_alpha(t), self.noise_schedule.marginal_std(t)
359
- x0 = (x - expand_dims(sigma_t, dims) * noise) / expand_dims(alpha_t, dims)
360
- if self.thresholding:
361
- p = 0.995 # A hyperparameter in the paper of "Imagen" [1].
362
- s = torch.quantile(torch.abs(x0).reshape((x0.shape[0], -1)), p, dim=1)
363
- s = expand_dims(torch.maximum(s, self.max_val * torch.ones_like(s).to(s.device)), dims)
364
- x0 = torch.clamp(x0, -s, s) / s
365
- return x0
366
-
367
- def model_fn(self, x, t):
368
- """
369
- Convert the model to the noise prediction model or the data prediction model.
370
- """
371
- if self.predict_x0:
372
- return self.data_prediction_fn(x, t)
373
- else:
374
- return self.noise_prediction_fn(x, t)
375
-
376
- def get_time_steps(self, skip_type, t_T, t_0, N, device):
377
- """Compute the intermediate time steps for sampling.
378
- Args:
379
- skip_type: A `str`. The type for the spacing of the time steps. We support three types:
380
- - 'logSNR': uniform logSNR for the time steps.
381
- - 'time_uniform': uniform time for the time steps. (**Recommended for high-resolution data**.)
382
- - 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolution data.)
383
- t_T: A `float`. The starting time of the sampling (default is T).
384
- t_0: A `float`. The ending time of the sampling (default is epsilon).
385
- N: A `int`. The total number of the spacing of the time steps.
386
- device: A torch device.
387
- Returns:
388
- A pytorch tensor of the time steps, with the shape (N + 1,).
389
- """
390
- if skip_type == 'logSNR':
391
- lambda_T = self.noise_schedule.marginal_lambda(torch.tensor(t_T).to(device))
392
- lambda_0 = self.noise_schedule.marginal_lambda(torch.tensor(t_0).to(device))
393
- logSNR_steps = torch.linspace(lambda_T.cpu().item(), lambda_0.cpu().item(), N + 1).to(device)
394
- return self.noise_schedule.inverse_lambda(logSNR_steps)
395
- elif skip_type == 'time_uniform':
396
- return torch.linspace(t_T, t_0, N + 1).to(device)
397
- elif skip_type == 'time_quadratic':
398
- t_order = 2
399
- t = torch.linspace(t_T ** (1. / t_order), t_0 ** (1. / t_order), N + 1).pow(t_order).to(device)
400
- return t
401
- else:
402
- raise ValueError(
403
- "Unsupported skip_type {}, need to be 'logSNR' or 'time_uniform' or 'time_quadratic'".format(skip_type))
404
-
405
- def get_orders_and_timesteps_for_singlestep_solver(self, steps, order, skip_type, t_T, t_0, device):
406
- """
407
- Get the order of each step for sampling by the singlestep DPM-Solver.
408
- We combine both DPM-Solver-1,2,3 to use all the function evaluations, which is named as "DPM-Solver-fast".
409
- Given a fixed number of function evaluations by `steps`, the sampling procedure by DPM-Solver-fast is:
410
- - If order == 1:
411
- We take `steps` of DPM-Solver-1 (i.e. DDIM).
412
- - If order == 2:
413
- - Denote K = (steps // 2). We take K or (K + 1) intermediate time steps for sampling.
414
- - If steps % 2 == 0, we use K steps of DPM-Solver-2.
415
- - If steps % 2 == 1, we use K steps of DPM-Solver-2 and 1 step of DPM-Solver-1.
416
- - If order == 3:
417
- - Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling.
418
- - If steps % 3 == 0, we use (K - 2) steps of DPM-Solver-3, and 1 step of DPM-Solver-2 and 1 step of DPM-Solver-1.
419
- - If steps % 3 == 1, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-1.
420
- - If steps % 3 == 2, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-2.
421
- ============================================
422
- Args:
423
- order: A `int`. The max order for the solver (2 or 3).
424
- steps: A `int`. The total number of function evaluations (NFE).
425
- skip_type: A `str`. The type for the spacing of the time steps. We support three types:
426
- - 'logSNR': uniform logSNR for the time steps.
427
- - 'time_uniform': uniform time for the time steps. (**Recommended for high-resolution data**.)
428
- - 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolution data.)
429
- t_T: A `float`. The starting time of the sampling (default is T).
430
- t_0: A `float`. The ending time of the sampling (default is epsilon).
431
- device: A torch device.
432
- Returns:
433
- orders: A list of the solver order of each step.
434
- """
435
- if order == 3:
436
- K = steps // 3 + 1
437
- if steps % 3 == 0:
438
- orders = [3, ] * (K - 2) + [2, 1]
439
- elif steps % 3 == 1:
440
- orders = [3, ] * (K - 1) + [1]
441
- else:
442
- orders = [3, ] * (K - 1) + [2]
443
- elif order == 2:
444
- if steps % 2 == 0:
445
- K = steps // 2
446
- orders = [2, ] * K
447
- else:
448
- K = steps // 2 + 1
449
- orders = [2, ] * (K - 1) + [1]
450
- elif order == 1:
451
- K = 1
452
- orders = [1, ] * steps
453
- else:
454
- raise ValueError("'order' must be '1' or '2' or '3'.")
455
- if skip_type == 'logSNR':
456
- # To reproduce the results in DPM-Solver paper
457
- timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, K, device)
458
- else:
459
- timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, steps, device)[
460
- torch.cumsum(torch.tensor([0, ] + orders), dim=0).to(device)]
461
- return timesteps_outer, orders
462
-
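# Worked example of the splitting rule above: with steps = 20 and order = 3,
# K = 20 // 3 + 1 = 7 and steps % 3 == 2, so orders = [3, 3, 3, 3, 3, 3, 2],
# which spends exactly 6 * 3 + 2 = 20 function evaluations.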
463
- def denoise_to_zero_fn(self, x, s):
464
- """
465
- Denoise at the final step, which is equivalent to solving the ODE from lambda_s to infty by first-order discretization.
466
- """
467
- return self.data_prediction_fn(x, s)
468
-
469
- def dpm_solver_first_update(self, x, s, t, model_s=None, return_intermediate=False):
470
- """
471
- DPM-Solver-1 (equivalent to DDIM) from time `s` to time `t`.
472
- Args:
473
- x: A pytorch tensor. The initial value at time `s`.
474
- s: A pytorch tensor. The starting time, with the shape (x.shape[0],).
475
- t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
476
- model_s: A pytorch tensor. The model function evaluated at time `s`.
477
- If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.
478
- return_intermediate: A `bool`. If true, also return the model value at time `s`.
479
- Returns:
480
- x_t: A pytorch tensor. The approximated solution at time `t`.
481
- """
482
- ns = self.noise_schedule
483
- dims = x.dim()
484
- lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)
485
- h = lambda_t - lambda_s
486
- log_alpha_s, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(t)
487
- sigma_s, sigma_t = ns.marginal_std(s), ns.marginal_std(t)
488
- alpha_t = torch.exp(log_alpha_t)
489
-
490
- if self.predict_x0:
491
- phi_1 = torch.expm1(-h)
492
- if model_s is None:
493
- model_s = self.model_fn(x, s)
494
- x_t = (
495
- expand_dims(sigma_t / sigma_s, dims) * x
496
- - expand_dims(alpha_t * phi_1, dims) * model_s
497
- )
498
- if return_intermediate:
499
- return x_t, {'model_s': model_s}
500
- else:
501
- return x_t
502
- else:
503
- phi_1 = torch.expm1(h)
504
- if model_s is None:
505
- model_s = self.model_fn(x, s)
506
- x_t = (
507
- expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x
508
- - expand_dims(sigma_t * phi_1, dims) * model_s
509
- )
510
- if return_intermediate:
511
- return x_t, {'model_s': model_s}
512
- else:
513
- return x_t
514
-
515
- def singlestep_dpm_solver_second_update(self, x, s, t, r1=0.5, model_s=None, return_intermediate=False,
516
- solver_type='dpm_solver'):
517
- """
518
- Singlestep solver DPM-Solver-2 from time `s` to time `t`.
519
- Args:
520
- x: A pytorch tensor. The initial value at time `s`.
521
- s: A pytorch tensor. The starting time, with the shape (x.shape[0],).
522
- t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
523
- r1: A `float`. The hyperparameter of the second-order solver.
524
- model_s: A pytorch tensor. The model function evaluated at time `s`.
525
- If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.
526
- return_intermediate: A `bool`. If true, also return the model value at time `s` and `s1` (the intermediate time).
527
- solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
528
- The type slightly impacts the performance. We recommend to use 'dpm_solver' type.
529
- Returns:
530
- x_t: A pytorch tensor. The approximated solution at time `t`.
531
- """
532
- if solver_type not in ['dpm_solver', 'taylor']:
533
- raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type))
534
- if r1 is None:
535
- r1 = 0.5
536
- ns = self.noise_schedule
537
- dims = x.dim()
538
- lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)
539
- h = lambda_t - lambda_s
540
- lambda_s1 = lambda_s + r1 * h
541
- s1 = ns.inverse_lambda(lambda_s1)
542
- log_alpha_s, log_alpha_s1, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(
543
- s1), ns.marginal_log_mean_coeff(t)
544
- sigma_s, sigma_s1, sigma_t = ns.marginal_std(s), ns.marginal_std(s1), ns.marginal_std(t)
545
- alpha_s1, alpha_t = torch.exp(log_alpha_s1), torch.exp(log_alpha_t)
546
-
547
- if self.predict_x0:
548
- phi_11 = torch.expm1(-r1 * h)
549
- phi_1 = torch.expm1(-h)
550
-
551
- if model_s is None:
552
- model_s = self.model_fn(x, s)
553
- x_s1 = (
554
- expand_dims(sigma_s1 / sigma_s, dims) * x
555
- - expand_dims(alpha_s1 * phi_11, dims) * model_s
556
- )
557
- model_s1 = self.model_fn(x_s1, s1)
558
- if solver_type == 'dpm_solver':
559
- x_t = (
560
- expand_dims(sigma_t / sigma_s, dims) * x
561
- - expand_dims(alpha_t * phi_1, dims) * model_s
562
- - (0.5 / r1) * expand_dims(alpha_t * phi_1, dims) * (model_s1 - model_s)
563
- )
564
- elif solver_type == 'taylor':
565
- x_t = (
566
- expand_dims(sigma_t / sigma_s, dims) * x
567
- - expand_dims(alpha_t * phi_1, dims) * model_s
568
- + (1. / r1) * expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * (
569
- model_s1 - model_s)
570
- )
571
- else:
572
- phi_11 = torch.expm1(r1 * h)
573
- phi_1 = torch.expm1(h)
574
-
575
- if model_s is None:
576
- model_s = self.model_fn(x, s)
577
- x_s1 = (
578
- expand_dims(torch.exp(log_alpha_s1 - log_alpha_s), dims) * x
579
- - expand_dims(sigma_s1 * phi_11, dims) * model_s
580
- )
581
- model_s1 = self.model_fn(x_s1, s1)
582
- if solver_type == 'dpm_solver':
583
- x_t = (
584
- expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x
585
- - expand_dims(sigma_t * phi_1, dims) * model_s
586
- - (0.5 / r1) * expand_dims(sigma_t * phi_1, dims) * (model_s1 - model_s)
587
- )
588
- elif solver_type == 'taylor':
589
- x_t = (
590
- expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x
591
- - expand_dims(sigma_t * phi_1, dims) * model_s
592
- - (1. / r1) * expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * (model_s1 - model_s)
593
- )
594
- if return_intermediate:
595
- return x_t, {'model_s': model_s, 'model_s1': model_s1}
596
- else:
597
- return x_t
598
-
599
- def singlestep_dpm_solver_third_update(self, x, s, t, r1=1. / 3., r2=2. / 3., model_s=None, model_s1=None,
600
- return_intermediate=False, solver_type='dpm_solver'):
601
- """
602
- Singlestep solver DPM-Solver-3 from time `s` to time `t`.
603
- Args:
604
- x: A pytorch tensor. The initial value at time `s`.
605
- s: A pytorch tensor. The starting time, with the shape (x.shape[0],).
606
- t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
607
- r1: A `float`. The hyperparameter of the third-order solver.
608
- r2: A `float`. The hyperparameter of the third-order solver.
609
- model_s: A pytorch tensor. The model function evaluated at time `s`.
610
- If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.
611
- model_s1: A pytorch tensor. The model function evaluated at time `s1` (the intermediate time given by `r1`).
612
- If `model_s1` is None, we evaluate the model at `s1`; otherwise we directly use it.
613
- return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times).
614
- solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
615
- The type slightly impacts the performance. We recommend to use 'dpm_solver' type.
616
- Returns:
617
- x_t: A pytorch tensor. The approximated solution at time `t`.
618
- """
619
- if solver_type not in ['dpm_solver', 'taylor']:
620
- raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type))
621
- if r1 is None:
622
- r1 = 1. / 3.
623
- if r2 is None:
624
- r2 = 2. / 3.
625
- ns = self.noise_schedule
626
- dims = x.dim()
627
- lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)
628
- h = lambda_t - lambda_s
629
- lambda_s1 = lambda_s + r1 * h
630
- lambda_s2 = lambda_s + r2 * h
631
- s1 = ns.inverse_lambda(lambda_s1)
632
- s2 = ns.inverse_lambda(lambda_s2)
633
- log_alpha_s, log_alpha_s1, log_alpha_s2, log_alpha_t = ns.marginal_log_mean_coeff(
634
- s), ns.marginal_log_mean_coeff(s1), ns.marginal_log_mean_coeff(s2), ns.marginal_log_mean_coeff(t)
635
- sigma_s, sigma_s1, sigma_s2, sigma_t = ns.marginal_std(s), ns.marginal_std(s1), ns.marginal_std(
636
- s2), ns.marginal_std(t)
637
- alpha_s1, alpha_s2, alpha_t = torch.exp(log_alpha_s1), torch.exp(log_alpha_s2), torch.exp(log_alpha_t)
638
-
639
- if self.predict_x0:
640
- phi_11 = torch.expm1(-r1 * h)
641
- phi_12 = torch.expm1(-r2 * h)
642
- phi_1 = torch.expm1(-h)
643
- phi_22 = torch.expm1(-r2 * h) / (r2 * h) + 1.
644
- phi_2 = phi_1 / h + 1.
645
- phi_3 = phi_2 / h - 0.5
646
-
647
- if model_s is None:
648
- model_s = self.model_fn(x, s)
649
- if model_s1 is None:
650
- x_s1 = (
651
- expand_dims(sigma_s1 / sigma_s, dims) * x
652
- - expand_dims(alpha_s1 * phi_11, dims) * model_s
653
- )
654
- model_s1 = self.model_fn(x_s1, s1)
655
- x_s2 = (
656
- expand_dims(sigma_s2 / sigma_s, dims) * x
657
- - expand_dims(alpha_s2 * phi_12, dims) * model_s
658
- + r2 / r1 * expand_dims(alpha_s2 * phi_22, dims) * (model_s1 - model_s)
659
- )
660
- model_s2 = self.model_fn(x_s2, s2)
661
- if solver_type == 'dpm_solver':
662
- x_t = (
663
- expand_dims(sigma_t / sigma_s, dims) * x
664
- - expand_dims(alpha_t * phi_1, dims) * model_s
665
- + (1. / r2) * expand_dims(alpha_t * phi_2, dims) * (model_s2 - model_s)
666
- )
667
- elif solver_type == 'taylor':
668
- D1_0 = (1. / r1) * (model_s1 - model_s)
669
- D1_1 = (1. / r2) * (model_s2 - model_s)
670
- D1 = (r2 * D1_0 - r1 * D1_1) / (r2 - r1)
671
- D2 = 2. * (D1_1 - D1_0) / (r2 - r1)
672
- x_t = (
673
- expand_dims(sigma_t / sigma_s, dims) * x
674
- - expand_dims(alpha_t * phi_1, dims) * model_s
675
- + expand_dims(alpha_t * phi_2, dims) * D1
676
- - expand_dims(alpha_t * phi_3, dims) * D2
677
- )
678
- else:
679
- phi_11 = torch.expm1(r1 * h)
680
- phi_12 = torch.expm1(r2 * h)
681
- phi_1 = torch.expm1(h)
682
- phi_22 = torch.expm1(r2 * h) / (r2 * h) - 1.
683
- phi_2 = phi_1 / h - 1.
684
- phi_3 = phi_2 / h - 0.5
685
-
686
- if model_s is None:
687
- model_s = self.model_fn(x, s)
688
- if model_s1 is None:
689
- x_s1 = (
690
- expand_dims(torch.exp(log_alpha_s1 - log_alpha_s), dims) * x
691
- - expand_dims(sigma_s1 * phi_11, dims) * model_s
692
- )
693
- model_s1 = self.model_fn(x_s1, s1)
694
- x_s2 = (
695
- expand_dims(torch.exp(log_alpha_s2 - log_alpha_s), dims) * x
696
- - expand_dims(sigma_s2 * phi_12, dims) * model_s
697
- - r2 / r1 * expand_dims(sigma_s2 * phi_22, dims) * (model_s1 - model_s)
698
- )
699
- model_s2 = self.model_fn(x_s2, s2)
700
- if solver_type == 'dpm_solver':
701
- x_t = (
702
- expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x
703
- - expand_dims(sigma_t * phi_1, dims) * model_s
704
- - (1. / r2) * expand_dims(sigma_t * phi_2, dims) * (model_s2 - model_s)
705
- )
706
- elif solver_type == 'taylor':
707
- D1_0 = (1. / r1) * (model_s1 - model_s)
708
- D1_1 = (1. / r2) * (model_s2 - model_s)
709
- D1 = (r2 * D1_0 - r1 * D1_1) / (r2 - r1)
710
- D2 = 2. * (D1_1 - D1_0) / (r2 - r1)
711
- x_t = (
712
- expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x
713
- - expand_dims(sigma_t * phi_1, dims) * model_s
714
- - expand_dims(sigma_t * phi_2, dims) * D1
715
- - expand_dims(sigma_t * phi_3, dims) * D2
716
- )
717
-
718
- if return_intermediate:
719
- return x_t, {'model_s': model_s, 'model_s1': model_s1, 'model_s2': model_s2}
720
- else:
721
- return x_t
722
-
723
- def multistep_dpm_solver_second_update(self, x, model_prev_list, t_prev_list, t, solver_type="dpm_solver"):
724
- """
725
- Multistep solver DPM-Solver-2 from time `t_prev_list[-1]` to time `t`.
726
- Args:
727
- x: A pytorch tensor. The initial value at time `s`.
728
- model_prev_list: A list of pytorch tensor. The previous computed model values.
729
- t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (x.shape[0],)
730
- t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
731
- solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
732
- The type slightly impacts the performance. We recommend to use 'dpm_solver' type.
733
- Returns:
734
- x_t: A pytorch tensor. The approximated solution at time `t`.
735
- """
736
- if solver_type not in ['dpm_solver', 'taylor']:
737
- raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type))
738
- ns = self.noise_schedule
739
- dims = x.dim()
740
- model_prev_1, model_prev_0 = model_prev_list
741
- t_prev_1, t_prev_0 = t_prev_list
742
- lambda_prev_1, lambda_prev_0, lambda_t = ns.marginal_lambda(t_prev_1), ns.marginal_lambda(
743
- t_prev_0), ns.marginal_lambda(t)
744
- log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t)
745
- sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t)
746
- alpha_t = torch.exp(log_alpha_t)
747
-
748
- h_0 = lambda_prev_0 - lambda_prev_1
749
- h = lambda_t - lambda_prev_0
750
- r0 = h_0 / h
751
- D1_0 = expand_dims(1. / r0, dims) * (model_prev_0 - model_prev_1)
752
- if self.predict_x0:
753
- if solver_type == 'dpm_solver':
754
- x_t = (
755
- expand_dims(sigma_t / sigma_prev_0, dims) * x
756
- - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0
757
- - 0.5 * expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * D1_0
758
- )
759
- elif solver_type == 'taylor':
760
- x_t = (
761
- expand_dims(sigma_t / sigma_prev_0, dims) * x
762
- - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0
763
- + expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * D1_0
764
- )
765
- else:
766
- if solver_type == 'dpm_solver':
767
- x_t = (
768
- expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x
769
- - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0
770
- - 0.5 * expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * D1_0
771
- )
772
- elif solver_type == 'taylor':
773
- x_t = (
774
- expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x
775
- - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0
776
- - expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * D1_0
777
- )
778
- return x_t
779
-
780
- def multistep_dpm_solver_third_update(self, x, model_prev_list, t_prev_list, t, solver_type='dpm_solver'):
781
- """
782
- Multistep solver DPM-Solver-3 from time `t_prev_list[-1]` to time `t`.
783
- Args:
784
- x: A pytorch tensor. The initial value at time `s`.
785
- model_prev_list: A list of pytorch tensor. The previous computed model values.
786
- t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (x.shape[0],)
787
- t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
788
- solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
789
- The type slightly impacts the performance. We recommend to use 'dpm_solver' type.
790
- Returns:
791
- x_t: A pytorch tensor. The approximated solution at time `t`.
792
- """
793
- ns = self.noise_schedule
794
- dims = x.dim()
795
- model_prev_2, model_prev_1, model_prev_0 = model_prev_list
796
- t_prev_2, t_prev_1, t_prev_0 = t_prev_list
797
- lambda_prev_2, lambda_prev_1, lambda_prev_0, lambda_t = ns.marginal_lambda(t_prev_2), ns.marginal_lambda(
798
- t_prev_1), ns.marginal_lambda(t_prev_0), ns.marginal_lambda(t)
799
- log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t)
800
- sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t)
801
- alpha_t = torch.exp(log_alpha_t)
802
-
803
- h_1 = lambda_prev_1 - lambda_prev_2
804
- h_0 = lambda_prev_0 - lambda_prev_1
805
- h = lambda_t - lambda_prev_0
806
- r0, r1 = h_0 / h, h_1 / h
807
- D1_0 = expand_dims(1. / r0, dims) * (model_prev_0 - model_prev_1)
808
- D1_1 = expand_dims(1. / r1, dims) * (model_prev_1 - model_prev_2)
809
- D1 = D1_0 + expand_dims(r0 / (r0 + r1), dims) * (D1_0 - D1_1)
810
- D2 = expand_dims(1. / (r0 + r1), dims) * (D1_0 - D1_1)
811
- if self.predict_x0:
812
- x_t = (
813
- expand_dims(sigma_t / sigma_prev_0, dims) * x
814
- - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0
815
- + expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * D1
816
- - expand_dims(alpha_t * ((torch.exp(-h) - 1. + h) / h ** 2 - 0.5), dims) * D2
817
- )
818
- else:
819
- x_t = (
820
- expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x
821
- - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0
822
- - expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * D1
823
- - expand_dims(sigma_t * ((torch.exp(h) - 1. - h) / h ** 2 - 0.5), dims) * D2
824
- )
825
- return x_t
826
-
827
- def singlestep_dpm_solver_update(self, x, s, t, order, return_intermediate=False, solver_type='dpm_solver', r1=None,
828
- r2=None):
829
- """
830
- Singlestep DPM-Solver with the order `order` from time `s` to time `t`.
831
- Args:
832
- x: A pytorch tensor. The initial value at time `s`.
833
- s: A pytorch tensor. The starting time, with the shape (x.shape[0],).
834
- t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
835
- order: A `int`. The order of DPM-Solver. We only support order == 1 or 2 or 3.
836
- return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times).
837
- solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
838
- The type slightly impacts the performance. We recommend to use 'dpm_solver' type.
839
- r1: A `float`. The hyperparameter of the second-order or third-order solver.
840
- r2: A `float`. The hyperparameter of the third-order solver.
841
- Returns:
842
- x_t: A pytorch tensor. The approximated solution at time `t`.
843
- """
844
- if order == 1:
845
- return self.dpm_solver_first_update(x, s, t, return_intermediate=return_intermediate)
846
- elif order == 2:
847
- return self.singlestep_dpm_solver_second_update(x, s, t, return_intermediate=return_intermediate,
848
- solver_type=solver_type, r1=r1)
849
- elif order == 3:
850
- return self.singlestep_dpm_solver_third_update(x, s, t, return_intermediate=return_intermediate,
851
- solver_type=solver_type, r1=r1, r2=r2)
852
- else:
853
- raise ValueError("Solver order must be 1 or 2 or 3, got {}".format(order))
854
-
855
- def multistep_dpm_solver_update(self, x, model_prev_list, t_prev_list, t, order, solver_type='dpm_solver'):
856
- """
857
- Multistep DPM-Solver with the order `order` from time `t_prev_list[-1]` to time `t`.
858
- Args:
859
- x: A pytorch tensor. The initial value at time `s`.
860
- model_prev_list: A list of pytorch tensor. The previous computed model values.
861
- t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (x.shape[0],)
862
- t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
863
- order: A `int`. The order of DPM-Solver. We only support order == 1 or 2 or 3.
864
- solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
865
- The type slightly impacts the performance. We recommend to use 'dpm_solver' type.
866
- Returns:
867
- x_t: A pytorch tensor. The approximated solution at time `t`.
868
- """
869
- if order == 1:
870
- return self.dpm_solver_first_update(x, t_prev_list[-1], t, model_s=model_prev_list[-1])
871
- elif order == 2:
872
- return self.multistep_dpm_solver_second_update(x, model_prev_list, t_prev_list, t, solver_type=solver_type)
873
- elif order == 3:
874
- return self.multistep_dpm_solver_third_update(x, model_prev_list, t_prev_list, t, solver_type=solver_type)
875
- else:
876
- raise ValueError("Solver order must be 1 or 2 or 3, got {}".format(order))
877
-
878
- def dpm_solver_adaptive(self, x, order, t_T, t_0, h_init=0.05, atol=0.0078, rtol=0.05, theta=0.9, t_err=1e-5,
879
- solver_type='dpm_solver'):
880
- """
881
- The adaptive step size solver based on singlestep DPM-Solver.
882
- Args:
883
- x: A pytorch tensor. The initial value at time `t_T`.
884
- order: A `int`. The (higher) order of the solver. We only support order == 2 or 3.
885
- t_T: A `float`. The starting time of the sampling (default is T).
886
- t_0: A `float`. The ending time of the sampling (default is epsilon).
887
- h_init: A `float`. The initial step size (for logSNR).
888
- atol: A `float`. The absolute tolerance of the solver. For image data, the default setting is 0.0078, followed [1].
889
- rtol: A `float`. The relative tolerance of the solver. The default setting is 0.05.
890
- theta: A `float`. The safety hyperparameter for adapting the step size. The default setting is 0.9, followed [1].
891
- t_err: A `float`. The tolerance for the time. We solve the diffusion ODE until the absolute error between the
892
- current time and `t_0` is less than `t_err`. The default setting is 1e-5.
893
- solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
894
- The type slightly impacts the performance. We recommend to use 'dpm_solver' type.
895
- Returns:
896
- x_0: A pytorch tensor. The approximated solution at time `t_0`.
897
- [1] A. Jolicoeur-Martineau, K. Li, R. Piché-Taillefer, T. Kachman, and I. Mitliagkas, "Gotta go fast when generating data with score-based models," arXiv preprint arXiv:2105.14080, 2021.
898
- """
899
- ns = self.noise_schedule
900
- s = t_T * torch.ones((x.shape[0],)).to(x)
901
- lambda_s = ns.marginal_lambda(s)
902
- lambda_0 = ns.marginal_lambda(t_0 * torch.ones_like(s).to(x))
903
- h = h_init * torch.ones_like(s).to(x)
904
- x_prev = x
905
- nfe = 0
906
- if order == 2:
907
- r1 = 0.5
908
- lower_update = lambda x, s, t: self.dpm_solver_first_update(x, s, t, return_intermediate=True)
909
- higher_update = lambda x, s, t, **kwargs: self.singlestep_dpm_solver_second_update(x, s, t, r1=r1,
910
- solver_type=solver_type,
911
- **kwargs)
912
- elif order == 3:
913
- r1, r2 = 1. / 3., 2. / 3.
914
- lower_update = lambda x, s, t: self.singlestep_dpm_solver_second_update(x, s, t, r1=r1,
915
- return_intermediate=True,
916
- solver_type=solver_type)
917
- higher_update = lambda x, s, t, **kwargs: self.singlestep_dpm_solver_third_update(x, s, t, r1=r1, r2=r2,
918
- solver_type=solver_type,
919
- **kwargs)
920
- else:
921
- raise ValueError("For adaptive step size solver, order must be 2 or 3, got {}".format(order))
922
- while torch.abs((s - t_0)).mean() > t_err:
923
- t = ns.inverse_lambda(lambda_s + h)
924
- x_lower, lower_noise_kwargs = lower_update(x, s, t)
925
- x_higher = higher_update(x, s, t, **lower_noise_kwargs)
926
- delta = torch.max(torch.ones_like(x).to(x) * atol, rtol * torch.max(torch.abs(x_lower), torch.abs(x_prev)))
927
- norm_fn = lambda v: torch.sqrt(torch.square(v.reshape((v.shape[0], -1))).mean(dim=-1, keepdim=True))
928
- E = norm_fn((x_higher - x_lower) / delta).max()
929
- if torch.all(E <= 1.):
930
- x = x_higher
931
- s = t
932
- x_prev = x_lower
933
- lambda_s = ns.marginal_lambda(s)
934
- h = torch.min(theta * h * torch.float_power(E, -1. / order).float(), lambda_0 - lambda_s)
935
- nfe += order
936
- print('adaptive solver nfe', nfe)
937
- return x
938
-
939
- def sample(self, x, steps=20, t_start=None, t_end=None, order=3, skip_type='time_uniform',
940
- method='singlestep', lower_order_final=True, denoise_to_zero=False, solver_type='dpm_solver',
941
- atol=0.0078, rtol=0.05,
942
- ):
943
- """
944
- Compute the sample at time `t_end` by DPM-Solver, given the initial `x` at time `t_start`.
945
- =====================================================
946
- We support the following algorithms for both noise prediction model and data prediction model:
947
- - 'singlestep':
948
- Singlestep DPM-Solver (i.e. "DPM-Solver-fast" in the paper), which combines different orders of singlestep DPM-Solver.
949
- We combine all the singlestep solvers with order <= `order` to use up all the function evaluations (steps).
950
- The total number of function evaluations (NFE) == `steps`.
951
- Given a fixed NFE == `steps`, the sampling procedure is:
952
- - If `order` == 1:
953
- - Denote K = steps. We use K steps of DPM-Solver-1 (i.e. DDIM).
954
- - If `order` == 2:
955
- - Denote K = (steps // 2) + (steps % 2). We take K intermediate time steps for sampling.
956
- - If steps % 2 == 0, we use K steps of singlestep DPM-Solver-2.
957
- - If steps % 2 == 1, we use (K - 1) steps of singlestep DPM-Solver-2 and 1 step of DPM-Solver-1.
958
- - If `order` == 3:
959
- - Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling.
960
- - If steps % 3 == 0, we use (K - 2) steps of singlestep DPM-Solver-3, and 1 step of singlestep DPM-Solver-2 and 1 step of DPM-Solver-1.
961
- - If steps % 3 == 1, we use (K - 1) steps of singlestep DPM-Solver-3 and 1 step of DPM-Solver-1.
962
- - If steps % 3 == 2, we use (K - 1) steps of singlestep DPM-Solver-3 and 1 step of singlestep DPM-Solver-2.
963
- - 'multistep':
964
- Multistep DPM-Solver with the order of `order`. The total number of function evaluations (NFE) == `steps`.
965
- We initialize the first `order` values by lower order multistep solvers.
966
- Given a fixed NFE == `steps`, the sampling procedure is:
967
- Denote K = steps.
968
- - If `order` == 1:
969
- - We use K steps of DPM-Solver-1 (i.e. DDIM).
970
- - If `order` == 2:
971
- - We first use 1 step of DPM-Solver-1, then (K - 1) steps of multistep DPM-Solver-2.
972
- - If `order` == 3:
973
- - We first use 1 step of DPM-Solver-1, then 1 step of multistep DPM-Solver-2, then (K - 2) steps of multistep DPM-Solver-3.
974
- - 'singlestep_fixed':
975
- Fixed order singlestep DPM-Solver (i.e. DPM-Solver-1 or singlestep DPM-Solver-2 or singlestep DPM-Solver-3).
976
- We use singlestep DPM-Solver-`order` for `order`=1 or 2 or 3, with total [`steps` // `order`] * `order` NFE.
977
- - 'adaptive':
978
- Adaptive step size DPM-Solver (i.e. "DPM-Solver-12" and "DPM-Solver-23" in the paper).
979
- We ignore `steps` and use adaptive step size DPM-Solver with a higher order of `order`.
980
- You can adjust the absolute tolerance `atol` and the relative tolerance `rtol` to balance the computation costs
981
- (NFE) and the sample quality.
982
- - If `order` == 2, we use DPM-Solver-12 which combines DPM-Solver-1 and singlestep DPM-Solver-2.
983
- - If `order` == 3, we use DPM-Solver-23 which combines singlestep DPM-Solver-2 and singlestep DPM-Solver-3.
984
- =====================================================
985
- Some advice on choosing the algorithm:
986
- - For **unconditional sampling** or **guided sampling with small guidance scale** by DPMs:
987
- Use singlestep DPM-Solver ("DPM-Solver-fast" in the paper) with `order = 3`.
988
- e.g.
989
- >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, predict_x0=False)
990
- >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=3,
991
- skip_type='time_uniform', method='singlestep')
992
- - For **guided sampling with large guidance scale** by DPMs:
993
- Use multistep DPM-Solver with `predict_x0 = True` and `order = 2`.
994
- e.g.
995
- >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, predict_x0=True)
996
- >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=2,
997
- skip_type='time_uniform', method='multistep')
998
- We support three types of `skip_type`:
999
- - 'logSNR': uniform logSNR for the time steps. **Recommended for low-resolution images**.
1000
- - 'time_uniform': uniform time for the time steps. **Recommended for high-resolution images**.
1001
- - 'time_quadratic': quadratic time for the time steps.
1002
- =====================================================
1003
- Args:
1004
- x: A pytorch tensor. The initial value at time `t_start`
1005
- e.g. if `t_start` == T, then `x` is a sample from the standard normal distribution.
1006
- steps: A `int`. The total number of function evaluations (NFE).
1007
- t_start: A `float`. The starting time of the sampling.
1008
- If `t_start` is None, we use self.noise_schedule.T (default is 1.0).
1009
- t_end: A `float`. The ending time of the sampling.
1010
- If `t_end` is None, we use 1. / self.noise_schedule.total_N.
1011
- e.g. if total_N == 1000, we have `t_end` == 1e-3.
1012
- For discrete-time DPMs:
1013
- - We recommend `t_end` == 1. / self.noise_schedule.total_N.
1014
- For continuous-time DPMs:
1015
- - We recommend `t_end` == 1e-3 when `steps` <= 15; and `t_end` == 1e-4 when `steps` > 15.
1016
- order: A `int`. The order of DPM-Solver.
1017
- skip_type: A `str`. The type for the spacing of the time steps. 'time_uniform' or 'logSNR' or 'time_quadratic'.
1018
- method: A `str`. The method for sampling. 'singlestep' or 'multistep' or 'singlestep_fixed' or 'adaptive'.
1019
- denoise_to_zero: A `bool`. Whether to denoise to time 0 at the final step.
1020
- Default is `False`. If `denoise_to_zero` is `True`, the total NFE is (`steps` + 1).
1021
- This trick was first proposed by DDPM (https://arxiv.org/abs/2006.11239) and
1022
- score_sde (https://arxiv.org/abs/2011.13456). It can improve the FID when
1023
- sampling diffusion models via diffusion SDEs for low-resolution images
1024
- (such as CIFAR-10). However, we observed that this trick does not matter for
1025
- high-resolution images. As it needs an additional NFE, we do not recommend
1026
- it for high-resolution images.
1027
- lower_order_final: A `bool`. Whether to use lower order solvers at the final steps.
1028
- Only valid for `method=multistep` and `steps < 15`. We empirically find that
1029
- this trick is a key to stabilizing the sampling by DPM-Solver with very few steps
1030
- (especially for steps <= 10). So we recommend setting it to `True`.
1031
- solver_type: A `str`. The taylor expansion type for the solver. `dpm_solver` or `taylor`. We recommend `dpm_solver`.
1032
- atol: A `float`. The absolute tolerance of the adaptive step size solver. Valid when `method` == 'adaptive'.
1033
- rtol: A `float`. The relative tolerance of the adaptive step size solver. Valid when `method` == 'adaptive'.
1034
- Returns:
1035
- x_end: A pytorch tensor. The approximated solution at time `t_end`.
1036
- """
1037
- t_0 = 1. / self.noise_schedule.total_N if t_end is None else t_end
1038
- t_T = self.noise_schedule.T if t_start is None else t_start
1039
- device = x.device
1040
- if method == 'adaptive':
1041
- with torch.no_grad():
1042
- x = self.dpm_solver_adaptive(x, order=order, t_T=t_T, t_0=t_0, atol=atol, rtol=rtol,
1043
- solver_type=solver_type)
1044
- elif method == 'multistep':
1045
- assert steps >= order
1046
- timesteps = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=steps, device=device)
1047
- assert timesteps.shape[0] - 1 == steps
1048
- with torch.no_grad():
1049
- vec_t = timesteps[0].expand((x.shape[0]))
1050
- model_prev_list = [self.model_fn(x, vec_t)]
1051
- t_prev_list = [vec_t]
1052
- # Init the first `order` values by lower order multistep DPM-Solver.
1053
- for init_order in tqdm(range(1, order), desc="DPM init order"):
1054
- vec_t = timesteps[init_order].expand(x.shape[0])
1055
- x = self.multistep_dpm_solver_update(x, model_prev_list, t_prev_list, vec_t, init_order,
1056
- solver_type=solver_type)
1057
- model_prev_list.append(self.model_fn(x, vec_t))
1058
- t_prev_list.append(vec_t)
1059
- # Compute the remaining values by `order`-th order multistep DPM-Solver.
1060
- for step in tqdm(range(order, steps + 1), desc="DPM multistep"):
1061
- vec_t = timesteps[step].expand(x.shape[0])
1062
- if lower_order_final and steps < 15:
1063
- step_order = min(order, steps + 1 - step)
1064
- else:
1065
- step_order = order
1066
- x = self.multistep_dpm_solver_update(x, model_prev_list, t_prev_list, vec_t, step_order,
1067
- solver_type=solver_type)
1068
- for i in range(order - 1):
1069
- t_prev_list[i] = t_prev_list[i + 1]
1070
- model_prev_list[i] = model_prev_list[i + 1]
1071
- t_prev_list[-1] = vec_t
1072
- # We do not need to evaluate the final model value.
1073
- if step < steps:
1074
- model_prev_list[-1] = self.model_fn(x, vec_t)
1075
- elif method in ['singlestep', 'singlestep_fixed']:
1076
- if method == 'singlestep':
1077
- timesteps_outer, orders = self.get_orders_and_timesteps_for_singlestep_solver(steps=steps, order=order,
1078
- skip_type=skip_type,
1079
- t_T=t_T, t_0=t_0,
1080
- device=device)
1081
- elif method == 'singlestep_fixed':
1082
- K = steps // order
1083
- orders = [order, ] * K
1084
- timesteps_outer = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=K, device=device)
1085
- for i, order in enumerate(orders):
1086
- t_T_inner, t_0_inner = timesteps_outer[i], timesteps_outer[i + 1]
1087
- timesteps_inner = self.get_time_steps(skip_type=skip_type, t_T=t_T_inner.item(), t_0=t_0_inner.item(),
1088
- N=order, device=device)
1089
- lambda_inner = self.noise_schedule.marginal_lambda(timesteps_inner)
1090
- vec_s, vec_t = t_T_inner.tile(x.shape[0]), t_0_inner.tile(x.shape[0])
1091
- h = lambda_inner[-1] - lambda_inner[0]
1092
- r1 = None if order <= 1 else (lambda_inner[1] - lambda_inner[0]) / h
1093
- r2 = None if order <= 2 else (lambda_inner[2] - lambda_inner[0]) / h
1094
- x = self.singlestep_dpm_solver_update(x, vec_s, vec_t, order, solver_type=solver_type, r1=r1, r2=r2)
1095
- if denoise_to_zero:
1096
- x = self.denoise_to_zero_fn(x, torch.ones((x.shape[0],)).to(device) * t_0)
1097
- return x
1098
-
1099
-
1100
- #############################################################
1101
- # other utility functions
1102
- #############################################################
1103
-
1104
- def interpolate_fn(x, xp, yp):
1105
- """
1106
- A piecewise linear function y = f(x), using xp and yp as keypoints.
1107
- We implement f(x) in a differentiable way (i.e. applicable for autograd).
1108
- The function f(x) is well-defined for all x. (For x beyond the bounds of xp, we use the outermost points of xp to define the linear function.)
1109
- Args:
1110
- x: PyTorch tensor with shape [N, C], where N is the batch size, C is the number of channels (we use C = 1 for DPM-Solver).
1111
- xp: PyTorch tensor with shape [C, K], where K is the number of keypoints.
1112
- yp: PyTorch tensor with shape [C, K].
1113
- Returns:
1114
- The function values f(x), with shape [N, C].
1115
- """
1116
- N, K = x.shape[0], xp.shape[1]
1117
- all_x = torch.cat([x.unsqueeze(2), xp.unsqueeze(0).repeat((N, 1, 1))], dim=2)
1118
- sorted_all_x, x_indices = torch.sort(all_x, dim=2)
1119
- x_idx = torch.argmin(x_indices, dim=2)
1120
- cand_start_idx = x_idx - 1
1121
- start_idx = torch.where(
1122
- torch.eq(x_idx, 0),
1123
- torch.tensor(1, device=x.device),
1124
- torch.where(
1125
- torch.eq(x_idx, K), torch.tensor(K - 2, device=x.device), cand_start_idx,
1126
- ),
1127
- )
1128
- end_idx = torch.where(torch.eq(start_idx, cand_start_idx), start_idx + 2, start_idx + 1)
1129
- start_x = torch.gather(sorted_all_x, dim=2, index=start_idx.unsqueeze(2)).squeeze(2)
1130
- end_x = torch.gather(sorted_all_x, dim=2, index=end_idx.unsqueeze(2)).squeeze(2)
1131
- start_idx2 = torch.where(
1132
- torch.eq(x_idx, 0),
1133
- torch.tensor(0, device=x.device),
1134
- torch.where(
1135
- torch.eq(x_idx, K), torch.tensor(K - 2, device=x.device), cand_start_idx,
1136
- ),
1137
- )
1138
- y_positions_expanded = yp.unsqueeze(0).expand(N, -1, -1)
1139
- start_y = torch.gather(y_positions_expanded, dim=2, index=start_idx2.unsqueeze(2)).squeeze(2)
1140
- end_y = torch.gather(y_positions_expanded, dim=2, index=(start_idx2 + 1).unsqueeze(2)).squeeze(2)
1141
- cand = start_y + (x - start_x) * (end_y - start_y) / (end_x - start_x)
1142
- return cand
1143
-
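# Worked example of the piecewise-linear interpolation above: with x = [[1.5]],
# xp = [[1., 2.]] and yp = [[10., 20.]], the keypoint search picks start_x = 1.,
# end_x = 2., start_y = 10., end_y = 20., so f(1.5) = 10. + 0.5 * (20. - 10.) / 1. = 15.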
1144
-
1145
- def expand_dims(v, dims):
1146
- """
1147
- Expand the tensor `v` to the dim `dims`.
1148
- Args:
1149
- `v`: a PyTorch tensor with shape [N].
1150
- `dims`: an `int`.
1151
- Returns:
1152
- a PyTorch tensor with shape [N, 1, 1, ..., 1] and the total dimension is `dims`.
1153
- """
1154
- return v[(...,) + (None,) * (dims - 1)]
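Taken together, the components above (NoiseScheduleVP, model_wrapper, DPM_Solver) are typically wired as follows. This is a minimal, hedged sketch with a toy stand-in network and an illustrative latent shape, not an excerpt of any real pipeline:

import torch

def _toy_eps_model(x, t_input, cond=None):
    # Stand-in for a real noise-prediction UNet; returns zeros just to exercise the API.
    return torch.zeros_like(x)

betas = torch.linspace(1e-4, 2e-2, 1000)                 # illustrative linear schedule
ns = NoiseScheduleVP('discrete', betas=betas)
model_fn = model_wrapper(_toy_eps_model, ns, model_type="noise", guidance_type="uncond")
dpm_solver = DPM_Solver(model_fn, ns, predict_x0=True)   # DPM-Solver++ style data prediction
x_T = torch.randn((1, 4, 64, 64))
x_0 = dpm_solver.sample(x_T, steps=20, order=2, skip_type='time_uniform', method='multistep')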
 
spaces/AtomdffAI/wechatgpt4atom/docker/sample-chatgpt-on-wechat/Makefile DELETED
@@ -1,26 +0,0 @@
1
- IMG:=`cat Name`
2
- MOUNT:=
3
- PORT_MAP:=
4
- DOTENV:=.env
5
- CONTAINER_NAME:=sample-chatgpt-on-wechat
6
-
7
- echo:
8
- echo $(IMG)
9
-
10
- run_d:
11
- docker rm $(CONTAINER_NAME) || echo
12
- docker run -dt --name $(CONTAINER_NAME) $(PORT_MAP) \
13
- --env-file=$(DOTENV) \
14
- $(MOUNT) $(IMG)
15
-
16
- run_i:
17
- docker rm $(CONTAINER_NAME) || echo
18
- docker run -it --name $(CONTAINER_NAME) $(PORT_MAP) \
19
- --env-file=$(DOTENV) \
20
- $(MOUNT) $(IMG)
21
-
22
- stop:
23
- docker stop $(CONTAINER_NAME)
24
-
25
- rm: stop
26
- docker rm $(CONTAINER_NAME)
 
 
spaces/Avkash/WebcamFaceProcessing/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: WebcamFaceProcessing
3
- emoji: 📉
4
- colorFrom: pink
5
- colorTo: indigo
6
- sdk: gradio
7
- sdk_version: 3.10.1
8
- app_file: app.py
9
- pinned: false
10
- license: mit
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
spaces/Awesimo/jojogan/e4e/options/train_options.py DELETED
@@ -1,84 +0,0 @@
1
- from argparse import ArgumentParser
2
- from configs.paths_config import model_paths
3
-
4
-
5
- class TrainOptions:
6
-
7
- def __init__(self):
8
- self.parser = ArgumentParser()
9
- self.initialize()
10
-
11
- def initialize(self):
12
- self.parser.add_argument('--exp_dir', type=str, help='Path to experiment output directory')
13
- self.parser.add_argument('--dataset_type', default='ffhq_encode', type=str,
14
- help='Type of dataset/experiment to run')
15
- self.parser.add_argument('--encoder_type', default='Encoder4Editing', type=str, help='Which encoder to use')
16
-
17
- self.parser.add_argument('--batch_size', default=4, type=int, help='Batch size for training')
18
- self.parser.add_argument('--test_batch_size', default=2, type=int, help='Batch size for testing and inference')
19
- self.parser.add_argument('--workers', default=4, type=int, help='Number of train dataloader workers')
20
- self.parser.add_argument('--test_workers', default=2, type=int,
21
- help='Number of test/inference dataloader workers')
22
-
23
- self.parser.add_argument('--learning_rate', default=0.0001, type=float, help='Optimizer learning rate')
24
- self.parser.add_argument('--optim_name', default='ranger', type=str, help='Which optimizer to use')
25
- self.parser.add_argument('--train_decoder', default=False, type=bool, help='Whether to train the decoder model')
26
- self.parser.add_argument('--start_from_latent_avg', action='store_true',
27
- help='Whether to add average latent vector to generate codes from encoder.')
28
- self.parser.add_argument('--lpips_type', default='alex', type=str, help='LPIPS backbone')
29
-
30
- self.parser.add_argument('--lpips_lambda', default=0.8, type=float, help='LPIPS loss multiplier factor')
31
- self.parser.add_argument('--id_lambda', default=0.1, type=float, help='ID loss multiplier factor')
32
- self.parser.add_argument('--l2_lambda', default=1.0, type=float, help='L2 loss multiplier factor')
33
-
34
- self.parser.add_argument('--stylegan_weights', default=model_paths['stylegan_ffhq'], type=str,
35
- help='Path to StyleGAN model weights')
36
- self.parser.add_argument('--stylegan_size', default=1024, type=int,
37
- help='size of pretrained StyleGAN Generator')
38
- self.parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to pSp model checkpoint')
39
-
40
- self.parser.add_argument('--max_steps', default=500000, type=int, help='Maximum number of training steps')
41
- self.parser.add_argument('--image_interval', default=100, type=int,
42
- help='Interval for logging train images during training')
43
- self.parser.add_argument('--board_interval', default=50, type=int,
44
- help='Interval for logging metrics to tensorboard')
45
- self.parser.add_argument('--val_interval', default=1000, type=int, help='Validation interval')
46
- self.parser.add_argument('--save_interval', default=None, type=int, help='Model checkpoint interval')
47
-
48
- # Discriminator flags
49
- self.parser.add_argument('--w_discriminator_lambda', default=0, type=float, help='Dw loss multiplier')
50
- self.parser.add_argument('--w_discriminator_lr', default=2e-5, type=float, help='Dw learning rate')
51
- self.parser.add_argument("--r1", type=float, default=10, help="weight of the r1 regularization")
52
- self.parser.add_argument("--d_reg_every", type=int, default=16,
53
- help="interval for applying r1 regularization")
54
- self.parser.add_argument('--use_w_pool', action='store_true',
55
- help='Whether to store a latent codes pool for the discriminator\'s training')
56
- self.parser.add_argument("--w_pool_size", type=int, default=50,
57
- help="W\'s pool size, depends on --use_w_pool")
58
-
59
- # e4e specific
60
- self.parser.add_argument('--delta_norm', type=int, default=2, help="norm type of the deltas")
61
- self.parser.add_argument('--delta_norm_lambda', type=float, default=2e-4, help="lambda for delta norm loss")
62
-
63
- # Progressive training
64
- self.parser.add_argument('--progressive_steps', nargs='+', type=int, default=None,
65
- help="The training steps of training new deltas. steps[i] starts the delta_i training")
66
- self.parser.add_argument('--progressive_start', type=int, default=None,
67
- help="The training step to start training the deltas, overrides progressive_steps")
68
- self.parser.add_argument('--progressive_step_every', type=int, default=2_000,
69
- help="Amount of training steps for each progressive step")
70
-
71
- # Save additional training info to enable future training continuation from produced checkpoints
72
- self.parser.add_argument('--save_training_data', action='store_true',
73
- help='Save intermediate training data to resume training from the checkpoint')
74
- self.parser.add_argument('--sub_exp_dir', default=None, type=str, help='Name of sub experiment directory')
75
- self.parser.add_argument('--keep_optimizer', action='store_true',
76
- help='Whether to continue from the checkpoint\'s optimizer')
77
- self.parser.add_argument('--resume_training_from_ckpt', default=None, type=str,
78
- help='Path to training checkpoint, works when --save_training_data was set to True')
79
- self.parser.add_argument('--update_param_list', nargs='+', type=str, default=None,
80
- help="Name of training parameters to update the loaded training checkpoint")
81
-
82
- def parse(self):
83
- opts = self.parser.parse_args()
84
- return opts
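A brief usage sketch for the options class above. The import path and the command line are assumptions about how the surrounding e4e repository is laid out, not something stated in this diff.

```python
# Hypothetical invocation, e.g. `python scripts/train.py --exp_dir ./exp --batch_size 8`:
from options.train_options import TrainOptions  # assumed module path inside the e4e repo

opts = TrainOptions().parse()
print(opts.exp_dir, opts.batch_size, opts.learning_rate, opts.max_steps)
```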
 
 
spaces/Awiny/Image2Paragraph/models/grit_src/grit/data/transforms/custom_transform.py DELETED
@@ -1,115 +0,0 @@
1
- # -*- coding: utf-8 -*-
2
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
3
- # Part of the code is from https://github.com/rwightman/efficientdet-pytorch/blob/master/effdet/data/transforms.py
4
- # Modified by Xingyi Zhou
5
- # The original code is under Apache-2.0 License
6
- import numpy as np
7
- import torch
8
- import torch.nn.functional as F
9
- from fvcore.transforms.transform import (
10
- CropTransform,
11
- HFlipTransform,
12
- NoOpTransform,
13
- Transform,
14
- TransformList,
15
- )
16
- from PIL import Image
17
-
18
- try:
19
- import cv2 # noqa
20
- except ImportError:
21
- # OpenCV is an optional dependency at the moment
22
- pass
23
-
24
- __all__ = [
25
- "EfficientDetResizeCropTransform",
26
- ]
27
-
28
-
29
- class EfficientDetResizeCropTransform(Transform):
30
- """
31
- """
32
-
33
- def __init__(self, scaled_h, scaled_w, offset_y, offset_x, img_scale, \
34
- target_size, interp=None):
35
- """
36
- Args:
37
- scaled_h, scaled_w (int): image size after resizing; offset_y, offset_x (int): crop origin
38
- img_scale (float): resize scale factor; target_size (tuple): output (h, w) of the crop
39
- interp: PIL interpolation methods, defaults to bilinear.
40
- """
41
- # TODO decide on PIL vs opencv
42
- super().__init__()
43
- if interp is None:
44
- interp = Image.BILINEAR
45
- self._set_attributes(locals())
46
-
47
- def apply_image(self, img, interp=None):
48
- assert len(img.shape) <= 4
49
-
50
- if img.dtype == np.uint8:
51
- pil_image = Image.fromarray(img)
52
- interp_method = interp if interp is not None else self.interp
53
- pil_image = pil_image.resize((self.scaled_w, self.scaled_h), interp_method)
54
- ret = np.asarray(pil_image)
55
- right = min(self.scaled_w, self.offset_x + self.target_size[1])
56
- lower = min(self.scaled_h, self.offset_y + self.target_size[0])
57
- if len(ret.shape) <= 3:
58
- ret = ret[self.offset_y: lower, self.offset_x: right]
59
- else:
60
- ret = ret[..., self.offset_y: lower, self.offset_x: right, :]
61
- else:
62
- # PIL only supports uint8
63
- img = torch.from_numpy(img)
64
- shape = list(img.shape)
65
- shape_4d = shape[:2] + [1] * (4 - len(shape)) + shape[2:]
66
- img = img.view(shape_4d).permute(2, 3, 0, 1) # hw(c) -> nchw
67
- _PIL_RESIZE_TO_INTERPOLATE_MODE = {Image.BILINEAR: "bilinear", Image.BICUBIC: "bicubic"}
68
- mode = _PIL_RESIZE_TO_INTERPOLATE_MODE[self.interp]
69
- img = F.interpolate(img, (self.scaled_h, self.scaled_w), mode=mode, align_corners=False)
70
- shape[:2] = (self.scaled_h, self.scaled_w)
71
- ret = img.permute(2, 3, 0, 1).view(shape).numpy() # nchw -> hw(c)
72
- right = min(self.scaled_w, self.offset_x + self.target_size[1])
73
- lower = min(self.scaled_h, self.offset_y + self.target_size[0])
74
- if len(ret.shape) <= 3:
75
- ret = ret[self.offset_y: lower, self.offset_x: right]
76
- else:
77
- ret = ret[..., self.offset_y: lower, self.offset_x: right, :]
78
- return ret
79
-
80
-
81
- def apply_coords(self, coords):
82
- coords[:, 0] = coords[:, 0] * self.img_scale
83
- coords[:, 1] = coords[:, 1] * self.img_scale
84
- coords[:, 0] -= self.offset_x
85
- coords[:, 1] -= self.offset_y
86
- return coords
87
-
88
-
89
- def apply_segmentation(self, segmentation):
90
- segmentation = self.apply_image(segmentation, interp=Image.NEAREST)
91
- return segmentation
92
-
93
-
94
- def inverse(self):
95
- raise NotImplementedError
96
-
97
-
98
- def inverse_apply_coords(self, coords):
99
- coords[:, 0] += self.offset_x
100
- coords[:, 1] += self.offset_y
101
- coords[:, 0] = coords[:, 0] / self.img_scale
102
- coords[:, 1] = coords[:, 1] / self.img_scale
103
- return coords
104
-
105
-
106
- def inverse_apply_box(self, box: np.ndarray) -> np.ndarray:
107
- """
108
- """
109
- idxs = np.array([(0, 1), (2, 1), (0, 3), (2, 3)]).flatten()
110
- coords = np.asarray(box).reshape(-1, 4)[:, idxs].reshape(-1, 2)
111
- coords = self.inverse_apply_coords(coords).reshape((-1, 4, 2))
112
- minxy = coords.min(axis=1)
113
- maxxy = coords.max(axis=1)
114
- trans_boxes = np.concatenate((minxy, maxxy), axis=1)
115
- return trans_boxes
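A small sketch of how the transform above might be exercised; the sizes, offsets, and scale below are illustrative values, not taken from the original data pipeline.

```python
import numpy as np

# Scale a 480x640 image by 0.5, then crop a 224x224 window at (offset_y=8, offset_x=16).
t = EfficientDetResizeCropTransform(
    scaled_h=240, scaled_w=320, offset_y=8, offset_x=16,
    img_scale=0.5, target_size=(224, 224))

img = np.zeros((480, 640, 3), dtype=np.uint8)
cropped = t.apply_image(img)                         # resized to 240x320, then cropped to 224x224
coords = np.array([[100.0, 80.0], [300.0, 200.0]])   # (x, y) points in the original image
print(cropped.shape, t.apply_coords(coords))         # points scaled by 0.5, then shifted by the offsets
```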
 
 
spaces/Awiny/Image2Paragraph/models/grit_src/grit/modeling/soft_nms.py DELETED
@@ -1,177 +0,0 @@
1
- import torch
2
-
3
- from detectron2.structures import Boxes, RotatedBoxes, pairwise_iou, pairwise_iou_rotated
4
-
5
-
6
- def soft_nms(boxes, scores, method, gaussian_sigma, linear_threshold, prune_threshold):
7
- """
8
- Performs soft non-maximum suppression algorithm on axis aligned boxes
9
-
10
- Args:
11
- boxes (Tensor[N, 4]):
12
- boxes where NMS will be performed. They
13
- are expected to be in (x1, y1, x2, y2) format
14
- scores (Tensor[N]):
15
- scores for each one of the boxes
16
- method (str):
17
- one of ['gaussian', 'linear', 'hard']
18
- see paper for details. users encouraged not to use "hard", as this is the
19
- same nms available elsewhere in detectron2
20
- gaussian_sigma (float):
21
- parameter for Gaussian penalty function
22
- linear_threshold (float):
23
- iou threshold for applying linear decay. Nt from the paper
24
- re-used as threshold for standard "hard" nms
25
- prune_threshold (float):
26
- boxes with scores below this threshold are pruned at each iteration.
27
- Dramatically reduces computation time. Authors use values in [10e-4, 10e-2]
28
-
29
- Returns:
30
- tuple(Tensor, Tensor):
31
- [0]: int64 tensor with the indices of the elements that have been kept
32
- by Soft NMS, sorted in decreasing order of scores
33
- [1]: float tensor with the re-scored scores of the elements that were kept
34
- """
35
- return _soft_nms(
36
- Boxes,
37
- pairwise_iou,
38
- boxes,
39
- scores,
40
- method,
41
- gaussian_sigma,
42
- linear_threshold,
43
- prune_threshold,
44
- )
45
-
46
-
47
- def batched_soft_nms(
48
- boxes, scores, idxs, method, gaussian_sigma, linear_threshold, prune_threshold
49
- ):
50
- """
51
- Performs soft non-maximum suppression in a batched fashion.
52
-
53
- Each index value correspond to a category, and NMS
54
- will not be applied between elements of different categories.
55
-
56
- Args:
57
- boxes (Tensor[N, 4]):
58
- boxes where NMS will be performed. They
59
- are expected to be in (x1, y1, x2, y2) format
60
- scores (Tensor[N]):
61
- scores for each one of the boxes
62
- idxs (Tensor[N]):
63
- indices of the categories for each one of the boxes.
64
- method (str):
65
- one of ['gaussian', 'linear', 'hard']
66
- see paper for details. users encouraged not to use "hard", as this is the
67
- same nms available elsewhere in detectron2
68
- gaussian_sigma (float):
69
- parameter for Gaussian penalty function
70
- linear_threshold (float):
71
- iou threshold for applying linear decay. Nt from the paper
72
- re-used as threshold for standard "hard" nms
73
- prune_threshold (float):
74
- boxes with scores below this threshold are pruned at each iteration.
75
- Dramatically reduces computation time. Authors use values in [10e-4, 10e-2]
76
- Returns:
77
- tuple(Tensor, Tensor):
78
- [0]: int64 tensor with the indices of the elements that have been kept
79
- by Soft NMS, sorted in decreasing order of scores
80
- [1]: float tensor with the re-scored scores of the elements that were kept
81
- """
82
- if boxes.numel() == 0:
83
- return (
84
- torch.empty((0,), dtype=torch.int64, device=boxes.device),
85
- torch.empty((0,), dtype=torch.float32, device=scores.device),
86
- )
87
- # strategy: in order to perform NMS independently per class.
88
- # we add an offset to all the boxes. The offset is dependent
89
- # only on the class idx, and is large enough so that boxes
90
- # from different classes do not overlap
91
- max_coordinate = boxes.max()
92
- offsets = idxs.to(boxes) * (max_coordinate + 1)
93
- boxes_for_nms = boxes + offsets[:, None]
94
- return soft_nms(
95
- boxes_for_nms, scores, method, gaussian_sigma, linear_threshold, prune_threshold
96
- )
97
-
98
-
99
- def _soft_nms(
100
- box_class,
101
- pairwise_iou_func,
102
- boxes,
103
- scores,
104
- method,
105
- gaussian_sigma,
106
- linear_threshold,
107
- prune_threshold,
108
- ):
109
- """
110
- Soft non-max suppression algorithm.
111
-
112
- Implementation of [Soft-NMS -- Improving Object Detection With One Line of Code]
113
- (https://arxiv.org/abs/1704.04503)
114
-
115
- Args:
116
- box_class (cls): one of Boxes, RotatedBoxes
117
- pairwise_iou_func (func): one of pairwise_iou, pairwise_iou_rotated
118
- boxes (Tensor[N, ?]):
119
- boxes where NMS will be performed
120
- if Boxes, in (x1, y1, x2, y2) format
121
- if RotatedBoxes, in (x_ctr, y_ctr, width, height, angle_degrees) format
122
- scores (Tensor[N]):
123
- scores for each one of the boxes
124
- method (str):
125
- one of ['gaussian', 'linear', 'hard']
126
- see paper for details. users encouraged not to use "hard", as this is the
127
- same nms available elsewhere in detectron2
128
- gaussian_sigma (float):
129
- parameter for Gaussian penalty function
130
- linear_threshold (float):
131
- iou threshold for applying linear decay. Nt from the paper
132
- re-used as threshold for standard "hard" nms
133
- prune_threshold (float):
134
- boxes with scores below this threshold are pruned at each iteration.
135
- Dramatically reduces computation time. Authors use values in [10e-4, 10e-2]
136
-
137
- Returns:
138
- tuple(Tensor, Tensor):
139
- [0]: int64 tensor with the indices of the elements that have been kept
140
- by Soft NMS, sorted in decreasing order of scores
141
- [1]: float tensor with the re-scored scores of the elements that were kept
142
- """
143
- boxes = boxes.clone()
144
- scores = scores.clone()
145
- idxs = torch.arange(scores.size()[0])
146
-
147
- idxs_out = []
148
- scores_out = []
149
-
150
- while scores.numel() > 0:
151
- top_idx = torch.argmax(scores)
152
- idxs_out.append(idxs[top_idx].item())
153
- scores_out.append(scores[top_idx].item())
154
-
155
- top_box = boxes[top_idx]
156
- ious = pairwise_iou_func(box_class(top_box.unsqueeze(0)), box_class(boxes))[0]
157
-
158
- if method == "linear":
159
- decay = torch.ones_like(ious)
160
- decay_mask = ious > linear_threshold
161
- decay[decay_mask] = 1 - ious[decay_mask]
162
- elif method == "gaussian":
163
- decay = torch.exp(-torch.pow(ious, 2) / gaussian_sigma)
164
- elif method == "hard": # standard NMS
165
- decay = (ious < linear_threshold).float()
166
- else:
167
- raise NotImplementedError("{} soft nms method not implemented.".format(method))
168
-
169
- scores *= decay
170
- keep = scores > prune_threshold
171
- keep[top_idx] = False
172
-
173
- boxes = boxes[keep]
174
- scores = scores[keep]
175
- idxs = idxs[keep]
176
-
177
- return torch.tensor(idxs_out).to(boxes.device), torch.tensor(scores_out).to(scores.device)
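A minimal sketch of calling `batched_soft_nms` from the module above (it needs detectron2 installed for the imports to resolve); the boxes, scores, and thresholds are made up for illustration.

```python
import torch

# Three boxes in (x1, y1, x2, y2) format; the first two overlap heavily.
boxes = torch.tensor([[0.0, 0.0, 10.0, 10.0],
                      [1.0, 1.0, 11.0, 11.0],
                      [50.0, 50.0, 60.0, 60.0]])
scores = torch.tensor([0.9, 0.8, 0.7])
idxs = torch.zeros(3, dtype=torch.int64)   # all boxes belong to the same category

keep, rescored = batched_soft_nms(
    boxes, scores, idxs, method="gaussian",
    gaussian_sigma=0.5, linear_threshold=0.3, prune_threshold=1e-3)
print(keep, rescored)  # the overlapping box is kept, but with a decayed score
```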
 
 
spaces/AxelBell/EasyOCR_text_recognition/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: EasyOCR
3
- emoji: 👁️‍🗨️
4
- colorFrom: blue
5
- colorTo: yellow
6
- sdk: gradio
7
- sdk_version: 3.42.0
8
- app_file: app.py
9
- pinned: false
10
- license: apache-2.0
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
 
spaces/Benson/text-generation/Examples/8 Reglas De La Piscina Bola Apk.md DELETED
@@ -1,66 +0,0 @@
1
-
2
- <h1>8 reglas de piscina de bolas APK: Una aplicación práctica para los amantes de la piscina</h1>
3
- <p>Si te gusta jugar al billar, sabes lo importante que es seguir las reglas y llevar un registro de la puntuación. Pero a veces, puede ser difícil recordar todos los detalles o encontrar un árbitro confiable. Es por eso que necesita 8 Reglas de piscina de bolas APK, una aplicación gratuita que le ayuda a jugar al billar como un profesional. En este artículo, le diremos qué es 8 Reglas del Pool de Bolas APK, cómo descargarlo e instalarlo, cómo usarlo y qué beneficios ofrece. </p>
4
- <h2>8 reglas de la piscina bola apk</h2><br /><p><b><b>Download Zip</b> &#10031;&#10031;&#10031; <a href="https://bltlly.com/2v6Mg8">https://bltlly.com/2v6Mg8</a></b></p><br /><br />
5
- <h2>¿Qué es 8 reglas de piscina de bolas APK? </h2>
6
- <p>8 Reglas de la piscina de bolas APK es una aplicación para Android que actúa como árbitro y un marcador para juegos de billar. Se basa en las normas oficiales de la Australian Eight Ball Federation (AEBF), que son ampliamente utilizadas en torneos y competiciones. La aplicación tiene cuatro características principales: cronómetro, reglas, marcador y ajustes. Puedes usarlo para cronometrar tus disparos, revisar las reglas, actualizar la puntuación y personalizar tus preferencias. </p>
7
- <h3>Características de 8 reglas de piscina de bolas APK</h3>
8
- <p>Estas son algunas de las características que hacen de 8 reglas de piscina de bolas APK una aplicación útil para los amantes de la piscina:</p>
9
- <h4>Cronómetro</h4>
10
- <p>La función de cronómetro le permite establecer un límite de tiempo para cada disparo. El tiempo predeterminado es de 60 segundos, con una advertencia a 30 segundos y una cuenta atrás a 5 segundos. También puede agregar una extensión de 30 segundos si es necesario. El cronómetro sonará cuando se acabe el tiempo, indicando una falta. También puede pausar o reiniciar el cronómetro en cualquier momento. </p>
11
- <p></p>
12
- <h4>Reglas</h4>
13
- <p>La función de reglas le da acceso a las reglas oficiales de la AEBF para el pool de 8 bolas. Puede navegar a través de diferentes categorías, tales como reglas generales, faltas, tiros, bolas y bastidores. También puedes buscar términos o palabras clave específicos. Las reglas son claras y concisas, con ejemplos e ilustraciones para ayudarte a entenderlas mejor. </p>
14
- <h4>Marcador</h4>
15
-
16
- <h4>Ajustes</h4>
17
- <p>La función de configuración le permite personalizar su aplicación de acuerdo a sus preferencias. Puede cambiar el idioma (inglés o francés), el sonido (encendido o apagado), la vibración (encendido o apagado) y el tema (claro u oscuro). También puede ponerse en contacto con el desarrollador o calificar la aplicación de esta función. </p>
18
- <h3> Cómo descargar e instalar 8 reglas de piscina de bolas APK? </h3>
19
- <p>Descargar e instalar 8 reglas de piscina de bolas APK es fácil y rápido. Estos son los pasos que debe seguir:</p>
20
- <ol>
21
- <li>Ir a [este enlace]( 1 ) y haga clic en "Descargar APK". </li>
22
- <li>Espere a que la descarga termine y abra el archivo. </li>
23
- <li>Si se le solicita, permita la instalación desde fuentes desconocidas. </li>
24
- <li>Siga las instrucciones en la pantalla y complete la instalación. </li>
25
- <li>Iniciar la aplicación y disfrutar de jugar al billar con tus amigos. </li>
26
- </ol>
27
- <h3>Cómo utilizar 8 reglas de piscina de bolas APK? </h3>
28
- <p>Uso de 8 reglas de piscina de bolas APK es simple e intuitivo. Aquí hay algunos consejos sobre cómo usarlo:</p>
29
- <h4>Iniciar un nuevo juego</h4>
30
- <p>Para iniciar un nuevo juego, toque en el botón "New Game" en la pantalla de inicio. Introduzca los nombres de los jugadores o equipos y elija sus colores. Toca "Iniciar juego" para comenzar. </p>
31
- <h4>Utilice el cronómetro</h4>
32
- <p>Para usar el cronómetro, toque el botón "Cronómetro" en la esquina inferior derecha de la pantalla. El cronómetro comenzará la cuenta atrás desde 60 segundos. Puede pausar o restablecer el cronómetro pulsando sobre él. También puede agregar una extensión de 30 segundos tocando el botón "Extensión". El cronómetro sonará cuando el tiempo termine, indicando una falta. </p>
33
- <h4>Compruebe las reglas</h4>
34
-
35
- <h4>Actualizar el marcador</h4>
36
- <p>Para actualizar el marcador, toque el botón "Marcador" en la esquina superior derecha de la pantalla. Verás los nombres y colores de los jugadores o equipos, y sus puntos. Para sumar o restar puntos, toca los botones "+" o "-" junto a cada jugador o equipo. La aplicación calculará automáticamente la puntuación total y mostrará el ganador al final del juego. También puede guardar o eliminar el historial de partituras tocando el botón "Historial". </p>
37
- <h4>Personalizar la configuración</h4>
38
- <p>Para personalizar la configuración, toque en el botón "Configuración" en la esquina superior izquierda de la pantalla. Verá una lista de opciones, como idioma, sonido, vibración y tema. Toque en cualquier opción para cambiarla según sus preferencias. También puede ponerse en contacto con el desarrollador o calificar la aplicación de esta función. </p>
39
- <h2>Los beneficios de usar 8 reglas de piscina de bolas APK</h2>
40
- <p>Usando 8 reglas de piscina de bolas APK tiene muchos beneficios para los amantes de la piscina. Aquí están algunos de ellos:</p>
41
- <h3>Conveniencia</h3>
42
- <p>Con 8 reglas de la piscina de bolas APK, no es necesario llevar un cronómetro físico, un libro de reglas, o un marcador de papel. Puede tener todo lo que necesita en su smartphone. También puede acceder a la aplicación en cualquier momento y en cualquier lugar, siempre y cuando tenga una conexión a Internet. </p>
43
- <h3>Precisión</h3>
44
- <p>Con 8 reglas del grupo de bolas APK, no es necesario depender de su memoria o conjeturas para seguir las reglas y realizar un seguimiento de la puntuación. La aplicación le proporciona información precisa y actualizada basada en las normas oficiales de la AEBF. También puedes evitar errores o disputas humanas usando la aplicación como árbitro y marcador. </p>
45
- <h3>Equidad</h3>
46
- <p>Con 8 reglas de billar de bolas APK, usted no tiene que preocuparse de hacer trampa o sesgo al jugar al billar con sus amigos. La aplicación asegura que todo el mundo juega con las mismas reglas y tiene las mismas posibilidades de ganar. También puede disfrutar de una competencia amistosa y justa sin ningún argumento o conflicto. </p>
47
- <h3>Diversión</h3>
48
-
49
- <h2>Conclusión</h2>
50
- <p>8 Reglas de la piscina de bolas APK es una aplicación práctica para los amantes de la piscina que quieren jugar al billar como un profesional. Se basa en las reglas oficiales de la AEBF y tiene cuatro características principales: cronómetro, reglas, marcador y ajustes. Puedes usarlo para cronometrar tus disparos, revisar las reglas, actualizar la puntuación y personalizar tus preferencias. Puedes descargarlo e instalarlo gratis desde [este enlace] y usarlo en cualquier momento y en cualquier lugar. El uso de 8 reglas de piscina de bolas APK tiene muchos beneficios, tales como la comodidad, precisión, equidad y diversión. Es una aplicación imprescindible para los amantes de la piscina que quieren jugar al billar como un profesional. </p>
51
- <p>Aquí hay algunas preguntas frecuentes que es posible que tenga acerca de 8 reglas de piscina de bolas APK:</p>
52
- <ul>
53
- <li>Q: Es 8 reglas de piscina de bolas APK seguro de usar? </li>
54
- <li>A: Sí, 8 reglas de la piscina de bolas APK es seguro de usar. No contiene ningún virus, malware o spyware. Tampoco recopila ni comparte datos personales de su dispositivo. </li>
55
- <li>Q: Es 8 reglas de piscina de bolas APK compatible con mi dispositivo? </li>
56
- <li>A: 8 reglas de la piscina de bolas APK es compatible con la mayoría de los dispositivos Android que se ejecutan en Android 4.4 o superior. Puede comprobar la compatibilidad de su dispositivo visitando [este enlace] y haciendo clic en "Comprobar compatibilidad". </li>
57
- <li>Q: Es 8 reglas de piscina de bolas APK disponible para dispositivos iOS? </li>
58
- <li>A: No, 8 reglas de la piscina de bolas APK no está disponible para dispositivos iOS. Sin embargo, puede usar una aplicación similar llamada 8 Reglas de la piscina de bolas - Herramienta de árbitro, que está disponible en la App Store.</li>
59
- <li>Q: ¿Puedo usar 8 reglas de piscina de bolas APK sin conexión? </li>
60
- <li>A: Sí, puede usar 8 reglas de piscina de bolas APK sin conexión. Sin embargo, necesitará una conexión a Internet para descargar e instalar la aplicación, y para acceder a algunas de las funciones, como las reglas y la configuración. </li>
61
- <li>Q: ¿Puedo compartir 8 reglas de piscina de bolas APK con mis amigos? </li>
62
- <li>A: Sí, puede compartir 8 reglas de piscina de bolas APK con tus amigos. Puede enviarles el enlace para descargar la aplicación, o usar el botón "Compartir" en la aplicación para enviarles el archivo APK a través de Bluetooth, correo electrónico u otras aplicaciones. </li>
63
-
64
- <p>Espero que haya disfrutado de la lectura de este artículo y aprendido algo nuevo acerca de 8 Reglas de la piscina de bolas APK. Si tiene alguna pregunta o comentario, no dude en dejar un comentario a continuación. Gracias por su tiempo y atención. </p> 64aa2da5cf<br />
65
- <br />
66
- <br />
 
 
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/_ratio.py DELETED
@@ -1,160 +0,0 @@
1
- import sys
2
- from fractions import Fraction
3
- from math import ceil
4
- from typing import cast, List, Optional, Sequence
5
-
6
- if sys.version_info >= (3, 8):
7
- from typing import Protocol
8
- else:
9
- from pip._vendor.typing_extensions import Protocol # pragma: no cover
10
-
11
-
12
- class Edge(Protocol):
13
- """Any object that defines an edge (such as Layout)."""
14
-
15
- size: Optional[int] = None
16
- ratio: int = 1
17
- minimum_size: int = 1
18
-
19
-
20
- def ratio_resolve(total: int, edges: Sequence[Edge]) -> List[int]:
21
- """Divide total space to satisfy size, ratio, and minimum_size, constraints.
22
-
23
- The returned list of integers should add up to total in most cases, unless it is
24
- impossible to satisfy all the constraints. For instance, if there are two edges
25
- with a minimum size of 20 each and `total` is 30 then the returned list will be
26
- greater than total. In practice, this would mean that a Layout object would
27
- clip the rows that would overflow the screen height.
28
-
29
- Args:
30
- total (int): Total number of characters.
31
- edges (List[Edge]): Edges within total space.
32
-
33
- Returns:
34
- List[int]: Number of characters for each edge.
35
- """
36
- # Size of edge or None for yet to be determined
37
- sizes = [(edge.size or None) for edge in edges]
38
-
39
- _Fraction = Fraction
40
-
41
- # While any edges haven't been calculated
42
- while None in sizes:
43
- # Get flexible edges and index to map these back on to sizes list
44
- flexible_edges = [
45
- (index, edge)
46
- for index, (size, edge) in enumerate(zip(sizes, edges))
47
- if size is None
48
- ]
49
- # Remaining space in total
50
- remaining = total - sum(size or 0 for size in sizes)
51
- if remaining <= 0:
52
- # No room for flexible edges
53
- return [
54
- ((edge.minimum_size or 1) if size is None else size)
55
- for size, edge in zip(sizes, edges)
56
- ]
57
- # Calculate number of characters in a ratio portion
58
- portion = _Fraction(
59
- remaining, sum((edge.ratio or 1) for _, edge in flexible_edges)
60
- )
61
-
62
- # If any edges will be less than their minimum, replace size with the minimum
63
- for index, edge in flexible_edges:
64
- if portion * edge.ratio <= edge.minimum_size:
65
- sizes[index] = edge.minimum_size
66
- # New fixed size will invalidate calculations, so we need to repeat the process
67
- break
68
- else:
69
- # Distribute flexible space and compensate for rounding error
70
- # Since edge sizes can only be integers we need to add the remainder
71
- # to the following line
72
- remainder = _Fraction(0)
73
- for index, edge in flexible_edges:
74
- size, remainder = divmod(portion * edge.ratio + remainder, 1)
75
- sizes[index] = size
76
- break
77
- # Sizes now contains integers only
78
- return cast(List[int], sizes)
79
-
80
-
81
- def ratio_reduce(
82
- total: int, ratios: List[int], maximums: List[int], values: List[int]
83
- ) -> List[int]:
84
- """Divide an integer total in to parts based on ratios.
85
-
86
- Args:
87
- total (int): The total to divide.
88
- ratios (List[int]): A list of integer ratios.
89
- maximums (List[int]): List of maximums values for each slot.
90
- values (List[int]): List of values
91
-
92
- Returns:
93
- List[int]: A list of integers guaranteed to sum to total.
94
- """
95
- ratios = [ratio if _max else 0 for ratio, _max in zip(ratios, maximums)]
96
- total_ratio = sum(ratios)
97
- if not total_ratio:
98
- return values[:]
99
- total_remaining = total
100
- result: List[int] = []
101
- append = result.append
102
- for ratio, maximum, value in zip(ratios, maximums, values):
103
- if ratio and total_ratio > 0:
104
- distributed = min(maximum, round(ratio * total_remaining / total_ratio))
105
- append(value - distributed)
106
- total_remaining -= distributed
107
- total_ratio -= ratio
108
- else:
109
- append(value)
110
- return result
111
-
112
-
113
- def ratio_distribute(
114
- total: int, ratios: List[int], minimums: Optional[List[int]] = None
115
- ) -> List[int]:
116
- """Distribute an integer total in to parts based on ratios.
117
-
118
- Args:
119
- total (int): The total to divide.
120
- ratios (List[int]): A list of integer ratios.
121
- minimums (List[int]): List of minimum values for each slot.
122
-
123
- Returns:
124
- List[int]: A list of integers guaranteed to sum to total.
125
- """
126
- if minimums:
127
- ratios = [ratio if _min else 0 for ratio, _min in zip(ratios, minimums)]
128
- total_ratio = sum(ratios)
129
- assert total_ratio > 0, "Sum of ratios must be > 0"
130
-
131
- total_remaining = total
132
- distributed_total: List[int] = []
133
- append = distributed_total.append
134
- if minimums is None:
135
- _minimums = [0] * len(ratios)
136
- else:
137
- _minimums = minimums
138
- for ratio, minimum in zip(ratios, _minimums):
139
- if total_ratio > 0:
140
- distributed = max(minimum, ceil(ratio * total_remaining / total_ratio))
141
- else:
142
- distributed = total_remaining
143
- append(distributed)
144
- total_ratio -= ratio
145
- total_remaining -= distributed
146
- return distributed_total
147
-
148
-
149
- if __name__ == "__main__":
150
- from dataclasses import dataclass
151
-
152
- @dataclass
153
- class E:
154
-
155
- size: Optional[int] = None
156
- ratio: int = 1
157
- minimum_size: int = 1
158
-
159
- resolved = ratio_resolve(110, [E(None, 1, 1), E(None, 1, 1), E(None, 1, 1)])
160
- print(sum(resolved))
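The module already closes with a tiny self-check of `ratio_resolve`; here is a similar sketch for `ratio_distribute`, assuming the functions above are in scope (they normally live inside pip's vendored copy of rich).

```python
# Split 100 cells across three equally weighted slots.
print(ratio_distribute(100, [1, 1, 1]))            # [34, 33, 33]

# With minimums, a slot never drops below its floor even if its ratio is small.
print(ratio_distribute(10, [1, 2, 2], [5, 1, 1]))  # [5, 3, 2]
```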
 
 
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_distutils/_collections.py DELETED
@@ -1,56 +0,0 @@
1
- import collections
2
- import itertools
3
-
4
-
5
- # from jaraco.collections 3.5.1
6
- class DictStack(list, collections.abc.Mapping):
7
- """
8
- A stack of dictionaries that behaves as a view on those dictionaries,
9
- giving preference to the last.
10
-
11
- >>> stack = DictStack([dict(a=1, c=2), dict(b=2, a=2)])
12
- >>> stack['a']
13
- 2
14
- >>> stack['b']
15
- 2
16
- >>> stack['c']
17
- 2
18
- >>> len(stack)
19
- 3
20
- >>> stack.push(dict(a=3))
21
- >>> stack['a']
22
- 3
23
- >>> set(stack.keys()) == set(['a', 'b', 'c'])
24
- True
25
- >>> set(stack.items()) == set([('a', 3), ('b', 2), ('c', 2)])
26
- True
27
- >>> dict(**stack) == dict(stack) == dict(a=3, c=2, b=2)
28
- True
29
- >>> d = stack.pop()
30
- >>> stack['a']
31
- 2
32
- >>> d = stack.pop()
33
- >>> stack['a']
34
- 1
35
- >>> stack.get('b', None)
36
- >>> 'c' in stack
37
- True
38
- """
39
-
40
- def __iter__(self):
41
- dicts = list.__iter__(self)
42
- return iter(set(itertools.chain.from_iterable(c.keys() for c in dicts)))
43
-
44
- def __getitem__(self, key):
45
- for scope in reversed(tuple(list.__iter__(self))):
46
- if key in scope:
47
- return scope[key]
48
- raise KeyError(key)
49
-
50
- push = list.append
51
-
52
- def __contains__(self, other):
53
- return collections.abc.Mapping.__contains__(self, other)
54
-
55
- def __len__(self):
56
- return len(list(iter(self)))
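Beyond the doctests above, a short sketch of the typical use case: layered configuration lookups where the most recently pushed mapping wins.

```python
defaults = dict(color="blue", size=10)
user = dict(size=14)

cfg = DictStack([defaults, user])
assert cfg["size"] == 14 and cfg["color"] == "blue"  # later layers shadow earlier ones

cfg.push(dict(size=18))   # temporary override
assert cfg["size"] == 18
cfg.pop()                 # back to the user setting
assert cfg["size"] == 14
```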
 
 
spaces/Binettebob22/fast_diffusion2/index.html DELETED
@@ -1,16 +0,0 @@
1
- <!DOCTYPE html>
2
- <html lang="en">
3
- <head>
4
- <meta charset="utf-8" />
5
- <meta name="twitter:card" content="player"/>
6
- <meta name="twitter:site" content=""/>
7
- <meta name="twitter:player" content="https://omnibus-maximum-multiplier-places.hf.space"/>
8
- <meta name="twitter:player:stream" content="https://omnibus-maximum-multiplier-places.hf.space"/>
9
- <meta name="twitter:player:width" content="100%"/>
10
- <meta name="twitter:player:height" content="600"/>
11
- <meta property="og:title" content="Embedded Live Viewer"/>
12
- <meta property="og:description" content="Tweet Genie - A Huggingface Space"/>
13
- <meta property="og:image" content="https://cdn.glitch.global/80dbe92e-ce75-44af-84d5-74a2e21e9e55/omnicard.png?v=1676772531627"/>
14
- <!--<meta http-equiv="refresh" content="0; url=https://huggingface.co/spaces/corbt/tweet-genie">-->
15
- </head>
16
- </html>
 
 
spaces/CVPR/Dual-Key_Backdoor_Attacks/app.py DELETED
@@ -1,2 +0,0 @@
1
- from demo import *
2
- launch_demo()
 
 
 
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/evaluation/testing.py DELETED
@@ -1,78 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2
- import logging
3
- import numpy as np
4
- import pprint
5
- import sys
6
- from collections import OrderedDict
7
- from collections.abc import Mapping
8
-
9
-
10
- def print_csv_format(results):
11
- """
12
- Print main metrics in a format similar to Detectron,
13
- so that they are easy to copypaste into a spreadsheet.
14
-
15
- Args:
16
- results (OrderedDict[dict]): task_name -> {metric -> score}
17
- """
18
- assert isinstance(results, OrderedDict), results # unordered results cannot be properly printed
19
- logger = logging.getLogger(__name__)
20
- for task, res in results.items():
21
- # Don't print "AP-category" metrics since they are usually not tracked.
22
- important_res = [(k, v) for k, v in res.items() if "-" not in k]
23
- logger.info("copypaste: Task: {}".format(task))
24
- logger.info("copypaste: " + ",".join([k[0] for k in important_res]))
25
- logger.info("copypaste: " + ",".join(["{0:.4f}".format(k[1]) for k in important_res]))
26
-
27
-
28
- def verify_results(cfg, results):
29
- """
30
- Args:
31
- results (OrderedDict[dict]): task_name -> {metric -> score}
32
-
33
- Returns:
34
- bool: whether the verification succeeds or not
35
- """
36
- expected_results = cfg.TEST.EXPECTED_RESULTS
37
- if not len(expected_results):
38
- return True
39
-
40
- ok = True
41
- for task, metric, expected, tolerance in expected_results:
42
- actual = results[task][metric]
43
- if not np.isfinite(actual):
44
- ok = False
45
- diff = abs(actual - expected)
46
- if diff > tolerance:
47
- ok = False
48
-
49
- logger = logging.getLogger(__name__)
50
- if not ok:
51
- logger.error("Result verification failed!")
52
- logger.error("Expected Results: " + str(expected_results))
53
- logger.error("Actual Results: " + pprint.pformat(results))
54
-
55
- sys.exit(1)
56
- else:
57
- logger.info("Results verification passed.")
58
- return ok
59
-
60
-
61
- def flatten_results_dict(results):
62
- """
63
- Expand a hierarchical dict of scalars into a flat dict of scalars.
64
- If results[k1][k2][k3] = v, the returned dict will have the entry
65
- {"k1/k2/k3": v}.
66
-
67
- Args:
68
- results (dict):
69
- """
70
- r = {}
71
- for k, v in results.items():
72
- if isinstance(v, Mapping):
73
- v = flatten_results_dict(v)
74
- for kk, vv in v.items():
75
- r[k + "/" + kk] = vv
76
- else:
77
- r[k] = v
78
- return r
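A quick sketch of `flatten_results_dict` from the module above, with made-up numbers.

```python
results = {"bbox": {"AP": 37.2, "AP50": 58.1}, "segm": {"AP": 33.4}}
print(flatten_results_dict(results))
# {'bbox/AP': 37.2, 'bbox/AP50': 58.1, 'segm/AP': 33.4}
```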
 
 
spaces/CVPR/LIVE/thrust/thrust/detail/allocator/tagged_allocator.h DELETED
@@ -1,101 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
- #pragma once
18
-
19
- #include <thrust/detail/config.h>
20
- #include <thrust/detail/type_traits/pointer_traits.h>
21
- #include <thrust/iterator/iterator_traits.h>
22
-
23
- namespace thrust
24
- {
25
- namespace detail
26
- {
27
-
28
- template<typename T, typename Tag, typename Pointer> class tagged_allocator;
29
-
30
- template<typename Tag, typename Pointer>
31
- class tagged_allocator<void, Tag, Pointer>
32
- {
33
- public:
34
- typedef void value_type;
35
- typedef typename thrust::detail::pointer_traits<Pointer>::template rebind<void>::other pointer;
36
- typedef typename thrust::detail::pointer_traits<Pointer>::template rebind<const void>::other const_pointer;
37
- typedef std::size_t size_type;
38
- typedef typename thrust::detail::pointer_traits<Pointer>::difference_type difference_type;
39
- typedef Tag system_type;
40
-
41
- template<typename U>
42
- struct rebind
43
- {
44
- typedef tagged_allocator<U,Tag,Pointer> other;
45
- }; // end rebind
46
- };
47
-
48
- template<typename T, typename Tag, typename Pointer>
49
- class tagged_allocator
50
- {
51
- public:
52
- typedef T value_type;
53
- typedef typename thrust::detail::pointer_traits<Pointer>::template rebind<T>::other pointer;
54
- typedef typename thrust::detail::pointer_traits<Pointer>::template rebind<const T>::other const_pointer;
55
- typedef typename thrust::iterator_reference<pointer>::type reference;
56
- typedef typename thrust::iterator_reference<const_pointer>::type const_reference;
57
- typedef std::size_t size_type;
58
- typedef typename thrust::detail::pointer_traits<pointer>::difference_type difference_type;
59
- typedef Tag system_type;
60
-
61
- template<typename U>
62
- struct rebind
63
- {
64
- typedef tagged_allocator<U,Tag,Pointer> other;
65
- }; // end rebind
66
-
67
- __host__ __device__
68
- inline tagged_allocator();
69
-
70
- __host__ __device__
71
- inline tagged_allocator(const tagged_allocator &);
72
-
73
- template<typename U, typename OtherPointer>
74
- __host__ __device__
75
- inline tagged_allocator(const tagged_allocator<U, Tag, OtherPointer> &);
76
-
77
- __host__ __device__
78
- inline ~tagged_allocator();
79
-
80
- __host__ __device__
81
- pointer address(reference x) const;
82
-
83
- __host__ __device__
84
- const_pointer address(const_reference x) const;
85
-
86
- size_type max_size() const;
87
- };
88
-
89
- template<typename T1, typename Pointer1, typename T2, typename Pointer2, typename Tag>
90
- __host__ __device__
91
- bool operator==(const tagged_allocator<T1,Pointer1,Tag> &, const tagged_allocator<T2,Pointer2,Tag> &);
92
-
93
- template<typename T1, typename Pointer1, typename T2, typename Pointer2, typename Tag>
94
- __host__ __device__
95
- bool operator!=(const tagged_allocator<T1,Pointer1,Tag> &, const tagged_allocator<T2,Pointer2,Tag> &);
96
-
97
- } // end detail
98
- } // end thrust
99
-
100
- #include <thrust/detail/allocator/tagged_allocator.inl>
101
-
 
 
spaces/CVPR/LIVE/thrust/thrust/system/cpp/detail/copy.h DELETED
@@ -1,23 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
- #pragma once
18
-
19
- #include <thrust/detail/config.h>
20
-
21
- // this system inherits copy
22
- #include <thrust/system/detail/sequential/copy.h>
23
-
 
 
spaces/CVPR/WALT/mmdet/models/necks/fpn_carafe.py DELETED
@@ -1,267 +0,0 @@
1
- import torch.nn as nn
2
- from mmcv.cnn import ConvModule, build_upsample_layer, xavier_init
3
- from mmcv.ops.carafe import CARAFEPack
4
-
5
- from ..builder import NECKS
6
-
7
-
8
- @NECKS.register_module()
9
- class FPN_CARAFE(nn.Module):
10
- """FPN_CARAFE is a more flexible implementation of FPN. It allows more
11
- choice for upsample methods during the top-down pathway.
12
-
13
- It can reproduce the performance of ICCV 2019 paper
14
- CARAFE: Content-Aware ReAssembly of FEatures
15
- Please refer to https://arxiv.org/abs/1905.02188 for more details.
16
-
17
- Args:
18
- in_channels (list[int]): Number of channels for each input feature map.
19
- out_channels (int): Output channels of feature pyramids.
20
- num_outs (int): Number of output stages.
21
- start_level (int): Start level of feature pyramids.
22
- (Default: 0)
23
- end_level (int): End level of feature pyramids.
24
- (Default: -1 indicates the last level).
25
- norm_cfg (dict): Dictionary to construct and config norm layer.
26
- act_cfg (dict): Config dict for the activation layer in ConvModule
27
- (Default: None indicates w/o activation).
28
- order (dict): Order of components in ConvModule.
29
- upsample (str): Type of upsample layer.
30
- upsample_cfg (dict): Dictionary to construct and config upsample layer.
31
- """
32
-
33
- def __init__(self,
34
- in_channels,
35
- out_channels,
36
- num_outs,
37
- start_level=0,
38
- end_level=-1,
39
- norm_cfg=None,
40
- act_cfg=None,
41
- order=('conv', 'norm', 'act'),
42
- upsample_cfg=dict(
43
- type='carafe',
44
- up_kernel=5,
45
- up_group=1,
46
- encoder_kernel=3,
47
- encoder_dilation=1)):
48
- super(FPN_CARAFE, self).__init__()
49
- assert isinstance(in_channels, list)
50
- self.in_channels = in_channels
51
- self.out_channels = out_channels
52
- self.num_ins = len(in_channels)
53
- self.num_outs = num_outs
54
- self.norm_cfg = norm_cfg
55
- self.act_cfg = act_cfg
56
- self.with_bias = norm_cfg is None
57
- self.upsample_cfg = upsample_cfg.copy()
58
- self.upsample = self.upsample_cfg.get('type')
59
- self.relu = nn.ReLU(inplace=False)
60
-
61
- self.order = order
62
- assert order in [('conv', 'norm', 'act'), ('act', 'conv', 'norm')]
63
-
64
- assert self.upsample in [
65
- 'nearest', 'bilinear', 'deconv', 'pixel_shuffle', 'carafe', None
66
- ]
67
- if self.upsample in ['deconv', 'pixel_shuffle']:
68
- assert hasattr(
69
- self.upsample_cfg,
70
- 'upsample_kernel') and self.upsample_cfg.upsample_kernel > 0
71
- self.upsample_kernel = self.upsample_cfg.pop('upsample_kernel')
72
-
73
- if end_level == -1:
74
- self.backbone_end_level = self.num_ins
75
- assert num_outs >= self.num_ins - start_level
76
- else:
77
- # if end_level < inputs, no extra level is allowed
78
- self.backbone_end_level = end_level
79
- assert end_level <= len(in_channels)
80
- assert num_outs == end_level - start_level
81
- self.start_level = start_level
82
- self.end_level = end_level
83
-
84
- self.lateral_convs = nn.ModuleList()
85
- self.fpn_convs = nn.ModuleList()
86
- self.upsample_modules = nn.ModuleList()
87
-
88
- for i in range(self.start_level, self.backbone_end_level):
89
- l_conv = ConvModule(
90
- in_channels[i],
91
- out_channels,
92
- 1,
93
- norm_cfg=norm_cfg,
94
- bias=self.with_bias,
95
- act_cfg=act_cfg,
96
- inplace=False,
97
- order=self.order)
98
- fpn_conv = ConvModule(
99
- out_channels,
100
- out_channels,
101
- 3,
102
- padding=1,
103
- norm_cfg=self.norm_cfg,
104
- bias=self.with_bias,
105
- act_cfg=act_cfg,
106
- inplace=False,
107
- order=self.order)
108
- if i != self.backbone_end_level - 1:
109
- upsample_cfg_ = self.upsample_cfg.copy()
110
- if self.upsample == 'deconv':
111
- upsample_cfg_.update(
112
- in_channels=out_channels,
113
- out_channels=out_channels,
114
- kernel_size=self.upsample_kernel,
115
- stride=2,
116
- padding=(self.upsample_kernel - 1) // 2,
117
- output_padding=(self.upsample_kernel - 1) // 2)
118
- elif self.upsample == 'pixel_shuffle':
119
- upsample_cfg_.update(
120
- in_channels=out_channels,
121
- out_channels=out_channels,
122
- scale_factor=2,
123
- upsample_kernel=self.upsample_kernel)
124
- elif self.upsample == 'carafe':
125
- upsample_cfg_.update(channels=out_channels, scale_factor=2)
126
- else:
127
- # suppress warnings
128
- align_corners = (None
129
- if self.upsample == 'nearest' else False)
130
- upsample_cfg_.update(
131
- scale_factor=2,
132
- mode=self.upsample,
133
- align_corners=align_corners)
134
- upsample_module = build_upsample_layer(upsample_cfg_)
135
- self.upsample_modules.append(upsample_module)
136
- self.lateral_convs.append(l_conv)
137
- self.fpn_convs.append(fpn_conv)
138
-
139
- # add extra conv layers (e.g., RetinaNet)
140
- extra_out_levels = (
141
- num_outs - self.backbone_end_level + self.start_level)
142
- if extra_out_levels >= 1:
143
- for i in range(extra_out_levels):
144
- in_channels = (
145
- self.in_channels[self.backbone_end_level -
146
- 1] if i == 0 else out_channels)
147
- extra_l_conv = ConvModule(
148
- in_channels,
149
- out_channels,
150
- 3,
151
- stride=2,
152
- padding=1,
153
- norm_cfg=norm_cfg,
154
- bias=self.with_bias,
155
- act_cfg=act_cfg,
156
- inplace=False,
157
- order=self.order)
158
- if self.upsample == 'deconv':
159
- upsampler_cfg_ = dict(
160
- in_channels=out_channels,
161
- out_channels=out_channels,
162
- kernel_size=self.upsample_kernel,
163
- stride=2,
164
- padding=(self.upsample_kernel - 1) // 2,
165
- output_padding=(self.upsample_kernel - 1) // 2)
166
- elif self.upsample == 'pixel_shuffle':
167
- upsampler_cfg_ = dict(
168
- in_channels=out_channels,
169
- out_channels=out_channels,
170
- scale_factor=2,
171
- upsample_kernel=self.upsample_kernel)
172
- elif self.upsample == 'carafe':
173
- upsampler_cfg_ = dict(
174
- channels=out_channels,
175
- scale_factor=2,
176
- **self.upsample_cfg)
177
- else:
178
- # suppress warnings
179
- align_corners = (None
180
- if self.upsample == 'nearest' else False)
181
- upsampler_cfg_ = dict(
182
- scale_factor=2,
183
- mode=self.upsample,
184
- align_corners=align_corners)
185
- upsampler_cfg_['type'] = self.upsample
186
- upsample_module = build_upsample_layer(upsampler_cfg_)
187
- extra_fpn_conv = ConvModule(
188
- out_channels,
189
- out_channels,
190
- 3,
191
- padding=1,
192
- norm_cfg=self.norm_cfg,
193
- bias=self.with_bias,
194
- act_cfg=act_cfg,
195
- inplace=False,
196
- order=self.order)
197
- self.upsample_modules.append(upsample_module)
198
- self.fpn_convs.append(extra_fpn_conv)
199
- self.lateral_convs.append(extra_l_conv)
200
-
201
- # default init_weights for conv(msra) and norm in ConvModule
202
- def init_weights(self):
203
- """Initialize the weights of module."""
204
- for m in self.modules():
205
- if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
206
- xavier_init(m, distribution='uniform')
207
- for m in self.modules():
208
- if isinstance(m, CARAFEPack):
209
- m.init_weights()
210
-
211
- def slice_as(self, src, dst):
212
- """Slice ``src`` as ``dst``
213
-
214
- Note:
215
- ``src`` should have the same or larger size than ``dst``.
216
-
217
- Args:
218
- src (torch.Tensor): Tensors to be sliced.
219
- dst (torch.Tensor): ``src`` will be sliced to have the same
220
- size as ``dst``.
221
-
222
- Returns:
223
- torch.Tensor: Sliced tensor.
224
- """
225
- assert (src.size(2) >= dst.size(2)) and (src.size(3) >= dst.size(3))
226
- if src.size(2) == dst.size(2) and src.size(3) == dst.size(3):
227
- return src
228
- else:
229
- return src[:, :, :dst.size(2), :dst.size(3)]
230
-
231
- def tensor_add(self, a, b):
232
- """Add tensors ``a`` and ``b`` that might have different sizes."""
233
- if a.size() == b.size():
234
- c = a + b
235
- else:
236
- c = a + self.slice_as(b, a)
237
- return c
238
-
239
- def forward(self, inputs):
240
- """Forward function."""
241
- assert len(inputs) == len(self.in_channels)
242
-
243
- # build laterals
244
- laterals = []
245
- for i, lateral_conv in enumerate(self.lateral_convs):
246
- if i <= self.backbone_end_level - self.start_level:
247
- input = inputs[min(i + self.start_level, len(inputs) - 1)]
248
- else:
249
- input = laterals[-1]
250
- lateral = lateral_conv(input)
251
- laterals.append(lateral)
252
-
253
- # build top-down path
254
- for i in range(len(laterals) - 1, 0, -1):
255
- if self.upsample is not None:
256
- upsample_feat = self.upsample_modules[i - 1](laterals[i])
257
- else:
258
- upsample_feat = laterals[i]
259
- laterals[i - 1] = self.tensor_add(laterals[i - 1], upsample_feat)
260
-
261
- # build outputs
262
- num_conv_outs = len(self.fpn_convs)
263
- outs = []
264
- for i in range(num_conv_outs):
265
- out = self.fpn_convs[i](laterals[i])
266
- outs.append(out)
267
- return tuple(outs)
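A forward-pass sketch for the neck above. It assumes mmdet/mmcv are installed; the channel counts and feature sizes are illustrative, and the sketch switches the upsampler to plain nearest-neighbour so it does not require the compiled CARAFE op.

```python
import torch

# A ResNet-50-like pyramid: four levels with growing channels and shrinking resolution.
in_channels = [256, 512, 1024, 2048]
feats = [torch.randn(1, c, 64 // 2 ** i, 64 // 2 ** i)
         for i, c in enumerate(in_channels)]

neck = FPN_CARAFE(
    in_channels=in_channels, out_channels=256, num_outs=5,
    upsample_cfg=dict(type='nearest'))   # avoid the CUDA CARAFE op for this sketch
neck.init_weights()

outs = neck(feats)
print([tuple(o.shape) for o in outs])    # five levels, each with 256 output channels
```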